diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c index b3790880c7..d9f359b572 100644 --- a/daemons/based/based_callbacks.c +++ b/daemons/based/based_callbacks.c @@ -1,1402 +1,1402 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // uint32_t, uint64_t, UINT64_C() #include #include #include // PRIu64 #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include #include #include #define EXIT_ESCALATION_MS 10000 qb_ipcs_service_t *ipcs_ro = NULL; qb_ipcs_service_t *ipcs_rw = NULL; qb_ipcs_service_t *ipcs_shm = NULL; static int cib_process_command(xmlNode *request, const cib__operation_t *operation, cib__op_fn_t op_function, xmlNode **reply, xmlNode **cib_diff, bool privileged); static gboolean cib_common_callback(qb_ipcs_connection_t *c, void *data, size_t size, gboolean privileged); static int32_t cib_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (cib_shutdown_flag) { crm_info("Ignoring new IPC client [%d] during shutdown", pcmk__client_pid(c)); return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } static int32_t cib_ipc_dispatch_rw(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, TRUE); } static int32_t cib_ipc_dispatch_ro(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, FALSE); } /* Error code means? */ static int32_t cib_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void cib_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); cib_ipc_closed(c); if (cib_shutdown_flag) { cib_shutdown(0); } } struct qb_ipcs_service_handlers ipc_ro_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_ro, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; struct qb_ipcs_service_handlers ipc_rw_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_rw, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; /*! * \internal * \brief Create reply XML for a CIB request * * \param[in] op CIB operation type * \param[in] call_id CIB call ID * \param[in] client_id CIB client ID * \param[in] call_options Group of enum cib_call_options flags * \param[in] rc Request return code * \param[in] call_data Request output data * * \return Reply XML (guaranteed not to be \c NULL) * * \note The caller is responsible for freeing the return value using * \p pcmk__xml_free(). 
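 *
 * For illustration only (a sketch, with the constant names below standing in
 * for their literal string values, and attribute values hypothetical), a
 * reply built here looks roughly like:
 *
 *   <PCMK__XE_CIB_REPLY PCMK__XA_T=PCMK__VALUE_CIB PCMK__XA_CIB_OP="..."
 *                       PCMK__XA_CIB_CALLID="..." PCMK__XA_CIB_CLIENTID="..."
 *                       PCMK__XA_CIB_CALLOPT="0" PCMK__XA_CIB_RC="0">
 *     <PCMK__XE_CIB_CALLDATA> (copy of call_data, if any) </PCMK__XE_CIB_CALLDATA>
 *   </PCMK__XE_CIB_REPLY>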
*/ static xmlNode * create_cib_reply(const char *op, const char *call_id, const char *client_id, uint32_t call_options, int rc, xmlNode *call_data) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_CIB_REPLY); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(reply, PCMK__XA_CIB_OP, op); crm_xml_add(reply, PCMK__XA_CIB_CALLID, call_id); crm_xml_add(reply, PCMK__XA_CIB_CLIENTID, client_id); crm_xml_add_int(reply, PCMK__XA_CIB_CALLOPT, call_options); crm_xml_add_int(reply, PCMK__XA_CIB_RC, rc); if (call_data != NULL) { xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_CIB_CALLDATA); crm_trace("Attaching reply output"); pcmk__xml_copy(wrapper, call_data); } crm_log_xml_explicit(reply, "cib:reply"); return reply; } static void do_local_notify(const xmlNode *notify_src, const char *client_id, bool sync_reply, bool from_peer) { int msg_id = 0; int rc = pcmk_rc_ok; pcmk__client_t *client_obj = NULL; uint32_t flags = crm_ipc_server_event; CRM_CHECK((notify_src != NULL) && (client_id != NULL), return); crm_element_value_int(notify_src, PCMK__XA_CIB_CALLID, &msg_id); client_obj = pcmk__find_client_by_id(client_id); if (client_obj == NULL) { crm_debug("Could not notify client %s%s %s of call %d result: " "client no longer exists", client_id, (from_peer? " (originator of delegated request)" : ""), (sync_reply? "synchronously" : "asynchronously"), msg_id); return; } if (sync_reply) { flags = crm_ipc_flags_none; if (client_obj->ipcs != NULL) { msg_id = client_obj->request_id; client_obj->request_id = 0; } } switch (PCMK__CLIENT_TYPE(client_obj)) { case pcmk__client_ipc: rc = pcmk__ipc_send_xml(client_obj, msg_id, notify_src, flags); break; case pcmk__client_tls: case pcmk__client_tcp: rc = pcmk__remote_send_xml(client_obj->remote, notify_src); break; default: rc = EPROTONOSUPPORT; break; } if (rc == pcmk_rc_ok) { crm_trace("Notified %s client %s%s %s of call %d result", pcmk__client_type_str(PCMK__CLIENT_TYPE(client_obj)), pcmk__client_name(client_obj), (from_peer? " (originator of delegated request)" : ""), (sync_reply? "synchronously" : "asynchronously"), msg_id); } else { crm_warn("Could not notify %s client %s%s %s of call %d result: %s", pcmk__client_type_str(PCMK__CLIENT_TYPE(client_obj)), pcmk__client_name(client_obj), (from_peer? " (originator of delegated request)" : ""), (sync_reply? 
"synchronously" : "asynchronously"), msg_id, pcmk_rc_str(rc)); } } void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, pcmk__client_t *cib_client, gboolean privileged) { const char *op = crm_element_value(op_request, PCMK__XA_CIB_OP); uint32_t call_options = cib_none; int rc = pcmk_rc_ok; rc = pcmk__xe_get_flags(op_request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } /* Requests with cib_transaction set should not be sent to based directly * (outside of a commit-transaction request) */ if (pcmk_is_set(call_options, cib_transaction)) { return; } if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { if (flags & crm_ipc_client_response) { xmlNode *ack = pcmk__xe_create(NULL, __func__); crm_xml_add(ack, PCMK__XA_CIB_OP, CRM_OP_REGISTER); crm_xml_add(ack, PCMK__XA_CIB_CLIENTID, cib_client->id); pcmk__ipc_send_xml(cib_client, id, ack, flags); cib_client->request_id = 0; pcmk__xml_free(ack); } return; } else if (pcmk__str_eq(op, PCMK__VALUE_CIB_NOTIFY, pcmk__str_none)) { /* Update the notify filters for this client */ int on_off = 0; crm_exit_t status = CRM_EX_OK; uint64_t bit = UINT64_C(0); const char *type = crm_element_value(op_request, PCMK__XA_CIB_NOTIFY_TYPE); crm_element_value_int(op_request, PCMK__XA_CIB_NOTIFY_ACTIVATE, &on_off); crm_debug("Setting %s callbacks %s for client %s", type, (on_off? "on" : "off"), pcmk__client_name(cib_client)); if (pcmk__str_eq(type, PCMK__VALUE_CIB_POST_NOTIFY, pcmk__str_none)) { bit = cib_notify_post; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_PRE_NOTIFY, pcmk__str_none)) { bit = cib_notify_pre; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_UPDATE_CONFIRMATION, pcmk__str_none)) { bit = cib_notify_confirm; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_DIFF_NOTIFY, pcmk__str_none)) { bit = cib_notify_diff; } else { status = CRM_EX_INVALID_PARAM; } if (bit != 0) { if (on_off) { pcmk__set_client_flags(cib_client, bit); } else { pcmk__clear_client_flags(cib_client, bit); } } pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_ACK, NULL, status); return; } cib_process_request(op_request, privileged, cib_client); } int32_t cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged) { uint32_t id = 0; uint32_t flags = 0; uint32_t call_options = cib_none; pcmk__client_t *cib_client = pcmk__find_client(c); xmlNode *op_request = pcmk__client_data2xml(cib_client, data, &id, &flags); if (op_request) { int rc = pcmk_rc_ok; rc = pcmk__xe_get_flags(op_request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } } if (op_request == NULL) { crm_trace("Invalid message from %p", c); pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_NACK, NULL, CRM_EX_PROTOCOL); return 0; } else if(cib_client == NULL) { crm_trace("Invalid client %p", c); return 0; } if (pcmk_is_set(call_options, cib_sync_call)) { CRM_LOG_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(cib_client->request_id == 0); /* This means the client has two synchronous events in-flight */ cib_client->request_id = id; /* Reply only to the last one */ } if (cib_client->name == NULL) { const char *value = crm_element_value(op_request, PCMK__XA_CIB_CLIENTNAME); if (value == NULL) { cib_client->name = pcmk__itoa(cib_client->pid); } else { cib_client->name = pcmk__str_copy(value); if (pcmk__parse_server(value) != pcmk_ipc_unknown) { 
pcmk__set_client_flags(cib_client, cib_is_daemon); } } } /* Allow cluster daemons more leeway before being evicted */ if (pcmk_is_set(cib_client->flags, cib_is_daemon)) { const char *qmax = cib_config_lookup(PCMK_OPT_CLUSTER_IPC_LIMIT); pcmk__set_client_queue_max(cib_client, qmax); } crm_xml_add(op_request, PCMK__XA_CIB_CLIENTID, cib_client->id); crm_xml_add(op_request, PCMK__XA_CIB_CLIENTNAME, cib_client->name); CRM_LOG_ASSERT(cib_client->user != NULL); pcmk__update_acl_user(op_request, PCMK__XA_CIB_USER, cib_client->user); cib_common_callback_worker(id, flags, op_request, cib_client, privileged); pcmk__xml_free(op_request); return 0; } static uint64_t ping_seq = 0; static char *ping_digest = NULL; static bool ping_modified_since = FALSE; static gboolean cib_digester_cb(gpointer data) { if (based_is_primary) { char buffer[32]; xmlNode *ping = pcmk__xe_create(NULL, PCMK__XE_PING); ping_seq++; free(ping_digest); ping_digest = NULL; ping_modified_since = FALSE; snprintf(buffer, 32, "%" PRIu64, ping_seq); crm_trace("Requesting peer digests (%s)", buffer); crm_xml_add(ping, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(ping, PCMK__XA_CIB_OP, CRM_OP_PING); crm_xml_add(ping, PCMK__XA_CIB_PING_ID, buffer); crm_xml_add(ping, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); pcmk__cluster_send_message(NULL, pcmk_ipc_based, ping); pcmk__xml_free(ping); } return FALSE; } static void process_ping_reply(xmlNode *reply) { uint64_t seq = 0; const char *host = crm_element_value(reply, PCMK__XA_SRC); xmlNode *wrapper = pcmk__xe_first_child(reply, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *pong = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *seq_s = crm_element_value(pong, PCMK__XA_CIB_PING_ID); const char *digest = crm_element_value(pong, PCMK__XA_DIGEST); if (seq_s == NULL) { crm_debug("Ignoring ping reply with no " PCMK__XA_CIB_PING_ID); return; } else { long long seq_ll; int rc = pcmk__scan_ll(seq_s, &seq_ll, 0LL); if (rc != pcmk_rc_ok) { crm_debug("Ignoring ping reply with invalid " PCMK__XA_CIB_PING_ID " '%s': %s", seq_s, pcmk_rc_str(rc)); return; } seq = (uint64_t) seq_ll; } if(digest == NULL) { crm_trace("Ignoring ping reply %s from %s with no digest", seq_s, host); } else if(seq != ping_seq) { crm_trace("Ignoring out of sequence ping reply %s from %s", seq_s, host); } else if(ping_modified_since) { crm_trace("Ignoring ping reply %s from %s: cib updated since", seq_s, host); } else { if(ping_digest == NULL) { crm_trace("Calculating new digest"); ping_digest = pcmk__digest_xml(the_cib, true); } crm_trace("Processing ping reply %s from %s (%s)", seq_s, host, digest); if (!pcmk__str_eq(ping_digest, digest, pcmk__str_casei)) { xmlNode *wrapper = pcmk__xe_first_child(pong, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *remote_cib = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *admin_epoch_s = NULL; const char *epoch_s = NULL; const char *num_updates_s = NULL; if (remote_cib != NULL) { admin_epoch_s = crm_element_value(remote_cib, PCMK_XA_ADMIN_EPOCH); epoch_s = crm_element_value(remote_cib, PCMK_XA_EPOCH); num_updates_s = crm_element_value(remote_cib, PCMK_XA_NUM_UPDATES); } crm_notice("Local CIB %s.%s.%s.%s differs from %s: %s.%s.%s.%s %p", crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH), crm_element_value(the_cib, PCMK_XA_EPOCH), crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), ping_digest, host, pcmk__s(admin_epoch_s, "_"), pcmk__s(epoch_s, "_"), pcmk__s(num_updates_s, "_"), digest, remote_cib); if(remote_cib && remote_cib->children) { // Additional debug 
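/* To help debug the digest mismatch, compute the differences between our CIB
 * and the peer's copy: xml_calculate_changes() marks remote_cib with change
 * records, which pcmk__log_xml_changes() then logs at LOG_INFO. */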
xml_calculate_changes(the_cib, remote_cib); pcmk__log_xml_changes(LOG_INFO, remote_cib); crm_trace("End of differences"); } pcmk__xml_free(remote_cib); sync_our_cib(reply, FALSE); } } } static void parse_local_options(const pcmk__client_t *cib_client, const cib__operation_t *operation, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { // Process locally and notify local client *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; if (pcmk_is_set(operation->flags, cib__op_attr_local)) { /* Always process locally if cib__op_attr_local is set. * * @COMPAT: Currently host is ignored. At a compatibility break, throw * an error (from cib_process_request() or earlier) if host is not NULL or * OUR_NODENAME. */ crm_trace("Processing always-local %s op from client %s", op, pcmk__client_name(cib_client)); if (!pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei|pcmk__str_null_matches)) { crm_warn("Operation '%s' is always local but its target host is " "set to '%s'", op, host); } return; } if (pcmk_is_set(operation->flags, cib__op_attr_modifies) || !pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei|pcmk__str_null_matches)) { // Forward modifying and non-local requests via cluster *process = FALSE; *needs_reply = FALSE; *local_notify = FALSE; *needs_forward = TRUE; crm_trace("%s op from %s needs to be forwarded to %s", op, pcmk__client_name(cib_client), pcmk__s(host, "all nodes")); return; } if (stand_alone) { crm_trace("Processing %s op from client %s (stand-alone)", op, pcmk__client_name(cib_client)); } else { crm_trace("Processing %saddressed %s op from client %s", ((host != NULL)? "locally " : "un"), op, pcmk__client_name(cib_client)); } } static gboolean parse_peer_options(const cib__operation_t *operation, xmlNode *request, gboolean *local_notify, gboolean *needs_reply, gboolean *process) { /* TODO: What happens when an update comes in after node A * requests the CIB from node B, but before it gets the reply (and * sends out the replace operation)? * * (This may no longer be relevant since legacy mode was dropped; need to * trace code more closely to check.) */ const char *host = NULL; const char *delegated = crm_element_value(request, PCMK__XA_CIB_DELEGATED_FROM); const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei); if (originator == NULL) { // Shouldn't be possible originator = "peer"; } if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { // sync_our_cib() sets PCMK__XA_CIB_ISREPLYTO if (reply_to) { delegated = reply_to; } goto skip_is_reply; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SYNC_TO_ALL, pcmk__str_none)) { // Nothing to do } else if (is_reply && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) { process_ping_reply(request); return FALSE; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_UPGRADE, pcmk__str_none)) { /* Only the DC (node with the oldest software) should process * this operation if PCMK__XA_CIB_SCHEMA_MAX is unset. * * If the DC is happy it will then send out another * PCMK__CIB_REQUEST_UPGRADE which will tell all nodes to do the actual * upgrade. 
* * Except this time PCMK__XA_CIB_SCHEMA_MAX will be set which puts a * limit on how far newer nodes will go */ const char *max = crm_element_value(request, PCMK__XA_CIB_SCHEMA_MAX); const char *upgrade_rc = crm_element_value(request, PCMK__XA_CIB_UPGRADE_RC); crm_trace("Parsing upgrade %s for %s with max=%s and upgrade_rc=%s", (is_reply? "reply" : "request"), (based_is_primary? "primary" : "secondary"), pcmk__s(max, "none"), pcmk__s(upgrade_rc, "none")); if (upgrade_rc != NULL) { // Our upgrade request was rejected by DC, notify clients of result crm_xml_add(request, PCMK__XA_CIB_RC, upgrade_rc); } else if ((max == NULL) && based_is_primary) { /* We are the DC, check if this upgrade is allowed */ goto skip_is_reply; } else if(max) { /* Ok, go ahead and upgrade to 'max' */ goto skip_is_reply; } else { // Ignore broadcast client requests when we're not primary return FALSE; } } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { crm_info("Detected legacy %s global update from %s", op, originator); send_sync_request(NULL); return FALSE; } else if (is_reply && pcmk_is_set(operation->flags, cib__op_attr_modifies)) { crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator); return FALSE; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) { *local_notify = FALSE; if (reply_to == NULL) { *process = TRUE; } else { // Not possible? crm_debug("Ignoring shutdown request from %s because reply_to=%s", originator, reply_to); } return *process; } if (is_reply) { crm_trace("Will notify local clients for %s reply from %s", op, originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } skip_is_reply: *process = TRUE; *needs_reply = FALSE; *local_notify = pcmk__str_eq(delegated, OUR_NODENAME, pcmk__str_casei); host = crm_element_value(request, PCMK__XA_CIB_HOST); if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) { crm_trace("Processing %s request sent to us from %s", op, originator); *needs_reply = TRUE; return TRUE; } else if (host != NULL) { crm_trace("Ignoring %s request intended for CIB manager on %s", op, host); return FALSE; } else if(is_reply == FALSE && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) { *needs_reply = TRUE; } crm_trace("Processing %s request broadcast by %s call %s on %s " "(local clients will%s be notified)", op, pcmk__s(crm_element_value(request, PCMK__XA_CIB_CLIENTNAME), "client"), pcmk__s(crm_element_value(request, PCMK__XA_CIB_CALLID), "without ID"), originator, (*local_notify? "" : "not")); return TRUE; } /*! 
* \internal * \brief Forward a CIB request to the appropriate target host(s) * * \param[in] request CIB request to forward */ static void forward_request(xmlNode *request) { const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); pcmk__node_status_t *peer = NULL; int log_level = LOG_INFO; if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", pcmk__s(op, "invalid"), pcmk__s(section, "all"), pcmk__s(host, "all"), pcmk__s(originator, "local"), pcmk__s(client_name, "unspecified"), pcmk__s(call_id, "unspecified")); crm_xml_add(request, PCMK__XA_CIB_DELEGATED_FROM, OUR_NODENAME); if (host != NULL) { peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); } pcmk__cluster_send_message(peer, pcmk_ipc_based, request); // Return the request to its original state pcmk__xe_remove_attr(request, PCMK__XA_CIB_DELEGATED_FROM); } static void send_peer_reply(xmlNode *msg, const char *originator) { const pcmk__node_status_t *node = NULL; if ((msg == NULL) || (originator == NULL)) { return; } // Send reply via cluster to originating node node = pcmk__get_node(0, originator, NULL, pcmk__node_search_cluster_member); crm_trace("Sending request result to %s only", originator); crm_xml_add(msg, PCMK__XA_CIB_ISREPLYTO, originator); pcmk__cluster_send_message(node, pcmk_ipc_based, msg); } /*! 
* \internal * \brief Handle an IPC or CPG message containing a request * * \param[in,out] request Request XML * \param[in] privileged Whether privileged commands may be run * (see cib_server_ops[] definition) * \param[in] cib_client IPC client that sent request (or NULL if CPG) * * \return Legacy Pacemaker return code */ int cib_process_request(xmlNode *request, gboolean privileged, const pcmk__client_t *cib_client) { // @TODO: Break into multiple smaller functions uint32_t call_options = cib_none; gboolean process = TRUE; // Whether to process request locally now gboolean is_update = TRUE; // Whether request would modify CIB gboolean needs_reply = TRUE; // Whether to build a reply gboolean local_notify = FALSE; // Whether to notify (local) requester gboolean needs_forward = FALSE; // Whether to forward request somewhere else xmlNode *op_reply = NULL; xmlNode *result_diff = NULL; int rc = pcmk_ok; const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); const cib__operation_t *operation = NULL; cib__op_fn_t op_function = NULL; rc = pcmk__xe_get_flags(request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } if ((host != NULL) && (*host == '\0')) { host = NULL; } if (cib_client == NULL) { crm_trace("Processing peer %s operation from %s/%s on %s intended for %s (reply=%s)", op, pcmk__s(client_name, "client"), call_id, originator, pcmk__s(host, "all"), reply_to); } else { crm_xml_add(request, PCMK__XA_SRC, OUR_NODENAME); crm_trace("Processing local %s operation from %s/%s intended for %s", op, pcmk__s(client_name, "client"), call_id, pcmk__s(host, "all")); } rc = cib__get_operation(op, &operation); rc = pcmk_rc2legacy(rc); if (rc != pcmk_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc)); return rc; } op_function = based_get_op_function(operation); if (op_function == NULL) { crm_err("Operation %s not supported by CIB manager", op); return -EOPNOTSUPP; } if (cib_client != NULL) { parse_local_options(cib_client, operation, host, op, &local_notify, &needs_reply, &process, &needs_forward); } else if (!parse_peer_options(operation, request, &local_notify, &needs_reply, &process)) { return rc; } if (pcmk_is_set(call_options, cib_transaction)) { /* All requests in a transaction are processed locally against a working * CIB copy, and we don't notify for individual requests because the * entire transaction is atomic. * * We still call the option parser functions above, for the sake of log * messages and checking whether we're the target for peer requests. */ process = TRUE; needs_reply = FALSE; local_notify = FALSE; needs_forward = FALSE; } is_update = pcmk_is_set(operation->flags, cib__op_attr_modifies); if (pcmk_is_set(call_options, cib_discard_reply)) { /* If the request will modify the CIB, and we are in legacy mode, we * need to build a reply so we can broadcast a diff, even if the * requester doesn't want one. 
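 *
 * (Legacy mode no longer exists, so in practice this now simply suppresses
 * the reply and the local notification.)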
*/ needs_reply = FALSE; local_notify = FALSE; crm_trace("Client is not interested in the reply"); } if (needs_forward) { forward_request(request); return rc; } if (cib_status != pcmk_ok) { rc = cib_status; crm_err("Ignoring request because cluster configuration is invalid " "(please repair and restart): %s", pcmk_strerror(rc)); op_reply = create_cib_reply(op, call_id, client_id, call_options, rc, the_cib); } else if (process) { time_t finished = 0; time_t now = time(NULL); int level = LOG_INFO; const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); const char *admin_epoch_s = NULL; const char *epoch_s = NULL; const char *num_updates_s = NULL; rc = cib_process_command(request, operation, op_function, &op_reply, &result_diff, privileged); if (!is_update) { level = LOG_TRACE; } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { switch (rc) { case pcmk_ok: level = LOG_INFO; break; case -pcmk_err_old_data: case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: level = LOG_TRACE; break; default: level = LOG_ERR; } } else if (rc != pcmk_ok) { level = LOG_WARNING; } if (the_cib != NULL) { admin_epoch_s = crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH); epoch_s = crm_element_value(the_cib, PCMK_XA_EPOCH); num_updates_s = crm_element_value(the_cib, PCMK_XA_NUM_UPDATES); } do_crm_log(level, "Completed %s operation for section %s: %s (rc=%d, origin=%s/%s/%s, version=%s.%s.%s)", op, section ? section : "'all'", pcmk_strerror(rc), rc, originator ? originator : "local", pcmk__s(client_name, "client"), call_id, pcmk__s(admin_epoch_s, "0"), pcmk__s(epoch_s, "0"), pcmk__s(num_updates_s, "0")); finished = time(NULL); if ((finished - now) > 3) { crm_trace("%s operation took %lds to complete", op, (long)(finished - now)); crm_write_blackbox(0, NULL); } if (op_reply == NULL && (needs_reply || local_notify)) { crm_err("Unexpected NULL reply to message"); crm_log_xml_err(request, "null reply"); needs_reply = FALSE; local_notify = FALSE; } } if (is_update) { crm_trace("Completed pre-sync update from %s/%s/%s%s", originator ? originator : "local", pcmk__s(client_name, "client"), call_id, local_notify?" with local notification":""); } else if (!needs_reply || stand_alone) { // This was a non-originating secondary update crm_trace("Completed update as secondary"); } else if ((cib_client == NULL) && !pcmk_is_set(call_options, cib_discard_reply)) { if (is_update == FALSE || result_diff == NULL) { crm_trace("Request not broadcast: R/O call"); } else if (rc != pcmk_ok) { crm_trace("Request not broadcast: call failed: %s", pcmk_strerror(rc)); } else { crm_trace("Directing reply to %s", originator); } send_peer_reply(op_reply, originator); } if (local_notify && client_id) { crm_trace("Performing local %ssync notification for %s", (pcmk_is_set(call_options, cib_sync_call)? "" : "a"), client_id); if (process == FALSE) { do_local_notify(request, client_id, pcmk_is_set(call_options, cib_sync_call), (cib_client == NULL)); } else { do_local_notify(op_reply, client_id, pcmk_is_set(call_options, cib_sync_call), (cib_client == NULL)); } } pcmk__xml_free(op_reply); pcmk__xml_free(result_diff); return rc; } /*! * \internal * \brief Get a CIB operation's input from the request XML * * \param[in] request CIB request XML * \param[in] type CIB operation type * \param[out] section Where to store CIB section name * * \return Input XML for CIB operation * * \note If not \c NULL, the return value is a non-const pointer to part of * \p request. The caller should not free it directly. 
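 *
 * For example, for a modifying request the input is the first child of the
 * request's PCMK__XE_CIB_CALLDATA wrapper, narrowed (for non-patch requests)
 * to the requested PCMK__XA_CIB_SECTION when the input is a full CIB.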
*/ static xmlNode * prepare_input(const xmlNode *request, enum cib__op_type type, const char **section) { xmlNode *wrapper = pcmk__xe_first_child(request, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *input = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (type == cib__op_apply_patch) { *section = NULL; } else { *section = crm_element_value(request, PCMK__XA_CIB_SECTION); } // Grab the specified section if ((*section != NULL) && pcmk__xe_is(input, PCMK_XE_CIB)) { input = pcmk_find_cib_element(input, *section); } return input; } #define XPATH_CONFIG_CHANGE \ "//" PCMK_XE_CHANGE \ "[contains(@" PCMK_XA_PATH ",'/" PCMK_XE_CRM_CONFIG "/')]" static bool contains_config_change(xmlNode *diff) { bool changed = false; if (diff) { xmlXPathObject *xpathObj = pcmk__xpath_search(diff->doc, XPATH_CONFIG_CHANGE); if (pcmk__xpath_num_results(xpathObj) > 0) { changed = true; } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } return changed; } static int cib_process_command(xmlNode *request, const cib__operation_t *operation, cib__op_fn_t op_function, xmlNode **reply, xmlNode **cib_diff, bool privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; uint32_t call_options = cib_none; const char *op = NULL; const char *section = NULL; const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *originator = crm_element_value(request, PCMK__XA_SRC); int rc = pcmk_ok; bool config_changed = false; bool manage_counters = true; static mainloop_timer_t *digest_timer = NULL; pcmk__assert(cib_status == pcmk_ok); if(digest_timer == NULL) { digest_timer = mainloop_timer_add("digester", 5000, FALSE, cib_digester_cb, NULL); } *reply = NULL; *cib_diff = NULL; /* Start processing the request... */ op = crm_element_value(request, PCMK__XA_CIB_OP); rc = pcmk__xe_get_flags(request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } if (!privileged && pcmk_is_set(operation->flags, cib__op_attr_privileged)) { rc = -EACCES; crm_trace("Failed due to lack of privileges: %s", pcmk_strerror(rc)); goto done; } input = prepare_input(request, operation->type, &section); if (!pcmk_is_set(operation->flags, cib__op_attr_modifies)) { rc = cib_perform_op(NULL, op, call_options, op_function, true, section, request, input, false, &config_changed, &the_cib, &result_cib, NULL, &output); CRM_CHECK(result_cib == NULL, pcmk__xml_free(result_cib)); goto done; } /* @COMPAT: Handle a valid write action (legacy) * * @TODO: Re-evaluate whether this is all truly legacy. The cib_force_diff * portion is. However, PCMK__XA_CIB_UPDATE may be set by a sync operation * even in non-legacy mode, and manage_counters tells xml_create_patchset() * whether to update version/epoch info.
*/ if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { manage_counters = false; cib__set_call_options(call_options, "call", cib_force_diff); crm_trace("Global update detected"); CRM_LOG_ASSERT(pcmk__str_any_of(op, PCMK__CIB_REQUEST_APPLY_PATCH, PCMK__CIB_REQUEST_REPLACE, NULL)); } ping_modified_since = TRUE; // result_cib must not be modified after cib_perform_op() returns rc = cib_perform_op(NULL, op, call_options, op_function, false, section, request, input, manage_counters, &config_changed, &the_cib, &result_cib, cib_diff, &output); /* Always write to disk for successful ops with the flag set. This also * negates the need to detect ordering changes. */ if ((rc == pcmk_ok) && pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { config_changed = true; } if ((rc == pcmk_ok) && !pcmk_any_flags_set(call_options, cib_dryrun|cib_transaction)) { if (result_cib != the_cib) { if (pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { config_changed = true; } crm_trace("Activating %s->%s%s", crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), crm_element_value(result_cib, PCMK_XA_NUM_UPDATES), (config_changed? " changed" : "")); rc = activateCibXml(result_cib, config_changed, op); if (rc != pcmk_ok) { crm_err("Failed to activate new CIB: %s", pcmk_strerror(rc)); } } if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) { cib_read_config(config_hash, result_cib); } /* @COMPAT Nodes older than feature set 3.19.0 don't support * transactions. In a mixed-version cluster with nodes <3.19.0, we must * sync the updated CIB, so that the older nodes receive the changes. * Any node that has already applied the transaction will ignore the * synced CIB. * * To ensure the updated CIB is synced from only one node, we sync it * from the originator. 
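 *
 * For example, if the cluster-wide feature set were 3.18.0, then
 * compare_version("3.18.0", "3.19.0") < 0 and the originating node would
 * broadcast the full sync below; at 3.19.0 or later the sync is skipped.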
*/ if ((operation->type == cib__op_commit_transact) && pcmk__str_eq(originator, OUR_NODENAME, pcmk__str_casei) && compare_version(crm_element_value(the_cib, PCMK_XA_CRM_FEATURE_SET), "3.19.0") < 0) { sync_our_cib(request, TRUE); } mainloop_timer_stop(digest_timer); mainloop_timer_start(digest_timer); } else if (rc == -pcmk_err_schema_validation) { pcmk__assert(result_cib != the_cib); if (output != NULL) { crm_log_xml_info(output, "cib:output"); pcmk__xml_free(output); } output = result_cib; } else { crm_trace("Not activating %d %d %s", rc, pcmk_is_set(call_options, cib_dryrun), crm_element_value(result_cib, PCMK_XA_NUM_UPDATES)); if (result_cib != the_cib) { pcmk__xml_free(result_cib); } } if (!pcmk_any_flags_set(call_options, cib_dryrun|cib_inhibit_notify|cib_transaction)) { crm_trace("Sending notifications %d", pcmk_is_set(call_options, cib_dryrun)); cib_diff_notify(op, rc, call_id, client_id, client_name, originator, input, *cib_diff); } pcmk__log_xml_patchset(LOG_TRACE, *cib_diff); done: if (!pcmk_is_set(call_options, cib_discard_reply)) { *reply = create_cib_reply(op, call_id, client_id, call_options, rc, output); } if (output != the_cib) { pcmk__xml_free(output); } crm_trace("done"); return rc; } void cib_peer_callback(xmlNode * msg, void *private_data) { const char *reason = NULL; const char *originator = crm_element_value(msg, PCMK__XA_SRC); if (pcmk__peer_cache == NULL) { reason = "membership not established"; goto bail; } if (crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME) == NULL) { crm_xml_add(msg, PCMK__XA_CIB_CLIENTNAME, originator); } /* crm_log_xml_trace(msg, "Peer[inbound]"); */ cib_process_request(msg, TRUE, NULL); return; bail: if (reason) { const char *op = crm_element_value(msg, PCMK__XA_CIB_OP); crm_warn("Discarding %s message from %s: %s", op, originator, reason); } } static gboolean cib_force_exit(gpointer data) { crm_notice("Exiting immediately after %s without shutdown acknowledgment", pcmk__readable_interval(EXIT_ESCALATION_MS)); terminate_cib(CRM_EX_ERROR); return FALSE; } static void disconnect_remote_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *a_client = value; crm_err("Can't disconnect client %s: Not implemented", pcmk__client_name(a_client)); } static void initiate_exit(void) { int active = 0; xmlNode *leaving = NULL; active = pcmk__cluster_num_active_nodes(); if (active < 2) { // This is the last active node crm_info("Exiting without sending shutdown request (no active peers)"); terminate_cib(CRM_EX_OK); return; } crm_info("Sending shutdown request to %d peers", active); leaving = pcmk__xe_create(NULL, PCMK__XE_EXIT_NOTIFICATION); crm_xml_add(leaving, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(leaving, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_SHUTDOWN); pcmk__cluster_send_message(NULL, pcmk_ipc_based, leaving); pcmk__xml_free(leaving); pcmk__create_timer(EXIT_ESCALATION_MS, cib_force_exit, NULL); } void cib_shutdown(int nsig) { struct qb_ipcs_stats srv_stats; if (cib_shutdown_flag == FALSE) { int disconnects = 0; qb_ipcs_connection_t *c = NULL; cib_shutdown_flag = TRUE; c = qb_ipcs_connection_first_get(ipcs_rw); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_rw, last); crm_debug("Disconnecting r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_ro); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_ro, last); crm_debug("Disconnecting r/o client %p...", last); 
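/* As in the r/w loop above, the next connection was already fetched via
 * qb_ipcs_connection_next_get(), so iteration remains valid after this one
 * is disconnected and unreferenced. */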
qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_shm); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_shm, last); crm_debug("Disconnecting non-blocking r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } disconnects += pcmk__ipc_client_count(); crm_debug("Disconnecting %d remote clients", pcmk__ipc_client_count()); pcmk__foreach_ipc_client(disconnect_remote_client, NULL); crm_info("Disconnected %d clients", disconnects); } qb_ipcs_stats_get(ipcs_rw, &srv_stats, QB_FALSE); if (pcmk__ipc_client_count() == 0) { crm_info("All clients disconnected (%d)", srv_stats.active_connections); initiate_exit(); } else { crm_info("Waiting on %d clients to disconnect (%d)", pcmk__ipc_client_count(), srv_stats.active_connections); } } extern int remote_fd; extern int remote_tls_fd; /*! * \internal * \brief Close remote sockets, free the global CIB and quit * * \param[in] exit_status What exit status to use (if -1, use CRM_EX_OK, but * skip disconnecting from the cluster layer) */ void terminate_cib(int exit_status) { if (remote_fd > 0) { close(remote_fd); remote_fd = 0; } if (remote_tls_fd > 0) { close(remote_tls_fd); remote_tls_fd = 0; } uninitializeCib(); // Exit immediately on error if (exit_status > CRM_EX_OK) { pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(exit_status); return; } if ((mainloop != NULL) && g_main_loop_is_running(mainloop)) { /* Quit via returning from the main loop. If exit_status has the special * value -1, we skip the disconnect here, and it will be done when the * main loop returns (this allows the peer status callback to avoid * messing with the peer caches). */ if (exit_status == CRM_EX_OK) { pcmk_cluster_disconnect(crm_cluster); } g_main_loop_quit(mainloop); return; } /* Exit cleanly. Even the peer status callback can disconnect here, because * we're not returning control to the caller. */ pcmk_cluster_disconnect(crm_cluster); pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(CRM_EX_OK); } diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c index 1fa2fe5543..a5e63ff092 100644 --- a/daemons/controld/controld_remote_ra.c +++ b/daemons/controld/controld_remote_ra.c @@ -1,1492 +1,1492 @@ /* * Copyright 2013-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #define REMOTE_LRMD_RA "remote" /* The max start timeout before cmd retry */ #define MAX_START_TIMEOUT_MS 10000 #define cmd_set_flags(cmd, flags_to_set) do { \ (cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Remote command", (cmd)->rsc_id, (cmd)->status, \ (flags_to_set), #flags_to_set); \ } while (0) #define cmd_clear_flags(cmd, flags_to_clear) do { \ (cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Remote command", (cmd)->rsc_id, (cmd)->status, \ (flags_to_clear), #flags_to_clear); \ } while (0) enum remote_cmd_status { cmd_reported_success = (1 << 0), cmd_cancel = (1 << 1), }; typedef struct remote_ra_cmd_s { /*! the local node the cmd is issued from */ char *owner; /*! the remote node the cmd is executed on */ char *rsc_id; /*! 
the action to execute */ char *action; /*! some string the client wants us to give it back */ char *userdata; /*! start delay in ms */ int start_delay; /*! timer id used for start delay. */ int delay_id; /*! timeout in ms for cmd */ int timeout; /*! recurring interval in ms */ guint interval_ms; /*! interval timer id */ int interval_id; int monitor_timeout_id; int takeover_timeout_id; /*! action parameters */ lrmd_key_value_t *params; pcmk__action_result_t result; int call_id; time_t start_time; uint32_t status; } remote_ra_cmd_t; #define lrm_remote_set_flags(lrm_state, flags_to_set) do { \ lrm_state_t *lrm = (lrm_state); \ remote_ra_data_t *ra = lrm->remote_ra_data; \ ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \ lrm->node_name, ra->status, \ (flags_to_set), #flags_to_set); \ } while (0) #define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \ lrm_state_t *lrm = (lrm_state); \ remote_ra_data_t *ra = lrm->remote_ra_data; \ ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \ lrm->node_name, ra->status, \ (flags_to_clear), #flags_to_clear); \ } while (0) enum remote_status { expect_takeover = (1 << 0), takeover_complete = (1 << 1), remote_active = (1 << 2), /* Maintenance mode is difficult to determine from the controller's context, * so we have it signalled back with the transition from the scheduler. */ remote_in_maint = (1 << 3), /* Similar for whether we are controlling a guest node or remote node. * Fortunately there is a meta-attribute in the transition already and * as the situation doesn't change over time we can use the * resource start for noting down the information for later use when * the attributes aren't at hand. */ controlling_guest = (1 << 4), }; typedef struct remote_ra_data_s { crm_trigger_t *work; remote_ra_cmd_t *cur_cmd; GList *cmds; GList *recurring_cmds; uint32_t status; } remote_ra_data_t; static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms); static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd); static GList *fail_all_monitor_cmds(GList * list); static void free_cmd(gpointer user_data) { remote_ra_cmd_t *cmd = user_data; if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->interval_id) { g_source_remove(cmd->interval_id); } if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); } if (cmd->takeover_timeout_id) { g_source_remove(cmd->takeover_timeout_id); } free(cmd->owner); free(cmd->rsc_id); free(cmd->action); free(cmd->userdata); pcmk__reset_result(&(cmd->result)); lrmd_key_value_freeall(cmd->params); free(cmd); } static int generate_callid(void) { static int remote_ra_callid = 0; remote_ra_callid++; if (remote_ra_callid <= 0) { remote_ra_callid = 1; } return remote_ra_callid; } static gboolean recurring_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->interval_id = 0; connection_rsc = controld_get_executor_state(cmd->rsc_id, false); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd); ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); } return FALSE; } static gboolean start_delay_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->delay_id = 0; connection_rsc = controld_get_executor_state(cmd->rsc_id, false); if (connection_rsc 
&& connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; mainloop_set_trigger(ra_data->work); } return FALSE; } static bool should_purge_attributes(pcmk__node_status_t *node) { pcmk__node_status_t *conn_node = NULL; lrm_state_t *connection_rsc = NULL; if ((node->conn_host == NULL) || (node->name == NULL)) { return true; } /* Get the node that was hosting the remote connection resource from the * peer cache. That's the one we really care about here. */ conn_node = pcmk__get_node(0, node->conn_host, NULL, pcmk__node_search_cluster_member); if (conn_node == NULL) { return true; } /* Check the uptime of connection_rsc. If it hasn't been running long * enough, set purge=true. "Long enough" means it started running earlier * than the timestamp when we noticed it went away in the first place. */ connection_rsc = controld_get_executor_state(node->name, false); if (connection_rsc != NULL) { lrmd_t *lrm = connection_rsc->conn; time_t uptime = lrmd__uptime(lrm); time_t now = time(NULL); /* Add 20s of fuzziness to give corosync a while to notice the remote * host is gone. On various error conditions (failure to get uptime, * peer_lost isn't set) we default to purging. */ if (uptime > 0 && conn_node->peer_lost > 0 && uptime + 20 >= now - conn_node->peer_lost) { return false; } } return true; } static enum controld_section_e section_to_delete(bool purge) { if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { if (purge) { return controld_section_all_unlocked; } else { return controld_section_lrm_unlocked; } } else { if (purge) { return controld_section_all; } else { return controld_section_lrm; } } } static void purge_remote_node_attrs(int call_opt, pcmk__node_status_t *node) { bool purge = should_purge_attributes(node); enum controld_section_e section = section_to_delete(purge); /* Purge node from attrd's memory */ if (purge) { update_attrd_remote_node_removed(node->name, NULL); } controld_delete_node_state(node->name, section, call_opt); } /*! * \internal * \brief Handle cluster communication related to pacemaker_remote node joining * * \param[in] node_name Name of newly integrated pacemaker_remote node */ static void remote_node_up(const char *node_name) { int call_opt; xmlNode *update, *state; pcmk__node_status_t *node = NULL; lrm_state_t *connection_rsc = NULL; CRM_CHECK(node_name != NULL, return); crm_info("Announcing Pacemaker Remote node %s", node_name); call_opt = crmd_cib_smart_opt(); /* Delete node's CRM_OP_PROBED attribute. Deleting any attribute ensures * that the attribute manager learns the node is remote. Deletion of this * specific attribute is a holdover from when it had special meaning. * * @COMPAT Find another way to tell attrd that the node is remote, without * risking deletion or overwrite of an arbitrary attribute. Then work on * deprecating CRM_OP_PROBED. */ update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); /* Ensure node is in the remote peer cache with member status */ node = pcmk__cluster_lookup_remote_node(node_name); CRM_CHECK((node != NULL) && (node->name != NULL), return); purge_remote_node_attrs(call_opt, node); pcmk__update_peer_state(__func__, node, PCMK_VALUE_MEMBER, 0); /* Apply any start state that we were given from the environment on the * remote node.
*/ connection_rsc = controld_get_executor_state(node->name, false); if (connection_rsc != NULL) { lrmd_t *lrm = connection_rsc->conn; const char *start_state = lrmd__node_start_state(lrm); if (start_state) { set_join_state(start_state, node->name, node->xml_id, true); } } /* pacemaker_remote nodes don't participate in the membership layer, * so cluster nodes don't automatically get notified when they come and go. * We send a cluster message to the DC, and update the CIB node state entry, * so the DC will get it sooner (via message) or later (via CIB refresh), * and any other interested parties can query the CIB. */ broadcast_remote_state_message(node_name, true); update = pcmk__xe_create(NULL, PCMK_XE_STATUS); state = create_node_state_update(node, node_update_cluster, update, __func__); /* Clear the PCMK__XA_NODE_FENCED flag in the node state. If the node ever * needs to be fenced, this flag will allow various actions to determine * whether the fencing has happened yet. */ crm_xml_add(state, PCMK__XA_NODE_FENCED, "0"); /* TODO: If the remote connection drops, and this (async) CIB update either * failed or has not yet completed, later actions could mistakenly think the * node has already been fenced (if the PCMK__XA_NODE_FENCED attribute was * previously set, because it won't have been cleared). This could prevent * actual fencing or allow recurring monitor failures to be cleared too * soon. Ideally, we wouldn't rely on the CIB for the fenced status. */ controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL); pcmk__xml_free(update); } enum down_opts { DOWN_KEEP_LRM, DOWN_ERASE_LRM }; /*! * \internal * \brief Handle cluster communication related to pacemaker_remote node leaving * * \param[in] node_name Name of lost node * \param[in] opts Whether to keep or erase LRM history */ static void remote_node_down(const char *node_name, const enum down_opts opts) { xmlNode *update; int call_opt = crmd_cib_smart_opt(); pcmk__node_status_t *node = NULL; /* Purge node from attrd's memory */ update_attrd_remote_node_removed(node_name, NULL); /* Normally, only node attributes should be erased, and the resource history * should be kept until the node comes back up. However, after a successful * fence, we want to clear the history as well, so we don't think resources * are still running on the node. */ if (opts == DOWN_ERASE_LRM) { controld_delete_node_state(node_name, controld_section_all, call_opt); } else { controld_delete_node_state(node_name, controld_section_attrs, call_opt); } /* Ensure node is in the remote peer cache with lost state */ node = pcmk__cluster_lookup_remote_node(node_name); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, PCMK__VALUE_LOST, 0); /* Notify DC */ broadcast_remote_state_message(node_name, false); /* Update CIB node state */ update = pcmk__xe_create(NULL, PCMK_XE_STATUS); create_node_state_update(node, node_update_cluster, update, __func__); controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL); pcmk__xml_free(update); } /*! 
* \internal * \brief Handle effects of a remote RA command on node state * * \param[in] cmd Completed remote RA command */ static void check_remote_node_state(const remote_ra_cmd_t *cmd) { /* Only successful actions can change node state */ if (!pcmk__result_ok(&(cmd->result))) { return; } if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { remote_node_up(cmd->rsc_id); } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM, pcmk__str_casei)) { /* After a successful migration, we don't need to do remote_node_up() * because the DC already knows the node is up, and we don't want to * clear LRM history etc. We do need to add the remote node to this * host's remote peer cache, because (unless it happens to be DC) * it hasn't been tracking the remote node, and other code relies on * the cache to distinguish remote nodes from unseen cluster nodes. */ pcmk__node_status_t *node = pcmk__cluster_lookup_remote_node(cmd->rsc_id); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, PCMK_VALUE_MEMBER, 0); } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { lrm_state_t *lrm_state = controld_get_executor_state(cmd->rsc_id, false); remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL; if (ra_data) { if (!pcmk_is_set(ra_data->status, takeover_complete)) { /* Stop means down if we didn't successfully migrate elsewhere */ remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM); } else if (AM_I_DC == FALSE) { /* Only the connection host and DC track node state, * so if the connection migrated elsewhere and we aren't DC, * un-cache the node, so we don't have stale info */ pcmk__cluster_forget_remote_node(cmd->rsc_id); } } } /* We don't do anything for successful monitors, which is correct for * routine recurring monitors, and for monitors on nodes where the * connection isn't supposed to be (the cluster will stop the connection in * that case). However, if the initial probe finds the connection already * active on the node where we want it, we probably should do * remote_node_up(). Unfortunately, we can't distinguish that case here. * Given that connections have to be initiated by the cluster, the chance of * that should be close to zero. */ } static void report_remote_ra_result(remote_ra_cmd_t * cmd) { lrmd_event_data_t op = { 0, }; check_remote_node_state(cmd); op.type = lrmd_event_exec_complete; op.rsc_id = cmd->rsc_id; op.op_type = cmd->action; op.user_data = cmd->userdata; op.timeout = cmd->timeout; op.interval_ms = cmd->interval_ms; op.t_run = cmd->start_time; op.t_rcchange = cmd->start_time; lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status, cmd->result.exit_reason); if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) { op.t_rcchange = time(NULL); /* This edge case is unlikely ever to occur, but if it does, the result * is that a failure will not be processed correctly. It is possible only * because we can detect that a connection resource's TCP connection has * failed at any moment after start has completed, while the actual * recurring operation is just a connectivity ping. * * Basically, we are not guaranteed that the first successful monitor op and * a subsequent failed monitor op will not share the same timestamp, so we * have to make it look like the operations occurred at separate times.
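 *
 * The adjustment below nudges t_rcchange forward by one second so that the
 * recorded success and the subsequent failure carry distinct timestamps.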
*/ if (op.t_rcchange == op.t_run) { op.t_rcchange++; } } if (cmd->params) { lrmd_key_value_t *tmp; op.params = pcmk__strkey_table(free, free); for (tmp = cmd->params; tmp; tmp = tmp->next) { pcmk__insert_dup(op.params, tmp->key, tmp->value); } } op.call_id = cmd->call_id; op.remote_nodename = cmd->owner; lrm_op_callback(&op); if (op.params) { g_hash_table_destroy(op.params); } lrmd__reset_result(&op); } /*! * \internal * \brief Return a remote command's remaining timeout in seconds * * \param[in] cmd Remote command to check * * \return Command's remaining timeout in seconds */ static int remaining_timeout_sec(const remote_ra_cmd_t *cmd) { return pcmk__timeout_ms2s(cmd->timeout) - (time(NULL) - cmd->start_time); } static gboolean retry_start_cmd_cb(gpointer data) { lrm_state_t *lrm_state = data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd = NULL; int rc = ETIME; int remaining = 0; if (!ra_data || !ra_data->cur_cmd) { return FALSE; } cmd = ra_data->cur_cmd; if (!pcmk__is_up_action(cmd->action)) { return FALSE; } remaining = remaining_timeout_sec(cmd); if (remaining > 0) { rc = handle_remote_ra_start(lrm_state, cmd, remaining * 1000); } else { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Not enough time remains to retry remote connection"); } if (rc != pcmk_rc_ok) { report_remote_ra_result(cmd); if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); } else { /* wait for connection event */ } return FALSE; } static gboolean connection_takeover_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; crm_info("takeover event timed out for node %s", cmd->rsc_id); cmd->takeover_timeout_id = 0; lrm_state = controld_get_executor_state(cmd->rsc_id, false); handle_remote_ra_stop(lrm_state, cmd); free_cmd(cmd); return FALSE; } static gboolean monitor_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; lrm_state = controld_get_executor_state(cmd->rsc_id, false); crm_info("Timed out waiting for remote poke response from %s%s", cmd->rsc_id, (lrm_state? "" : " (no LRM state)")); cmd->monitor_timeout_id = 0; pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Remote executor did not respond"); if (lrm_state && lrm_state->remote_ra_data) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (ra_data->cur_cmd == cmd) { ra_data->cur_cmd = NULL; } if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } } report_remote_ra_result(cmd); free_cmd(cmd); if(lrm_state) { // @TODO Should we move this before reporting the result above? 
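/* The poke went unanswered, so drop the remote connection; a later start or
 * monitor will have to re-establish it. */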
lrm_state_disconnect(lrm_state); } return FALSE; } static void synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type) { lrmd_event_data_t op = { 0, }; if (lrm_state == NULL) { /* if lrm_state not given assume local */ lrm_state = controld_get_executor_state(NULL, false); } pcmk__assert(lrm_state != NULL); op.type = lrmd_event_exec_complete; op.rsc_id = rsc_id; op.op_type = op_type; op.t_run = time(NULL); op.t_rcchange = op.t_run; op.call_id = generate_callid(); lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); process_lrm_event(lrm_state, &op, NULL, NULL); } void remote_lrm_op_callback(lrmd_event_data_t * op) { gboolean cmd_handled = FALSE; lrm_state_t *lrm_state = NULL; remote_ra_data_t *ra_data = NULL; remote_ra_cmd_t *cmd = NULL; CRM_CHECK((op != NULL) && (op->remote_nodename != NULL), return); crm_debug("Processing '%s%s%s' event on remote connection to %s: %s " "(%d) status=%s (%d)", (op->op_type? op->op_type : ""), (op->op_type? " " : ""), lrmd_event_type2str(op->type), op->remote_nodename, crm_exit_str((crm_exit_t) op->rc), op->rc, pcmk_exec_status_str(op->op_status), op->op_status); lrm_state = controld_get_executor_state(op->remote_nodename, false); if (!lrm_state || !lrm_state->remote_ra_data) { crm_debug("No state information found for remote connection event"); return; } ra_data = lrm_state->remote_ra_data; if (op->type == lrmd_event_new_client) { // Another client has connected to the remote daemon if (pcmk_is_set(ra_data->status, expect_takeover)) { // Great, we knew this was coming lrm_remote_clear_flags(lrm_state, expect_takeover); lrm_remote_set_flags(lrm_state, takeover_complete); } else { crm_err("Disconnecting from Pacemaker Remote node %s due to " "unexpected client takeover", op->remote_nodename); /* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */ /* Do not free lrm_state->conn yet. */ /* It'll be freed in the following stop action. */ lrm_state_disconnect_only(lrm_state); } return; } /* filter all EXEC events up */ if (op->type == lrmd_event_exec_complete) { if (pcmk_is_set(ra_data->status, takeover_complete)) { crm_debug("ignoring event, this connection is taken over by another node"); } else { lrm_op_callback(op); } return; } if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) { if (!pcmk_is_set(ra_data->status, remote_active)) { crm_debug("Disconnection from Pacemaker Remote node %s complete", lrm_state->node_name); } else if (!remote_ra_is_in_maintenance(lrm_state)) { crm_err("Lost connection to Pacemaker Remote node %s", lrm_state->node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } else { crm_notice("Unmanaged Pacemaker Remote node %s disconnected", lrm_state->node_name); /* Do roughly what a 'stop' on the remote-resource would do */ handle_remote_ra_stop(lrm_state, NULL); remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM); /* now fake the reply of a successful 'stop' */ synthesize_lrmd_success(NULL, lrm_state->node_name, PCMK_ACTION_STOP); } return; } if (!ra_data->cur_cmd) { crm_debug("no event to match"); return; } cmd = ra_data->cur_cmd; /* Start actions and migrate from actions complete after connection * comes back to us. 
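 * (That is, the lrmd_event_connect case below is the real completion point
 * for start and migrate_from commands.)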
*/ if ((op->type == lrmd_event_connect) && pcmk__is_up_action(cmd->action)) { if (op->connection_rc < 0) { int remaining = remaining_timeout_sec(cmd); if ((op->connection_rc == -ENOKEY) || (op->connection_rc == -EKEYREJECTED)) { // Hard error, don't retry pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM, PCMK_EXEC_ERROR, pcmk_strerror(op->connection_rc)); } else if (remaining > 3) { crm_trace("Rescheduling start (%ds remains before timeout)", remaining); pcmk__create_timer(1000, retry_start_cmd_cb, lrm_state); return; } else { crm_trace("Not enough time before timeout (%ds) " "to reschedule start", remaining); pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "%s without enough time to retry", pcmk_strerror(op->connection_rc)); } } else { lrm_state_reset_tables(lrm_state, TRUE); pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); lrm_remote_set_flags(lrm_state, remote_active); } crm_debug("Remote connection event matched %s action", cmd->action); report_remote_ra_result(cmd); cmd_handled = TRUE; } else if ((op->type == lrmd_event_poke) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); cmd->monitor_timeout_id = 0; } /* Only report success the first time; after that, worry only about failures. * For this function, if we get the poke back, it is always a success. Pokes * only fail if the send fails or the response times out. */ if (!pcmk_is_set(cmd->status, cmd_reported_success)) { pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); cmd_set_flags(cmd, cmd_reported_success); } crm_debug("Remote poke event matched %s action", cmd->action); /* Success; keep rescheduling if an interval is present.
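* The command moves to the recurring_cmds list, and recurring_helper() * re-queues it when the interval timer fires; the local cmd pointer is * cleared so the shared cleanup below does not free it.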
*/ if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) { ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd); cmd->interval_id = pcmk__create_timer(cmd->interval_ms, recurring_helper, cmd); cmd = NULL; /* prevent free */ } cmd_handled = TRUE; } else if ((op->type == lrmd_event_disconnect) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { if (pcmk_is_set(ra_data->status, remote_active) && !pcmk_is_set(cmd->status, cmd_cancel)) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Remote connection unexpectedly dropped " "during monitor"); report_remote_ra_result(cmd); crm_err("Remote connection to %s unexpectedly dropped during monitor", lrm_state->node_name); } cmd_handled = TRUE; } else { crm_debug("Event did not match %s action", ra_data->cur_cmd->action); } if (cmd_handled) { ra_data->cur_cmd = NULL; if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } free_cmd(cmd); } } static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd) { remote_ra_data_t *ra_data = NULL; pcmk__assert(lrm_state != NULL); ra_data = lrm_state->remote_ra_data; if (!pcmk_is_set(ra_data->status, takeover_complete)) { /* Delete pending ops whenever the remote connection is intentionally stopped */ g_hash_table_remove_all(lrm_state->active_ops); } else { /* We no longer hold the history if this connection has been migrated; * however, we keep the metadata cache for future use */ lrm_state_reset_tables(lrm_state, FALSE); } lrm_remote_clear_flags(lrm_state, remote_active); lrm_state_disconnect(lrm_state); if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } ra_data->cmds = NULL; ra_data->recurring_cmds = NULL; ra_data->cur_cmd = NULL; if (cmd) { pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } } // \return Standard Pacemaker return code static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms) { const char *server = NULL; lrmd_key_value_t *tmp = NULL; int port = 0; int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ?
MAX_START_TIMEOUT_MS : timeout_ms; int rc = pcmk_rc_ok; for (tmp = cmd->params; tmp; tmp = tmp->next) { if (pcmk__strcase_any_of(tmp->key, PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_SERVER, NULL)) { server = tmp->value; } else if (pcmk__str_eq(tmp->key, PCMK_REMOTE_RA_PORT, pcmk__str_none)) { port = atoi(tmp->value); } else if (pcmk__str_eq(tmp->key, CRM_META "_" PCMK__META_CONTAINER, pcmk__str_none)) { lrm_remote_set_flags(lrm_state, controlling_guest); } } rc = controld_connect_remote_executor(lrm_state, server, port, timeout_used); if (rc != pcmk_rc_ok) { pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Could not connect to Pacemaker Remote node %s: %s", lrm_state->node_name, pcmk_rc_str(rc)); } return rc; } static gboolean handle_remote_ra_exec(gpointer user_data) { int rc = 0; lrm_state_t *lrm_state = user_data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd; GList *first = NULL; if (ra_data->cur_cmd) { /* still waiting on previous cmd */ return TRUE; } while (ra_data->cmds) { first = ra_data->cmds; cmd = first->data; if (cmd->delay_id) { /* still waiting for start delay timer to trip */ return TRUE; } ra_data->cmds = g_list_remove_link(ra_data->cmds, first); g_list_free_1(first); if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete); if (handle_remote_ra_start(lrm_state, cmd, cmd->timeout) == pcmk_rc_ok) { /* take care of this later when we get async connection result */ crm_debug("Initiated async remote connection, %s action will complete after connect event", cmd->action); ra_data->cur_cmd = cmd; return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) { if (lrm_state_is_connected(lrm_state) == TRUE) { rc = lrm_state_poke_connection(lrm_state); if (rc < 0) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, pcmk_strerror(rc)); } } else { rc = -1; pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING, PCMK_EXEC_DONE, "Remote connection inactive"); } if (rc == 0) { crm_debug("Poked Pacemaker Remote at node %s, waiting for async response", cmd->rsc_id); ra_data->cur_cmd = cmd; cmd->monitor_timeout_id = pcmk__create_timer(cmd->timeout, monitor_timeout_cb, cmd); return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) { if (pcmk_is_set(ra_data->status, expect_takeover)) { /* Briefly wait on stop for an expected takeover to occur. If * the takeover does not occur during the wait, that's fine; it * just means that the remote node's resource history will be * cleared, which will require probing all resources on the * remote node. If the takeover does occur successfully, then we * can leave the status section intact. */ cmd->takeover_timeout_id = pcmk__create_timer((cmd->timeout/2), connection_takeover_timeout_cb, cmd); ra_data->cur_cmd = cmd; return TRUE; } handle_remote_ra_stop(lrm_state, cmd); } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) { lrm_remote_clear_flags(lrm_state, takeover_complete); lrm_remote_set_flags(lrm_state, expect_takeover); pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Currently the only reloadable parameter is * PCMK_REMOTE_RA_RECONNECT_INTERVAL, which is only used by the * scheduler via the CIB, so reloads are a no-op. 
* * @COMPAT DC <2.1.0: We only need to check for "reload" in case * we're in a rolling upgrade with a DC scheduling "reload" instead * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway, * so this would work for that purpose as well. */ pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } free_cmd(cmd); } return TRUE; } static void remote_ra_data_init(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = NULL; if (lrm_state->remote_ra_data) { return; } ra_data = pcmk__assert_alloc(1, sizeof(remote_ra_data_t)); ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state); lrm_state->remote_ra_data = ra_data; } void remote_ra_cleanup(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (!ra_data) { return; } if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } mainloop_destroy_trigger(ra_data->work); free(ra_data); lrm_state->remote_ra_data = NULL; } gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id) { if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) { return TRUE; } return (id != NULL) && (controld_get_executor_state(id, false) != NULL) && !controld_is_local_node(id); } lrmd_rsc_info_t * remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id) { lrmd_rsc_info_t *info = NULL; CRM_CHECK(rsc_id != NULL, return NULL); if (controld_get_executor_state(rsc_id, false) != NULL) { info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t)); info->id = pcmk__str_copy(rsc_id); info->type = pcmk__str_copy(REMOTE_LRMD_RA); info->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_OCF); info->provider = pcmk__str_copy("pacemaker"); } return info; } static gboolean is_remote_ra_supported_action(const char *action) { return pcmk__str_any_of(action, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, PCMK_ACTION_RELOAD_AGENT, PCMK_ACTION_RELOAD, NULL); } static GList * fail_all_monitor_cmds(GList * list) { GList *rm_list = NULL; remote_ra_cmd_t *cmd = NULL; GList *gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { rm_list = g_list_append(rm_list, cmd); } } for (gIter = rm_list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Lost connection to remote executor"); crm_trace("Pre-emptively failing %s %s (interval=%u, %s)", cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata); report_remote_ra_result(cmd); list = g_list_remove(list, cmd); free_cmd(cmd); } /* frees only the list data, not the cmds */ g_list_free(rm_list); return list; } static GList * remove_cmd(GList * list, const char *action, guint interval_ms) { remote_ra_cmd_t *cmd = NULL; GList *gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, action, pcmk__str_casei)) { break; } cmd = NULL; } if (cmd) { list = g_list_remove(list, cmd); free_cmd(cmd); } return list; } int remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action, guint interval_ms) { lrm_state_t *connection_rsc = NULL; remote_ra_data_t *ra_data = NULL; CRM_CHECK(rsc_id != NULL, return -EINVAL); 
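/* A cancellation can apply in three places: the queued cmds list, the * recurring_cmds list, and cur_cmd; an in-flight cur_cmd is only flagged * with cmd_cancel rather than removed */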
connection_rsc = controld_get_executor_state(rsc_id, false); if (!connection_rsc || !connection_rsc->remote_ra_data) { return -EINVAL; } ra_data = connection_rsc->remote_ra_data; ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms); ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action, interval_ms); if (ra_data->cur_cmd && (ra_data->cur_cmd->interval_ms == interval_ms) && (pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) { cmd_set_flags(ra_data->cur_cmd, cmd_cancel); } return 0; } static remote_ra_cmd_t * handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, const char *userdata) { GList *gIter = NULL; remote_ra_cmd_t *cmd = NULL; /* There are three places a potential duplicate monitor operation * could exist: * 1. the recurring_cmds list, where the op is waiting for its next interval * 2. the cmds list, where the op is queued to get executed immediately * 3. cur_cmd, which means the monitor op is in flight right now */ if (interval_ms == 0) { return NULL; } if (ra_data->cur_cmd && !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) && (ra_data->cur_cmd->interval_ms == interval_ms) && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { cmd = ra_data->cur_cmd; goto handle_dup; } for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { goto handle_dup; } } for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { goto handle_dup; } } return NULL; handle_dup: crm_trace("Merging duplicate monitor cmd " PCMK__OP_FMT, cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms); /* Update the userdata */ if (userdata) { free(cmd->userdata); cmd->userdata = pcmk__str_copy(userdata); } /* If we've already reported success, generate a new call ID */ if (pcmk_is_set(cmd->status, cmd_reported_success)) { cmd->start_time = time(NULL); cmd->call_id = generate_callid(); cmd_clear_flags(cmd, cmd_reported_success); } /* If we have an interval_id set, we are in the process of waiting * for this cmd's next interval. Instead of waiting, cancel * the timer and execute the action immediately */ if (cmd->interval_id) { g_source_remove(cmd->interval_id); cmd->interval_id = 0; recurring_helper(cmd); } return cmd; } /*! * \internal * \brief Execute an action using the (internal) ocf:pacemaker:remote agent * * \param[in] lrm_state Executor state object for remote connection * \param[in] rsc_id Connection resource ID * \param[in] action Action to execute * \param[in] userdata String to copy and pass to execution callback * \param[in] interval_ms Action interval (in milliseconds) * \param[in] timeout_ms Action timeout (in milliseconds) * \param[in] start_delay_ms Delay (in milliseconds) before executing action * \param[in,out] params Connection resource parameters * \param[out] call_id Where to store call ID on success * * \return Standard Pacemaker return code * \note This takes ownership of \p params, which should not be used or freed * after calling this function.
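* \note If an equivalent recurring monitor is already queued or in flight, * the request is merged into the existing command (see handle_dup_monitor()) * and that command's call ID is returned in \p call_id.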
*/ int controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id, const char *action, const char *userdata, guint interval_ms, int timeout_ms, int start_delay_ms, lrmd_key_value_t *params, int *call_id) { lrm_state_t *connection_rsc = NULL; remote_ra_cmd_t *cmd = NULL; remote_ra_data_t *ra_data = NULL; CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL) && (userdata != NULL) && (call_id != NULL), lrmd_key_value_freeall(params); return EINVAL); *call_id = 0; if (!is_remote_ra_supported_action(action)) { lrmd_key_value_freeall(params); return EOPNOTSUPP; } connection_rsc = controld_get_executor_state(rsc_id, false); if (connection_rsc == NULL) { lrmd_key_value_freeall(params); return ENOTCONN; } remote_ra_data_init(connection_rsc); ra_data = connection_rsc->remote_ra_data; cmd = handle_dup_monitor(ra_data, interval_ms, userdata); if (cmd) { *call_id = cmd->call_id; lrmd_key_value_freeall(params); return pcmk_rc_ok; } cmd = pcmk__assert_alloc(1, sizeof(remote_ra_cmd_t)); cmd->owner = pcmk__str_copy(lrm_state->node_name); cmd->rsc_id = pcmk__str_copy(rsc_id); cmd->action = pcmk__str_copy(action); cmd->userdata = pcmk__str_copy(userdata); cmd->interval_ms = interval_ms; cmd->timeout = timeout_ms; cmd->start_delay = start_delay_ms; cmd->params = params; cmd->start_time = time(NULL); cmd->call_id = generate_callid(); if (cmd->start_delay) { cmd->delay_id = pcmk__create_timer(cmd->start_delay, start_delay_helper, cmd); } ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); *call_id = cmd->call_id; return pcmk_rc_ok; } /*! * \internal * \brief Immediately fail all monitors of a remote node, if proxied here * * \param[in] node_name Name of pacemaker_remote node */ void remote_ra_fail(const char *node_name) { lrm_state_t *lrm_state = NULL; CRM_CHECK(node_name != NULL, return); lrm_state = controld_get_executor_state(node_name, false); if (lrm_state && lrm_state_is_connected(lrm_state)) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; crm_info("Failing monitors on Pacemaker Remote node %s", node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } } /* A guest node fencing implied by host fencing looks roughly like: * * <pseudo_event operation="stonith" ...> * <downed> * <node id="GUEST_ID"/> * </downed> * </pseudo_event> */ #define XPATH_PSEUDO_FENCE "/" PCMK__XE_PSEUDO_EVENT \ "[@" PCMK_XA_OPERATION "='stonith']/" PCMK__XE_DOWNED "/" PCMK_XE_NODE /*! * \internal * \brief Check a pseudo-action for Pacemaker Remote node side effects * * \param[in,out] xml XML of pseudo-action to check */ void remote_ra_process_pseudo(xmlNode *xml) { xmlXPathObject *search = pcmk__xpath_search(xml->doc, XPATH_PSEUDO_FENCE); if (pcmk__xpath_num_results(search) == 1) { xmlNode *result = pcmk__xpath_result(search, 0); /* Normally, we handle the necessary side effects of a guest node stop * action when reporting the remote agent's result. However, if the stop * is implied due to fencing, it will be a fencing pseudo-event, and * there won't be a result to report. Handle that case here. * * This will result in a duplicate call to remote_node_down() if the * guest stop was real instead of implied, but that shouldn't hurt. * * There is still one corner case that isn't handled: if a guest node * isn't running any resources when its host is fenced, it will appear * to be cleanly stopped, so there will be no pseudo-fence, and our * peer cache state will be incorrect unless and until the guest is * recovered.
*/ if (result) { const char *remote = pcmk__xe_id(result); if (remote) { remote_node_down(remote, DOWN_ERASE_LRM); } } } - freeXpathObject(search); + xmlXPathFreeObject(search); } static void remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) { xmlNode *update, *state; int call_opt; pcmk__node_status_t *node = NULL; call_opt = crmd_cib_smart_opt(); node = pcmk__cluster_lookup_remote_node(lrm_state->node_name); CRM_CHECK(node != NULL, return); update = pcmk__xe_create(NULL, PCMK_XE_STATUS); state = create_node_state_update(node, node_update_none, update, __func__); crm_xml_add(state, PCMK__XA_NODE_IN_MAINTENANCE, (maintenance? "1" : "0")); if (controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL) == pcmk_rc_ok) { /* TODO: still not 100% sure that async update will succeed ... */ if (maintenance) { lrm_remote_set_flags(lrm_state, remote_in_maint); } else { lrm_remote_clear_flags(lrm_state, remote_in_maint); } } pcmk__xml_free(update); } #define XPATH_PSEUDO_MAINTENANCE "//" PCMK__XE_PSEUDO_EVENT \ "[@" PCMK_XA_OPERATION "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \ PCMK__XE_MAINTENANCE /*! * \internal * \brief Check a pseudo-action holding updates for maintenance state * * \param[in,out] xml XML of pseudo-action to check */ void remote_ra_process_maintenance_nodes(xmlNode *xml) { xmlXPathObject *search = pcmk__xpath_search(xml->doc, XPATH_PSEUDO_MAINTENANCE); if (pcmk__xpath_num_results(search) == 1) { xmlNode *node; int cnt = 0, cnt_remote = 0; for (node = pcmk__xe_first_child(pcmk__xpath_result(search, 0), PCMK_XE_NODE, NULL, NULL); node != NULL; node = pcmk__xe_next(node, PCMK_XE_NODE)) { lrm_state_t *lrm_state = NULL; const char *id = pcmk__xe_id(node); cnt++; if (id == NULL) { continue; // Shouldn't be possible } lrm_state = controld_get_executor_state(id, false); if (lrm_state && lrm_state->remote_ra_data && pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) { const char *in_maint_s = NULL; int in_maint; cnt_remote++; in_maint_s = crm_element_value(node, PCMK__XA_NODE_IN_MAINTENANCE); pcmk__scan_min_int(in_maint_s, &in_maint, 0); remote_ra_maintenance(lrm_state, in_maint); } } crm_trace("Action holds %d nodes (%d remotes found) adjusting " PCMK_OPT_MAINTENANCE_MODE, cnt, cnt_remote); } - freeXpathObject(search); + xmlXPathFreeObject(search); } gboolean remote_ra_is_in_maintenance(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; return pcmk_is_set(ra_data->status, remote_in_maint); } gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; return pcmk_is_set(ra_data->status, controlling_guest); } diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c index df82aefa19..26eb315f79 100644 --- a/daemons/controld/controld_schedulerd.c +++ b/daemons/controld/controld_schedulerd.c @@ -1,514 +1,514 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include /* pid_t, sleep, ssize_t */ #include #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include static void handle_disconnect(void); static pcmk_ipc_api_t *schedulerd_api = NULL; /*! 
* \internal * \brief Close any scheduler connection and free associated memory */ void controld_shutdown_schedulerd_ipc(void) { controld_clear_fsa_input_flags(R_PE_REQUIRED); pcmk_disconnect_ipc(schedulerd_api); handle_disconnect(); pcmk_free_ipc_api(schedulerd_api); schedulerd_api = NULL; } /*! * \internal * \brief Save CIB query result to file, raising FSA error * * \param[in] msg Ignored * \param[in] call_id Call ID of CIB query * \param[in] rc Return code of CIB query * \param[in] output Result of CIB query * \param[in] user_data Unique identifier for filename * * \note This is intended to be called after a scheduler connection fails. */ static void save_cib_contents(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { const char *id = user_data; register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); CRM_CHECK(id != NULL, return); if (rc == pcmk_ok) { char *filename = crm_strdup_printf(PCMK_SCHEDULER_INPUT_DIR "/pe-core-%s.bz2", id); if (pcmk__xml_write_file(output, filename, true) != pcmk_rc_ok) { crm_err("Could not save Cluster Information Base to %s after scheduler crash", filename); } else { crm_notice("Saved Cluster Information Base to %s after scheduler crash", filename); } free(filename); } } /*! * \internal * \brief Respond to scheduler connection failure */ static void handle_disconnect(void) { // If we aren't connected to the scheduler, we can't expect a reply controld_expect_sched_reply(NULL); if (pcmk_is_set(controld_globals.fsa_input_register, R_PE_REQUIRED)) { int rc = pcmk_ok; char *uuid_str = crm_generate_uuid(); crm_crit("Lost connection to the scheduler " QB_XS " CIB will be saved to " PCMK_SCHEDULER_INPUT_DIR "/pe-core-%s.bz2", uuid_str); /* * The scheduler died... * * Save the current CIB so that we have a chance of * figuring out what killed it. * * Delay raising the I_ERROR until the query below completes or * 5s is up, whichever comes first. * */ rc = controld_globals.cib_conn->cmds->query(controld_globals.cib_conn, NULL, NULL, cib_none); fsa_register_cib_callback(rc, uuid_str, save_cib_contents); } controld_clear_fsa_input_flags(R_PE_CONNECTED); controld_trigger_fsa(); return; } static void handle_reply(pcmk_schedulerd_api_reply_t *reply) { const char *msg_ref = NULL; if (!AM_I_DC) { return; } msg_ref = reply->data.graph.reference; if (msg_ref == NULL) { crm_err("%s - Ignoring calculation with no reference", CRM_OP_PECALC); } else if (pcmk__str_eq(msg_ref, controld_globals.fsa_pe_ref, pcmk__str_none)) { ha_msg_input_t fsa_input; xmlNode *crm_data_node; controld_stop_sched_timer(); /* do_te_invoke (which will eventually process the fsa_input we are constructing * here) requires that fsa_input.xml be non-NULL. That will only happen if * copy_ha_msg_input (which is called by register_fsa_input_adv) sees the * fsa_input.msg that it is expecting. The scheduler's IPC dispatch function * gave us the values we need, we just need to put them into XML. * * The name of the top level element here is irrelevant. Nothing checks it. 
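* Only the layout matters: the reference and graph input are attached as * attributes, and the transition graph itself is wrapped in a * PCMK__XE_CRM_XML child, which is the shape copy_ha_msg_input() expects.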
*/ fsa_input.msg = pcmk__xe_create(NULL, "dummy-reply"); crm_xml_add(fsa_input.msg, PCMK_XA_REFERENCE, msg_ref); crm_xml_add(fsa_input.msg, PCMK__XA_CRM_TGRAPH_IN, reply->data.graph.input); crm_data_node = pcmk__xe_create(fsa_input.msg, PCMK__XE_CRM_XML); pcmk__xml_copy(crm_data_node, reply->data.graph.tgraph); register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input); pcmk__xml_free(fsa_input.msg); } else { crm_info("%s calculation %s is obsolete", CRM_OP_PECALC, msg_ref); } } static void scheduler_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { pcmk_schedulerd_api_reply_t *reply = event_data; switch (event_type) { case pcmk_ipc_event_disconnect: handle_disconnect(); break; case pcmk_ipc_event_reply: handle_reply(reply); break; default: break; } } static bool new_schedulerd_ipc_connection(void) { int rc; controld_set_fsa_input_flags(R_PE_REQUIRED); if (schedulerd_api == NULL) { rc = pcmk_new_ipc_api(&schedulerd_api, pcmk_ipc_schedulerd); if (rc != pcmk_rc_ok) { crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc)); return false; } } pcmk_register_ipc_callback(schedulerd_api, scheduler_event_callback, NULL); rc = pcmk__connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main, 3); if (rc != pcmk_rc_ok) { crm_err("Error connecting to %s: %s", pcmk_ipc_name(schedulerd_api, true), pcmk_rc_str(rc)); return false; } controld_set_fsa_input_flags(R_PE_CONNECTED); return true; } static void do_pe_invoke_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); /* A_PE_START, A_PE_STOP, O_PE_RESTART */ void do_pe_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { if (pcmk_is_set(action, A_PE_STOP)) { controld_clear_fsa_input_flags(R_PE_REQUIRED); pcmk_disconnect_ipc(schedulerd_api); handle_disconnect(); } if (pcmk_is_set(action, A_PE_START) && !pcmk_is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { if (cur_state == S_STOPPING) { crm_info("Ignoring request to connect to scheduler while shutting down"); } else if (!new_schedulerd_ipc_connection()) { crm_warn("Could not connect to scheduler"); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } } static int fsa_pe_query = 0; static mainloop_timer_t *controld_sched_timer = NULL; // @TODO Make this a configurable cluster option if there's demand for it #define SCHED_TIMEOUT_MS (120000) /*! * \internal * \brief Handle a timeout waiting for scheduler reply * * \param[in] user_data Ignored * * \return FALSE (indicating that timer should not be restarted) */ static gboolean controld_sched_timeout(gpointer user_data) { if (AM_I_DC) { /* If this node is the DC but can't communicate with the scheduler, just * exit (and likely get fenced) so this node doesn't interfere with any * further DC elections. * * @TODO We could try something less drastic first, like disconnecting * and reconnecting to the scheduler, but something is likely going * seriously wrong, so perhaps it's better to just fail as quickly as * possible. */ crmd_exit(CRM_EX_FATAL); } return FALSE; } void controld_stop_sched_timer(void) { if ((controld_sched_timer != NULL) && (controld_globals.fsa_pe_ref != NULL)) { crm_trace("Stopping timer for scheduler reply %s", controld_globals.fsa_pe_ref); } mainloop_timer_stop(controld_sched_timer); } /*! 
* \internal * \brief Set the scheduler request currently being waited on * * \param[in] ref Request to expect reply to (or NULL for none) * * \note This function takes ownership of \p ref. */ void controld_expect_sched_reply(char *ref) { if (ref) { if (controld_sched_timer == NULL) { controld_sched_timer = mainloop_timer_add("scheduler_reply_timer", SCHED_TIMEOUT_MS, FALSE, controld_sched_timeout, NULL); } mainloop_timer_start(controld_sched_timer); } else { controld_stop_sched_timer(); } free(controld_globals.fsa_pe_ref); controld_globals.fsa_pe_ref = ref; } /*! * \internal * \brief Free the scheduler reply timer */ void controld_free_sched_timer(void) { if (controld_sched_timer != NULL) { mainloop_timer_del(controld_sched_timer); controld_sched_timer = NULL; } } /* A_PE_INVOKE */ void do_pe_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { cib_t *cib_conn = controld_globals.cib_conn; if (AM_I_DC == FALSE) { crm_err("Not invoking scheduler because not DC: %s", fsa_action2string(action)); return; } if (!pcmk_is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { crm_err("Cannot shut down gracefully without the scheduler"); register_fsa_input_before(C_FSA_INTERNAL, I_TERMINATE, NULL); } else { crm_info("Waiting for the scheduler to connect"); crmd_fsa_stall(FALSE); controld_set_fsa_action_flags(A_PE_START); controld_trigger_fsa(); } return; } if (cur_state != S_POLICY_ENGINE) { crm_notice("Not invoking scheduler because in state %s", fsa_state2string(cur_state)); return; } if (!pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { crm_err("Attempted to invoke scheduler without consistent Cluster Information Base!"); /* start the join from scratch */ register_fsa_input_before(C_FSA_INTERNAL, I_ELECTION, NULL); return; } fsa_pe_query = cib_conn->cmds->query(cib_conn, NULL, NULL, cib_none); crm_debug("Query %d: Requesting the current CIB: %s", fsa_pe_query, fsa_state2string(controld_globals.fsa_state)); controld_expect_sched_reply(NULL); fsa_register_cib_callback(fsa_pe_query, NULL, do_pe_invoke_callback); } static void force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value) { int max = 0; int lpc = 0; const char *xpath_base = NULL; char *xpath_string = NULL; xmlXPathObject *xpathObj = NULL; xpath_base = pcmk_cib_xpath_for(PCMK_XE_CRM_CONFIG); if (xpath_base == NULL) { crm_err(PCMK_XE_CRM_CONFIG " CIB element not known (bug?)"); return; } xpath_string = crm_strdup_printf("%s//%s//nvpair[@name='%s']", xpath_base, PCMK_XE_CLUSTER_PROPERTY_SET, attr_name); xpathObj = pcmk__xpath_search(xml->doc, xpath_string); max = pcmk__xpath_num_results(xpathObj); free(xpath_string); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); crm_trace("Forcing %s/%s = %s", pcmk__xe_id(match), attr_name, attr_value); crm_xml_add(match, PCMK_XA_VALUE, attr_value); } if(max == 0) { xmlNode *configuration = NULL; xmlNode *crm_config = NULL; xmlNode *cluster_property_set = NULL; crm_trace("Creating %s-%s for %s=%s", PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name, attr_name, attr_value); configuration = pcmk__xe_first_child(xml, PCMK_XE_CONFIGURATION, NULL, NULL); if (configuration == NULL) { configuration = pcmk__xe_create(xml, PCMK_XE_CONFIGURATION); } crm_config = pcmk__xe_first_child(configuration, PCMK_XE_CRM_CONFIG, NULL, NULL); if (crm_config == NULL) { crm_config = 
pcmk__xe_create(configuration, PCMK_XE_CRM_CONFIG); } cluster_property_set = pcmk__xe_first_child(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET, NULL, NULL); if (cluster_property_set == NULL) { cluster_property_set = pcmk__xe_create(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET); crm_xml_add(cluster_property_set, PCMK_XA_ID, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS); } xml = pcmk__xe_create(cluster_property_set, PCMK_XE_NVPAIR); pcmk__xe_set_id(xml, "%s-%s", PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name); crm_xml_add(xml, PCMK_XA_NAME, attr_name); crm_xml_add(xml, PCMK_XA_VALUE, attr_value); } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } static void do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *ref = NULL; pid_t watchdog = pcmk__locate_sbd(); if (rc != pcmk_ok) { crm_err("Could not retrieve the Cluster Information Base: %s " QB_XS " rc=%d call=%d", pcmk_strerror(rc), rc, call_id); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); return; } else if (call_id != fsa_pe_query) { crm_trace("Skipping superseded CIB query: %d (current=%d)", call_id, fsa_pe_query); return; } else if (!AM_I_DC || !pcmk_is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { crm_debug("No need to invoke the scheduler anymore"); return; } else if (controld_globals.fsa_state != S_POLICY_ENGINE) { crm_debug("Discarding scheduler request in state: %s", fsa_state2string(controld_globals.fsa_state)); return; /* this callback counts as 1 */ } else if (num_cib_op_callbacks() > 1) { crm_debug("Re-asking for the CIB: %d other peer updates still pending", (num_cib_op_callbacks() - 1)); sleep(1); controld_set_fsa_action_flags(A_PE_INVOKE); controld_trigger_fsa(); return; } CRM_LOG_ASSERT(output != NULL); /* Refresh the remote node cache and the known node cache when the * scheduler is invoked */ pcmk__refresh_node_caches_from_cib(output); crm_xml_add(output, PCMK_XA_DC_UUID, controld_globals.our_uuid); pcmk__xe_set_bool_attr(output, PCMK_XA_HAVE_QUORUM, pcmk_is_set(controld_globals.flags, controld_has_quorum)); force_local_option(output, PCMK_OPT_HAVE_WATCHDOG, pcmk__btoa(watchdog)); if (pcmk_is_set(controld_globals.flags, controld_ever_had_quorum) && !pcmk__cluster_has_quorum()) { crm_xml_add_int(output, PCMK_XA_NO_QUORUM_PANIC, 1); } rc = pcmk_schedulerd_api_graph(schedulerd_api, output, &ref); if (rc != pcmk_rc_ok) { free(ref); crm_err("Could not contact the scheduler: %s " QB_XS " rc=%d", pcmk_rc_str(rc), rc); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); } else { pcmk__assert(ref != NULL); controld_expect_sched_reply(ref); crm_debug("Invoking the scheduler: query=%d, ref=%s, seq=%llu, " "quorate=%s", fsa_pe_query, controld_globals.fsa_pe_ref, controld_globals.peer_seq, pcmk__flag_text(controld_globals.flags, controld_has_quorum)); } } diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c index 6c498e7fe9..0b1561aa03 100644 --- a/daemons/controld/controld_te_callbacks.c +++ b/daemons/controld/controld_te_callbacks.c @@ -1,524 +1,524 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. 
#include // An explicit PCMK_OPT_SHUTDOWN_LOCK of 0 means the lock has been cleared static bool shutdown_lock_cleared(xmlNode *lrm_resource) { time_t shutdown_lock = 0; return (crm_element_value_epoch(lrm_resource, PCMK_OPT_SHUTDOWN_LOCK, &shutdown_lock) == pcmk_ok) && (shutdown_lock == 0); } static void process_lrm_resource_diff(xmlNode *lrm_resource, const char *node) { for (xmlNode *rsc_op = pcmk__xe_first_child(lrm_resource, NULL, NULL, NULL); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, NULL)) { process_graph_event(rsc_op, node); } if (shutdown_lock_cleared(lrm_resource)) { // @TODO would be more efficient to abort once after transition done abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Shutdown lock cleared", lrm_resource); } } static void process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, const char *op, const char *xpath) { xmlNode *rsc = NULL; if (xml == NULL) { return; } if (pcmk__xe_is(xml, PCMK__XE_LRM)) { xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL); CRM_CHECK(xml != NULL, return); } CRM_CHECK(pcmk__xe_is(xml, PCMK__XE_LRM_RESOURCES), return); /* * Updates by, or in response to, TE actions will never contain updates * for more than one resource at a time, so such updates indicate an * LRM refresh. * * In that case, start a new transition rather than check each result * individually, which can result in _huge_ speedups in large clusters. * * Unfortunately, we can only do so when there are no pending actions. * Otherwise, we could mistakenly throw away those results here, and * the cluster will stall waiting for them and time out the operation. */ if ((controld_globals.transition_graph->pending == 0) && (xml->children != NULL) && (xml->children->next != NULL)) { crm_log_xml_trace(change, "lrm-refresh"); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "History refresh", NULL); return; } for (rsc = pcmk__xe_first_child(xml, NULL, NULL, NULL); rsc != NULL; rsc = pcmk__xe_next(rsc, NULL)) { crm_trace("Processing %s", pcmk__xe_id(rsc)); process_lrm_resource_diff(rsc, node); } } static char *extract_node_uuid(const char *xpath) { char *mutable_path = pcmk__str_copy(xpath); char *node_uuid = NULL; char *search = NULL; char *match = NULL; match = strstr(mutable_path, PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "=\'"); if (match == NULL) { free(mutable_path); return NULL; } match += strlen(PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "=\'"); search = strchr(match, '\''); if (search == NULL) { free(mutable_path); return NULL; } search[0] = 0; node_uuid = pcmk__str_copy(match); free(mutable_path); return node_uuid; } static void abort_unless_down(const char *xpath, const char *op, xmlNode *change, const char *reason) { char *node_uuid = NULL; pcmk__graph_action_t *down = NULL; if (!pcmk__str_eq(op, PCMK_VALUE_DELETE, pcmk__str_none)) { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, change); return; } node_uuid = extract_node_uuid(xpath); if(node_uuid == NULL) { crm_err("Could not extract node ID from %s", xpath); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, change); return; } down = match_down_event(node_uuid); if (down == NULL) { crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, change); } else { crm_trace("Expecting changes to %s (%s)", node_uuid, xpath); } free(node_uuid); } static void process_op_deletion(const char *xpath, xmlNode *change) { char *mutable_key = pcmk__str_copy(xpath); char *key; char 
*node_uuid; // Extract the part of xpath between last pair of single quotes key = strrchr(mutable_key, '\''); if (key != NULL) { *key = '\0'; key = strrchr(mutable_key, '\''); } if (key == NULL) { crm_warn("Ignoring malformed CIB update (resource deletion of %s)", xpath); free(mutable_key); return; } ++key; node_uuid = extract_node_uuid(xpath); if (confirm_cancel_action(key, node_uuid) == FALSE) { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Resource operation removal", change); } free(mutable_key); free(node_uuid); } static void process_delete_diff(const char *xpath, const char *op, xmlNode *change) { if (strstr(xpath, "/" PCMK__XE_LRM_RSC_OP "[")) { process_op_deletion(xpath, change); } else if (strstr(xpath, "/" PCMK__XE_LRM "[")) { abort_unless_down(xpath, op, change, "Resource state removal"); } else if (strstr(xpath, "/" PCMK__XE_NODE_STATE "[")) { abort_unless_down(xpath, op, change, "Node state removal"); } else { crm_trace("Ignoring delete of %s", xpath); } } static void process_node_state_diff(xmlNode *state, xmlNode *change, const char *op, const char *xpath) { xmlNode *lrm = pcmk__xe_first_child(state, PCMK__XE_LRM, NULL, NULL); process_resource_updates(pcmk__xe_id(state), lrm, change, op, xpath); } static void process_status_diff(xmlNode *status, xmlNode *change, const char *op, const char *xpath) { for (xmlNode *state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL; state = pcmk__xe_next(state, NULL)) { process_node_state_diff(state, change, op, xpath); } } static void process_cib_diff(xmlNode *cib, xmlNode *change, const char *op, const char *xpath) { xmlNode *status = pcmk__xe_first_child(cib, PCMK_XE_STATUS, NULL, NULL); xmlNode *config = pcmk__xe_first_child(cib, PCMK_XE_CONFIGURATION, NULL, NULL); if (status) { process_status_diff(status, change, op, xpath); } if (config) { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Non-status-only change", change); } } static int te_update_diff_element(xmlNode *change, void *userdata) { xmlNode *match = NULL; const char *name = NULL; const char *xpath = crm_element_value(change, PCMK_XA_PATH); // Possible ops: create, modify, delete, move const char *op = crm_element_value(change, PCMK_XA_OPERATION); // Ignore uninteresting updates if (op == NULL) { return pcmk_rc_ok; } else if (xpath == NULL) { crm_trace("Ignoring %s change for version field", op); return pcmk_rc_ok; } else if ((strcmp(op, PCMK_VALUE_MOVE) == 0) && (strstr(xpath, "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES) == NULL)) { /* We still need to consider moves within the resources section, * since they affect placement order. */ crm_trace("Ignoring move change at %s", xpath); return pcmk_rc_ok; } // Find the result of create/modify ops if (strcmp(op, PCMK_VALUE_CREATE) == 0) { match = change->children; } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { match = pcmk__xe_first_child(change, PCMK_XE_CHANGE_RESULT, NULL, NULL); if(match) { match = match->children; } } else if (!pcmk__str_any_of(op, PCMK_VALUE_DELETE, PCMK_VALUE_MOVE, NULL)) { crm_warn("Ignoring malformed CIB update (%s operation on %s is unrecognized)", op, xpath); return pcmk_rc_ok; } if (match) { if (match->type == XML_COMMENT_NODE) { crm_trace("Ignoring %s operation for comment at %s", op, xpath); return pcmk_rc_ok; } name = (const char *)match->name; } crm_trace("Handling %s operation for %s%s%s", op, (xpath? xpath : "CIB"), (name? " matched by " : ""), (name? 
name : "")); if (strstr(xpath, "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION)) { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Configuration change", change); return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for } else if (strstr(xpath, "/" PCMK_XE_TICKETS) || pcmk__str_eq(name, PCMK_XE_TICKETS, pcmk__str_none)) { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Ticket attribute change", change); return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for } else if (strstr(xpath, "/" PCMK__XE_TRANSIENT_ATTRIBUTES "[") || pcmk__str_eq(name, PCMK__XE_TRANSIENT_ATTRIBUTES, pcmk__str_none)) { abort_unless_down(xpath, op, change, "Transient attribute change"); return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for } else if (strcmp(op, PCMK_VALUE_DELETE) == 0) { process_delete_diff(xpath, op, change); } else if (name == NULL) { crm_warn("Ignoring malformed CIB update (%s at %s has no result)", op, xpath); } else if (strcmp(name, PCMK_XE_CIB) == 0) { process_cib_diff(match, change, op, xpath); } else if (strcmp(name, PCMK_XE_STATUS) == 0) { process_status_diff(match, change, op, xpath); } else if (strcmp(name, PCMK__XE_NODE_STATE) == 0) { process_node_state_diff(match, change, op, xpath); } else if (strcmp(name, PCMK__XE_LRM) == 0) { process_resource_updates(pcmk__xe_id(match), match, change, op, xpath); } else if (strcmp(name, PCMK__XE_LRM_RESOURCES) == 0) { char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); process_resource_updates(local_node, match, change, op, xpath); free(local_node); } else if (strcmp(name, PCMK__XE_LRM_RESOURCE) == 0) { char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); process_lrm_resource_diff(match, local_node); free(local_node); } else if (strcmp(name, PCMK__XE_LRM_RSC_OP) == 0) { char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); process_graph_event(match, local_node); free(local_node); } else { crm_warn("Ignoring malformed CIB update (%s at %s has unrecognized result %s)", op, xpath, name); } return pcmk_rc_ok; } void te_update_diff(const char *event, xmlNode * msg) { xmlNode *wrapper = NULL; xmlNode *diff = NULL; const char *op = NULL; int rc = -EINVAL; int format = 1; int p_add[] = { 0, 0, 0 }; int p_del[] = { 0, 0, 0 }; CRM_CHECK(msg != NULL, return); crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc); if (controld_globals.transition_graph == NULL) { crm_trace("No graph"); return; } else if (rc < pcmk_ok) { crm_trace("Filter rc=%d (%s)", rc, pcmk_strerror(rc)); return; } else if (controld_globals.transition_graph->complete && (controld_globals.fsa_state != S_IDLE) && (controld_globals.fsa_state != S_TRANSITION_ENGINE) && (controld_globals.fsa_state != S_POLICY_ENGINE)) { crm_trace("Filter state=%s (complete)", fsa_state2string(controld_globals.fsa_state)); return; } op = crm_element_value(msg, PCMK__XA_CIB_OP); wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); diff = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); xml_patch_versions(diff, p_add, p_del); crm_debug("Processing (%s) diff: %d.%d.%d -> %d.%d.%d (%s)", op, p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2], fsa_state2string(controld_globals.fsa_state)); crm_element_value_int(diff, PCMK_XA_FORMAT, &format); if (format == 2) { crm_log_xml_trace(diff, "patch"); pcmk__xe_foreach_child(diff, NULL, te_update_diff_element, NULL); } else { crm_warn("Ignoring malformed CIB update (unknown patch format %d)", 
format); } controld_remove_all_outside_events(); } void process_te_message(xmlNode * msg, xmlNode * xml_data) { const char *value = NULL; xmlXPathObject *xpathObj = NULL; int nmatches = 0; CRM_CHECK(msg != NULL, return); // Transition requests must specify transition engine as subsystem value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); if (pcmk__str_empty(value) || !pcmk__str_eq(value, CRM_SYSTEM_TENGINE, pcmk__str_none)) { crm_info("Received invalid transition request: subsystem '%s' not '" CRM_SYSTEM_TENGINE "'", pcmk__s(value, "")); return; } // Only the lrm_invoke command is supported as a transition request value = crm_element_value(msg, PCMK__XA_CRM_TASK); if (!pcmk__str_eq(value, CRM_OP_INVOKE_LRM, pcmk__str_none)) { crm_info("Received invalid transition request: command '%s' not '" CRM_OP_INVOKE_LRM "'", pcmk__s(value, "")); return; } // Transition requests must be marked as coming from the executor value = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); if (!pcmk__str_eq(value, CRM_SYSTEM_LRMD, pcmk__str_none)) { crm_info("Received invalid transition request: from '%s' not '" CRM_SYSTEM_LRMD "'", pcmk__s(value, "")); return; } crm_debug("Processing transition request with ref='%s' origin='%s'", pcmk__s(crm_element_value(msg, PCMK_XA_REFERENCE), ""), pcmk__s(crm_element_value(msg, PCMK__XA_SRC), "")); xpathObj = pcmk__xpath_search(xml_data->doc, "//" PCMK__XE_LRM_RSC_OP); nmatches = pcmk__xpath_num_results(xpathObj); if (nmatches == 0) { crm_err("Received transition request with no results (bug?)"); } else { for (int lpc = 0; lpc < nmatches; lpc++) { xmlNode *rsc_op = pcmk__xpath_result(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } void cib_action_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Update %d FAILED: %s", call_id, pcmk_strerror(rc)); } } /*! * \brief Handle a timeout in node-to-node communication * * \param[in,out] data Pointer to graph action * * \return FALSE (indicating that the source should not be re-added) */ gboolean action_timer_callback(gpointer data) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) data; const char *task = NULL; const char *on_node = NULL; const char *via_node = NULL; CRM_CHECK(data != NULL, return FALSE); stop_te_timer(action); task = crm_element_value(action->xml, PCMK_XA_OPERATION); on_node = crm_element_value(action->xml, PCMK__META_ON_NODE); via_node = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (controld_globals.transition_graph->complete) { crm_notice("Node %s did not send %s result (via %s) within %dms " "(ignoring because transition not in progress)", (on_node? on_node : ""), (task? task : "unknown action"), (via_node? via_node : "controller"), action->timeout); } else { /* Fail the action */ crm_err("Node %s did not send %s result (via %s) within %dms " "(action timeout plus " PCMK_OPT_CLUSTER_DELAY ")", (on_node? on_node : ""), (task? task : "unknown action"), (via_node?
via_node : "controller"), (action->timeout + controld_globals.transition_graph->network_delay)); pcmk__log_graph_action(LOG_ERR, action); pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); te_action_confirmed(action, controld_globals.transition_graph); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Action lost", NULL); // Record timeout in the CIB if appropriate if ((action->type == pcmk__rsc_graph_action) && controld_action_is_recordable(task)) { controld_record_action_timeout(action); } } return FALSE; } diff --git a/daemons/controld/controld_te_events.c b/daemons/controld/controld_te_events.c index cd2b2bdf70..5899dba317 100644 --- a/daemons/controld/controld_te_events.c +++ b/daemons/controld/controld_te_events.c @@ -1,613 +1,613 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include #include #include /*! * \internal * \brief Action numbers of outside events processed in current update diff * * This table is to be used as a set. It should be empty when the transitioner * begins processing a CIB update diff. It ensures that if there are multiple * events (for example, "_last_0" and "_last_failure_0") for the same action, * only one of them updates the failcount. Events that originate outside the * cluster can't be confirmed, since they're not in the transition graph. */ static GHashTable *outside_events = NULL; /*! * \internal * \brief Empty the hash table containing action numbers of outside events */ void controld_remove_all_outside_events(void) { if (outside_events != NULL) { g_hash_table_remove_all(outside_events); } } /*! * \internal * \brief Destroy the hash table containing action numbers of outside events */ void controld_destroy_outside_events_table(void) { if (outside_events != NULL) { g_hash_table_destroy(outside_events); outside_events = NULL; } } /*! * \internal * \brief Add an outside event's action number to a set * * \return Standard Pacemaker return code. Specifically, \p pcmk_rc_ok if the * event was not already in the set, or \p pcmk_rc_already otherwise. 
*/ static int record_outside_event(gint action_num) { if (outside_events == NULL) { outside_events = g_hash_table_new(NULL, NULL); } if (g_hash_table_add(outside_events, GINT_TO_POINTER(action_num))) { return pcmk_rc_ok; } return pcmk_rc_already; } gboolean fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node) { const char *target_uuid = NULL; const char *router = NULL; const char *router_uuid = NULL; xmlNode *last_action = NULL; GList *gIter = NULL; GList *gIter2 = NULL; if (graph == NULL || graph->complete) { return FALSE; } gIter = graph->synapses; for (; gIter != NULL; gIter = gIter->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) gIter->data; if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_failed)) { /* We've already been here */ continue; } gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) gIter2->data; if ((action->type == pcmk__pseudo_graph_action) || pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { continue; } else if (action->type == pcmk__cluster_graph_action) { const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { continue; } } target_uuid = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); router = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (router) { const pcmk__node_status_t *node = pcmk__get_node(0, router, NULL, pcmk__node_search_cluster_member); if (node != NULL) { router_uuid = node->xml_id; } } if (pcmk__str_eq(target_uuid, down_node, pcmk__str_casei) || pcmk__str_eq(router_uuid, down_node, pcmk__str_casei)) { pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); pcmk__set_synapse_flags(synapse, pcmk__synapse_failed); last_action = action->xml; stop_te_timer(action); pcmk__update_graph(graph, action); if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { crm_notice("Action %d (%s) was pending on %s (offline)", action->id, crm_element_value(action->xml, PCMK__XA_OPERATION_KEY), down_node); } else { crm_info("Action %d (%s) is scheduled for %s (offline)", action->id, crm_element_value(action->xml, PCMK__XA_OPERATION_KEY), down_node); } } } } if (last_action != NULL) { crm_info("Node %s shutdown resulted in un-runnable actions", down_node); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node failure", last_action); return TRUE; } return FALSE; } /*! 
* \internal * \brief Update failure-related node attributes if warranted * * \param[in] event XML describing operation that (maybe) failed * \param[in] event_node_uuid Node that event occurred on * \param[in] rc Actual operation return code * \param[in] target_rc Expected operation return code * \param[in] do_update If TRUE, do update regardless of operation type * \param[in] ignore_failures If TRUE, update last failure but not fail count * * \return TRUE if this was not a direct nack, success or lrm status refresh */ static gboolean update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, int target_rc, gboolean do_update, gboolean ignore_failures) { guint interval_ms = 0; char *task = NULL; char *rsc_id = NULL; const char *value = NULL; const char *id = crm_element_value(event, PCMK__XA_OPERATION_KEY); const char *on_uname = pcmk__node_name_from_uuid(event_node_uuid); const char *origin = crm_element_value(event, PCMK_XA_CRM_DEBUG_ORIGIN); // Nothing needs to be done for success or status refresh if (rc == target_rc) { return FALSE; } else if (pcmk__str_eq(origin, "build_active_RAs", pcmk__str_casei)) { crm_debug("No update for %s (rc=%d) on %s: Old failure from lrm status refresh", id, rc, on_uname); return FALSE; } /* Sanity check */ CRM_CHECK(on_uname != NULL, return TRUE); CRM_CHECK(parse_op_key(id, &rsc_id, &task, &interval_ms), crm_err("Couldn't parse: %s", pcmk__xe_id(event)); goto bail); /* Decide whether update is necessary and what value to use */ if ((interval_ms > 0) || pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_none) || pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_none)) { do_update = TRUE; } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_start_offset, PCMK_VALUE_INFINITY); } else if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_stop_offset, PCMK_VALUE_INFINITY); } if (do_update) { pcmk__attrd_query_pair_t *fail_pair = NULL; pcmk__attrd_query_pair_t *last_pair = NULL; char *fail_name = NULL; char *last_name = NULL; GList *attrs = NULL; uint32_t opts = pcmk__node_attr_none; char *now = pcmk__ttoa(time(NULL)); // Fail count will be either incremented or set to infinity if (!pcmk_str_is_infinity(value)) { value = PCMK_XA_VALUE "++"; } if (g_hash_table_lookup(pcmk__remote_peer_cache, event_node_uuid)) { opts |= pcmk__node_attr_remote; } crm_info("Updating %s for %s on %s after failed %s: rc=%d (update=%s, time=%s)", (ignore_failures? "last failure" : "failcount"), rsc_id, on_uname, task, rc, value, now); /* Update the fail count, if we're not ignoring failures */ if (!ignore_failures) { fail_pair = pcmk__assert_alloc(1, sizeof(pcmk__attrd_query_pair_t)); fail_name = pcmk__failcount_name(rsc_id, task, interval_ms); fail_pair->name = fail_name; fail_pair->value = value; fail_pair->node = on_uname; attrs = g_list_prepend(attrs, fail_pair); } /* Update the last failure time (even if we're ignoring failures, * so that failure can still be detected and shown, e.g. 
by crm_mon) */ last_pair = pcmk__assert_alloc(1, sizeof(pcmk__attrd_query_pair_t)); last_name = pcmk__lastfailure_name(rsc_id, task, interval_ms); last_pair->name = last_name; last_pair->value = now; last_pair->node = on_uname; attrs = g_list_prepend(attrs, last_pair); update_attrd_list(attrs, opts); free(fail_name); free(fail_pair); free(last_name); free(last_pair); g_list_free(attrs); free(now); } bail: free(rsc_id); free(task); return TRUE; } pcmk__graph_action_t * controld_get_action(int id) { for (GList *item = controld_globals.transition_graph->synapses; item != NULL; item = item->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) item->data; for (GList *item2 = synapse->actions; item2; item2 = item2->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) item2->data; if (action->id == id) { return action; } } } return NULL; } pcmk__graph_action_t * get_cancel_action(const char *id, const char *node) { GList *gIter = NULL; GList *gIter2 = NULL; gIter = controld_globals.transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) gIter->data; gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { const char *task = NULL; const char *target = NULL; pcmk__graph_action_t *action = (pcmk__graph_action_t *) gIter2->data; task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (!pcmk__str_eq(PCMK_ACTION_CANCEL, task, pcmk__str_casei)) { continue; } task = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY); if (!pcmk__str_eq(task, id, pcmk__str_casei)) { continue; } target = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); if (node && !pcmk__str_eq(target, node, pcmk__str_casei)) { crm_trace("Wrong node %s for %s on %s", target, id, node); continue; } crm_trace("Found %s on %s", id, node); return action; } } return NULL; } bool confirm_cancel_action(const char *id, const char *node_id) { const char *op_key = NULL; const char *node_name = NULL; pcmk__graph_action_t *cancel = get_cancel_action(id, node_id); if (cancel == NULL) { return FALSE; } op_key = crm_element_value(cancel->xml, PCMK__XA_OPERATION_KEY); node_name = crm_element_value(cancel->xml, PCMK__META_ON_NODE); stop_te_timer(cancel); te_action_confirmed(cancel, controld_globals.transition_graph); crm_info("Cancellation of %s on %s confirmed (action %d)", op_key, node_name, cancel->id); return TRUE; } /* Downed nodes are listed like: <downed> <node id="UUID1"/> ... </downed> */ #define XPATH_DOWNED "//" PCMK__XE_DOWNED \ "/" PCMK_XE_NODE "[@" PCMK_XA_ID "='%s']" /*!
* \brief Find a transition event that would have made a specified node down * * \param[in] target UUID of node to match * * \return Matching event if found, NULL otherwise */ pcmk__graph_action_t * match_down_event(const char *target) { pcmk__graph_action_t *match = NULL; xmlXPathObject *xpath_ret = NULL; GList *gIter, *gIter2; char *xpath = crm_strdup_printf(XPATH_DOWNED, target); for (gIter = controld_globals.transition_graph->synapses; gIter != NULL && match == NULL; gIter = gIter->next) { for (gIter2 = ((pcmk__graph_synapse_t * ) gIter->data)->actions; gIter2 != NULL && match == NULL; gIter2 = gIter2->next) { match = (pcmk__graph_action_t *) gIter2->data; if (pcmk_is_set(match->flags, pcmk__graph_action_executed)) { xpath_ret = pcmk__xpath_search(match->xml->doc, xpath); if (pcmk__xpath_num_results(xpath_ret) < 1) { match = NULL; } - freeXpathObject(xpath_ret); + xmlXPathFreeObject(xpath_ret); } else { // Only actions that were actually started can match match = NULL; } } } free(xpath); if (match != NULL) { crm_debug("Shutdown action %d (%s) found for node %s", match->id, crm_element_value(match->xml, PCMK__XA_OPERATION_KEY), target); } else { crm_debug("No reason to expect node %s to be down", target); } return match; } void process_graph_event(xmlNode *event, const char *event_node) { int rc = -1; // Actual result int target_rc = -1; // Expected result int status = -1; // Executor status int callid = -1; // Executor call ID int transition_num = -1; // Transition number int action_num = -1; // Action number within transition char *update_te_uuid = NULL; bool ignore_failures = FALSE; const char *id = NULL; const char *desc = NULL; const char *magic = NULL; const char *uname = NULL; pcmk__assert(event != NULL); /* */ magic = crm_element_value(event, PCMK__XA_TRANSITION_KEY); if (magic == NULL) { /* non-change */ return; } crm_element_value_int(event, PCMK__XA_OP_STATUS, &status); if (status == PCMK_EXEC_PENDING) { return; } id = crm_element_value(event, PCMK__XA_OPERATION_KEY); crm_element_value_int(event, PCMK__XA_RC_CODE, &rc); crm_element_value_int(event, PCMK__XA_CALL_ID, &callid); rc = pcmk__effective_rc(rc); if (decode_transition_key(magic, &update_te_uuid, &transition_num, &action_num, &target_rc) == FALSE) { // decode_transition_key() already logged the bad key crm_err("Can't process action %s result: Incompatible versions? " QB_XS " call-id=%d", id, callid); abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Bad event", event); return; } if (transition_num == -1) { // E.g. crm_resource --fail if (record_outside_event(action_num) != pcmk_rc_ok) { crm_debug("Outside event with transition key '%s' has already been " "processed", magic); goto bail; } desc = "initiated outside of the cluster"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Unexpected event", event); } else if ((action_num < 0) || !pcmk__str_eq(update_te_uuid, controld_globals.te_uuid, pcmk__str_none)) { desc = "initiated by a different DC"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Foreign event", event); } else if ((controld_globals.transition_graph->id != transition_num) || controld_globals.transition_graph->complete) { // Action is not from currently active transition guint interval_ms = 0; if (parse_op_key(id, NULL, NULL, &interval_ms) && (interval_ms != 0)) { /* Recurring actions have the transition number they were first * scheduled in. 
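* (Illustrative example: a recurring rsc1_monitor_10000 first scheduled in transition 3 keeps that transition number in its key, so its results can arrive while transition 12 is active. Because recurring results are recorded only when they change, such an event normally means the result changed, unless it is a cancellation confirmation.)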
*/ if (status == PCMK_EXEC_CANCELLED) { confirm_cancel_action(id, get_node_id(event)); goto bail; } desc = "arrived after initial scheduling"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Change in recurring result", event); } else if (controld_globals.transition_graph->id != transition_num) { desc = "arrived really late"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Old event", event); } else { desc = "arrived late"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Inactive graph", event); } } else { // Event is result of an action from currently active transition pcmk__graph_action_t *action = controld_get_action(action_num); if (action == NULL) { // Should never happen desc = "unknown"; abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Unknown event", event); } else if (pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { /* Nothing further needs to be done if the action has already been * confirmed. This can happen e.g. when processing both an * "xxx_last_0" or "xxx_last_failure_0" record as well as the main * history record, which would otherwise result in incorrectly * bumping the fail count twice. */ crm_log_xml_debug(event, "Event already confirmed:"); goto bail; } else { /* An action result needs to be confirmed. * (This is the only case where desc == NULL.) */ if (pcmk__str_eq(crm_meta_value(action->params, PCMK_META_ON_FAIL), PCMK_VALUE_IGNORE, pcmk__str_casei)) { ignore_failures = TRUE; } else if (rc != target_rc) { pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); } stop_te_timer(action); te_action_confirmed(action, controld_globals.transition_graph); if (pcmk_is_set(action->flags, pcmk__graph_action_failed)) { abort_transition(action->synapse->priority + 1, pcmk__graph_restart, "Event failed", event); } } } if (id == NULL) { id = "unknown action"; } uname = crm_element_value(event, PCMK__META_ON_NODE); if (uname == NULL) { uname = "unknown node"; } if (status == PCMK_EXEC_INVALID) { // We couldn't attempt the action crm_info("Transition %d action %d (%s on %s): %s", transition_num, action_num, id, uname, pcmk_exec_status_str(status)); } else if (desc && update_failcount(event, event_node, rc, target_rc, (transition_num == -1), FALSE)) { crm_notice("Transition %d action %d (%s on %s): expected '%s' but got '%s' " QB_XS " target-rc=%d rc=%d call-id=%d event='%s'", transition_num, action_num, id, uname, crm_exit_str(target_rc), crm_exit_str(rc), target_rc, rc, callid, desc); } else if (desc) { crm_info("Transition %d action %d (%s on %s): %s " QB_XS " rc=%d target-rc=%d call-id=%d", transition_num, action_num, id, uname, desc, rc, target_rc, callid); } else if (rc == target_rc) { crm_info("Transition %d action %d (%s on %s) confirmed: %s " QB_XS " rc=%d call-id=%d", transition_num, action_num, id, uname, crm_exit_str(rc), rc, callid); } else { update_failcount(event, event_node, rc, target_rc, (transition_num == -1), ignore_failures); crm_notice("Transition %d action %d (%s on %s): expected '%s' but got '%s' " QB_XS " target-rc=%d rc=%d call-id=%d", transition_num, action_num, id, uname, crm_exit_str(target_rc), crm_exit_str(rc), target_rc, rc, callid); } bail: free(update_te_uuid); } diff --git a/daemons/fenced/fenced_cib.c b/daemons/fenced/fenced_cib.c index 1d6e3e6061..796a858b1c 100644 --- a/daemons/fenced/fenced_cib.c +++ b/daemons/fenced/fenced_cib.c @@ -1,617 +1,617 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include -#include <libxml/xpath.h> // xmlXPathObject +#include <libxml/xpath.h> // xmlXPathObject, etc. #include #include #include #include #include #include static xmlNode *local_cib = NULL; static cib_t *cib_api = NULL; static bool have_cib_devices = FALSE; /*! * \internal * \brief Check whether a node has a specific attribute name/value * * \param[in] node Name of node to check * \param[in] name Name of an attribute to look for * \param[in] value Value the named attribute must have to be considered a match * * \return TRUE if the locally cached CIB has the specified node attribute */ gboolean node_has_attr(const char *node, const char *name, const char *value) { GString *xpath = NULL; xmlNode *match; CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL) && (value != NULL), return FALSE); /* Search for the node's attributes in the CIB. While the schema allows * multiple sets of instance attributes, and allows instance attributes to * use id-ref to reference values elsewhere, that is intended for resources, * so we ignore that here. */ xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, "//" PCMK_XE_NODES "/" PCMK_XE_NODE "[@" PCMK_XA_UNAME "='", node, "']" "/" PCMK_XE_INSTANCE_ATTRIBUTES "/" PCMK_XE_NVPAIR "[@" PCMK_XA_NAME "='", name, "' " "and @" PCMK_XA_VALUE "='", value, "']", NULL); match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER); g_string_free(xpath, TRUE); return (match != NULL); } static void add_topology_level(xmlNode *match) { char *desc = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(match != NULL, return); fenced_register_level(match, &desc, &result); fenced_send_config_notification(STONITH_OP_LEVEL_ADD, &result, desc); pcmk__reset_result(&result); free(desc); } static void topology_remove_helper(const char *node, int level) { char *desc = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; xmlNode *data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add_int(data, PCMK_XA_INDEX, level); crm_xml_add(data, PCMK_XA_TARGET, node); fenced_unregister_level(data, &desc, &result); fenced_send_config_notification(STONITH_OP_LEVEL_DEL, &result, desc); pcmk__reset_result(&result); pcmk__xml_free(data); free(desc); } static void remove_topology_level(xmlNode *match) { int index = 0; char *key = NULL; CRM_CHECK(match != NULL, return); key = stonith_level_key(match, fenced_target_by_unknown); crm_element_value_int(match, PCMK_XA_INDEX, &index); topology_remove_helper(key, index); free(key); } static void register_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = pcmk__xpath_num_results(xpathObj); for (int lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); remove_topology_level(match); add_topology_level(match); } } /* Fencing */ void fencing_topology_init(void) { xmlXPathObject *xpathObj = NULL; const char *xpath = "//" PCMK_XE_FENCING_LEVEL; crm_trace("Full topology refresh"); free_topology_list(); init_topology_list(); /* Grab everything */ xpathObj = pcmk__xpath_search(local_cib->doc, xpath); register_fencing_topology(xpathObj); - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } #define XPATH_WATCHDOG_TIMEOUT "//" PCMK_XE_NVPAIR \ "[@" PCMK_XA_NAME "='" \ PCMK_OPT_STONITH_WATCHDOG_TIMEOUT "']" static void update_stonith_watchdog_timeout_ms(xmlNode *cib) { long long
timeout_ms = 0; xmlNode *stonith_watchdog_xml = NULL; const char *value = NULL; // @TODO An XPath search can't handle multiple instances or rules stonith_watchdog_xml = get_xpath_object(XPATH_WATCHDOG_TIMEOUT, cib, LOG_NEVER); if (stonith_watchdog_xml) { value = crm_element_value(stonith_watchdog_xml, PCMK_XA_VALUE); } if (value) { timeout_ms = crm_get_msec(value); } if (timeout_ms < 0) { timeout_ms = pcmk__auto_stonith_watchdog_timeout(); } stonith_watchdog_timeout_ms = timeout_ms; } /*! * \internal * \brief Update all STONITH device definitions based on current CIB */ static void cib_devices_update(void) { GHashTableIter iter; stonith_device_t *device = NULL; crm_info("Updating devices to version %s.%s.%s", crm_element_value(local_cib, PCMK_XA_ADMIN_EPOCH), crm_element_value(local_cib, PCMK_XA_EPOCH), crm_element_value(local_cib, PCMK_XA_NUM_UPDATES)); g_hash_table_iter_init(&iter, device_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { if (device->cib_registered) { device->dirty = TRUE; } } /* Have the list repopulated if the CIB has a watchdog-fencing resource. TODO: keep a cached list for queries that happen while we are refreshing */ g_list_free_full(stonith_watchdog_targets, free); stonith_watchdog_targets = NULL; fenced_scheduler_run(local_cib); g_hash_table_iter_init(&iter, device_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { if (device->dirty) { g_hash_table_iter_remove(&iter); } } } static void update_cib_stonith_devices(const char *event, xmlNode * msg) { int format = 1; xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); xmlNode *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); char *reason = NULL; CRM_CHECK(patchset != NULL, return); crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); if (format != 2) { crm_warn("Unknown patch format: %d", format); return; } for (xmlNode *change = pcmk__xe_first_child(patchset, NULL, NULL, NULL); change != NULL; change = pcmk__xe_next(change, NULL)) { const char *op = crm_element_value(change, PCMK_XA_OPERATION); const char *xpath = crm_element_value(change, PCMK_XA_PATH); const char *shortpath = NULL; if (pcmk__str_eq(op, PCMK_VALUE_MOVE, pcmk__str_null_matches) || (strstr(xpath, "/" PCMK_XE_STATUS) != NULL)) { continue; } if (pcmk__str_eq(op, PCMK_VALUE_DELETE, pcmk__str_none) && (strstr(xpath, "/" PCMK_XE_PRIMITIVE) != NULL)) { const char *rsc_id = NULL; char *search = NULL; char *mutable = NULL; if ((strstr(xpath, PCMK_XE_INSTANCE_ATTRIBUTES) != NULL) || (strstr(xpath, PCMK_XE_META_ATTRIBUTES) != NULL)) { reason = pcmk__str_copy("(meta) attribute deleted from " "resource"); break; } mutable = pcmk__str_copy(xpath); rsc_id = strstr(mutable, PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "=\'"); if (rsc_id != NULL) { rsc_id += strlen(PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "=\'"); search = strchr(rsc_id, '\''); } if (search != NULL) { *search = 0; stonith_device_remove(rsc_id, true); /* watchdog_device_update() is called afterwards to fall back to the implicit definition if needed */ } else { crm_warn("Ignoring malformed CIB update (resource deletion)"); } free(mutable); } else if (strstr(xpath, "/" PCMK_XE_RESOURCES) || strstr(xpath, "/" PCMK_XE_CONSTRAINTS) || strstr(xpath, "/" PCMK_XE_RSC_DEFAULTS)) { shortpath = strrchr(xpath, '/'); pcmk__assert(shortpath != NULL); reason = crm_strdup_printf("%s %s", op, shortpath+1); break; } } if (reason != NULL) { crm_info("Updating device list from CIB: %s", reason); cib_devices_update(); free(reason); } else { crm_trace("No updates for
device list found in CIB"); } } static void watchdog_device_update(void) { if (stonith_watchdog_timeout_ms > 0) { if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && !stonith_watchdog_targets) { /* If we get here, watchdog fencing is enabled, no watchdog device exists yet, and it is not stonith_watchdog_targets that is preventing its creation */ int rc; xmlNode *xml; xml = create_device_registration_xml( STONITH_WATCHDOG_ID, st_namespace_internal, STONITH_WATCHDOG_AGENT, NULL, /* stonith_device_register() will add our own name as the PCMK_STONITH_HOST_LIST parameter, so we can skip that here */ NULL); rc = stonith_device_register(xml, TRUE); pcmk__xml_free(xml); if (rc != pcmk_ok) { rc = pcmk_legacy2rc(rc); exit_code = CRM_EX_FATAL; crm_crit("Cannot register watchdog pseudo fence agent: %s", pcmk_rc_str(rc)); stonith_shutdown(0); } } } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) { /* be silent if there is no device (TODO: add a parameter to stonith_device_remove() for this) */ stonith_device_remove(STONITH_WATCHDOG_ID, true); } } /*! * \internal * \brief Query the full CIB * * \return Standard Pacemaker return code */ static int fenced_query_cib(void) { int rc = pcmk_ok; crm_trace("Re-requesting full CIB"); rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { pcmk__assert(local_cib != NULL); } else { crm_err("Couldn't retrieve the CIB: %s " QB_XS " rc=%d", pcmk_rc_str(rc), rc); } return rc; } static void update_fencing_topology(const char *event, xmlNode *msg) { xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); xmlNode *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); int format = 1; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; CRM_CHECK(patchset != NULL, return); crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); if (format != 2) { crm_warn("Unknown patch format: %d", format); return; } xml_patch_versions(patchset, add, del); for (xmlNode *change = pcmk__xe_first_child(patchset, NULL, NULL, NULL); change != NULL; change = pcmk__xe_next(change, NULL)) { const char *op = crm_element_value(change, PCMK_XA_OPERATION); const char *xpath = crm_element_value(change, PCMK_XA_PATH); if (op == NULL) { continue; } if (strstr(xpath, "/" PCMK_XE_FENCING_LEVEL) != NULL) { // Change to a specific entry crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); if (strcmp(op, PCMK_VALUE_DELETE) == 0) { /* We have only path and ID, which is not enough info to remove * a specific entry. Re-initialize the whole topology.
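* (A v2 patchset identifies the deleted entry only positionally, roughly like <change operation="delete" path=".../fencing-topology/fencing-level[2]"/>, with no target or index attributes to match against.)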
*/ crm_info("Re-initializing fencing topology after %s operation " "%d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } if (strcmp(op, PCMK_VALUE_CREATE) == 0) { add_topology_level(change->children); } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { xmlNode *match = pcmk__xe_first_child(change, PCMK_XE_CHANGE_RESULT, NULL, NULL); if (match != NULL) { remove_topology_level(match->children); add_topology_level(match->children); } } continue; } if (strstr(xpath, "/" PCMK_XE_FENCING_TOPOLOGY) != NULL) { // Change to the topology in general crm_info("Re-initializing fencing topology after top-level " "%s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } if ((strstr(xpath, "/" PCMK_XE_CONFIGURATION) != NULL) && (pcmk__xe_first_child(change, PCMK_XE_FENCING_TOPOLOGY, NULL, NULL) != NULL) && pcmk__str_any_of(op, PCMK_VALUE_CREATE, PCMK_VALUE_DELETE, NULL)) { // Topology was created or entire configuration section was deleted crm_info("Re-initializing fencing topology after top-level " "%s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } crm_trace("Nothing for us in %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); } } static void update_cib_cache_cb(const char *event, xmlNode * msg) { long long timeout_ms_saved = stonith_watchdog_timeout_ms; bool need_full_refresh = false; if(!have_cib_devices) { crm_trace("Skipping updates until we get a full dump"); return; } else if(msg == NULL) { crm_trace("Missing %s update", event); return; } /* Maintain a local copy of the CIB so that we have full access * to device definitions, location constraints, and node attributes */ if (local_cib != NULL) { int rc = pcmk_ok; xmlNode *wrapper = NULL; xmlNode *patchset = NULL; crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc); if (rc != pcmk_ok) { return; } wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); rc = xml_apply_patchset(local_cib, patchset, TRUE); switch (rc) { case pcmk_ok: case -pcmk_err_old_data: break; case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); pcmk__xml_free(local_cib); local_cib = NULL; break; default: crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); pcmk__xml_free(local_cib); local_cib = NULL; } } if (local_cib == NULL) { if (fenced_query_cib() != pcmk_rc_ok) { return; } need_full_refresh = true; } pcmk__refresh_node_caches_from_cib(local_cib); update_stonith_watchdog_timeout_ms(local_cib); if (timeout_ms_saved != stonith_watchdog_timeout_ms) { need_full_refresh = true; } if (need_full_refresh) { fencing_topology_init(); cib_devices_update(); } else { // Partial refresh update_fencing_topology(event, msg); update_cib_stonith_devices(event, msg); } watchdog_device_update(); } static void init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { crm_info("Updating device list from CIB"); have_cib_devices = TRUE; local_cib = pcmk__xml_copy(NULL, output); pcmk__refresh_node_caches_from_cib(local_cib); update_stonith_watchdog_timeout_ms(local_cib); fencing_topology_init(); cib_devices_update(); watchdog_device_update(); } static void cib_connection_destroy(gpointer user_data) { if (stonith_shutdown_flag) { crm_info("Connection to the CIB manager closed"); return; } else { crm_crit("Lost connection to the CIB manager, shutting down"); } if 
(cib_api) { cib_api->cmds->signoff(cib_api); } stonith_shutdown(0); } /*! * \internal * \brief Disconnect from CIB manager */ void fenced_cib_cleanup(void) { if (cib_api != NULL) { cib_api->cmds->del_notify_callback(cib_api, PCMK__VALUE_CIB_DIFF_NOTIFY, update_cib_cache_cb); cib__clean_up_connection(&cib_api); } pcmk__xml_free(local_cib); local_cib = NULL; } void setup_cib(void) { int rc, retries = 0; cib_api = cib_new(); if (cib_api == NULL) { crm_err("No connection to the CIB manager"); return; } do { sleep(retries); rc = cib_api->cmds->signon(cib_api, crm_system_name, cib_command); } while (rc == -ENOTCONN && ++retries < 5); if (rc != pcmk_ok) { crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); return; } rc = cib_api->cmds->add_notify_callback(cib_api, PCMK__VALUE_CIB_DIFF_NOTIFY, update_cib_cache_cb); if (rc != pcmk_ok) { crm_err("Could not set CIB notification callback"); return; } rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_none); cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", init_cib_cache_cb); cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); crm_info("Watching for fencing topology changes"); } diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index 64d544de49..6984375671 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -1,3658 +1,3658 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include <libxml/xpath.h> // xmlXPathObject +#include <libxml/xpath.h> // xmlXPathObject, etc.
#include #include #include #include #include #include #include #include #include GHashTable *device_list = NULL; GHashTable *topology = NULL; static GList *cmd_list = NULL; static GHashTable *fenced_handlers = NULL; struct device_search_s { /* target of fence action */ char *host; /* requested fence action */ char *action; /* timeout to use if a device is queried dynamically for possible targets */ // @TODO This name is misleading now, it's the value of stonith-timeout int per_device_timeout; /* number of registered fencing devices at time of request */ int replies_needed; /* number of device replies received so far */ int replies_received; /* whether the target is eligible to perform requested action (or off) */ bool allow_self; /* private data to pass to search callback function */ void *user_data; /* function to call when all replies have been received */ void (*callback) (GList * devices, void *user_data); /* devices capable of performing requested action (or off if remapping) */ GList *capable; /* Whether to perform searches that support the action */ uint32_t support_action_only; }; static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); static int get_agent_metadata(const char *agent, xmlNode **metadata); static void read_action_metadata(stonith_device_t *device); static enum fenced_target_by unpack_level_kind(const xmlNode *level); typedef struct async_command_s { int id; int pid; int fd_stdout; uint32_t options; int default_timeout; /* seconds */ int timeout; /* seconds */ int start_delay; // seconds (-1 means disable static/random fencing delays) int delay_id; char *op; char *origin; char *client; char *client_name; char *remote_op_id; char *target; uint32_t target_nodeid; char *action; char *device; GList *device_list; GList *next_device_iter; // device_list entry for next device to execute void *internal_user_data; void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data); guint timer_sigterm; guint timer_sigkill; /*! 
If the operation timed out, this is the last signal * we sent to the process to get it to terminate */ int last_timeout_signo; stonith_device_t *active_on; stonith_device_t *activating_on; } async_command_t; static xmlNode *construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result); static gboolean is_action_required(const char *action, const stonith_device_t *device) { return (device != NULL) && device->automatic_unfencing && pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none); } static int get_action_delay_max(const stonith_device_t *device, const char *action) { const char *value = NULL; guint delay_max = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX); if (value) { pcmk_parse_interval_spec(value, &delay_max); delay_max /= 1000; } return (int) delay_max; } static int get_action_delay_base(const stonith_device_t *device, const char *action, const char *target) { char *hash_value = NULL; guint delay_base = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE); if (hash_value) { char *value = pcmk__str_copy(hash_value); char *valptr = value; if (target != NULL) { /* The value may map targets to delays, e.g. "node1:2s;node2:5s" (illustrative); look for an entry matching this target */ for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) { char *mapval = strchr(val, ':'); if (mapval == NULL || mapval[1] == 0) { crm_err("pcmk_delay_base: empty value in mapping '%s'", val); continue; } if (mapval != val && strncasecmp(target, val, (size_t)(mapval - val)) == 0) { value = mapval + 1; crm_debug("pcmk_delay_base mapped to %s for %s", value, target); break; } } } if (strchr(value, ':') == 0) { pcmk_parse_interval_spec(value, &delay_base); delay_base /= 1000; } free(valptr); } return (int) delay_base; } /*! * \internal * \brief Override STONITH timeout with pcmk_*_timeout if available * * \param[in] device STONITH device to use * \param[in] action STONITH action name * \param[in] default_timeout Timeout to use if device does not have * a pcmk_*_timeout parameter for action * * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout * \note For consistency, it would be nice if reboot/off/on timeouts could be * set the same way as start/stop/monitor timeouts, i.e. with an * entry in the fencing resource configuration. However that * is insufficient because fencing devices may be registered directly via * the fencer's register_device() API instead of going through the CIB * (e.g. stonith_admin uses it for its -R option, and the executor uses it * to ensure a device is registered when a command is issued). As device * properties, pcmk_*_timeout parameters can be grabbed by the fencer when * the device is registered, whether by CIB change or API call.
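* For example (hypothetical values), configuring pcmk_reboot_timeout=300s on a slow power device makes this function return 300 for "reboot" requests, while other actions keep the passed-in default.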
*/ static int get_action_timeout(const stonith_device_t *device, const char *action, int default_timeout) { if (action && device && device->params) { char buffer[64] = { 0, }; const char *value = NULL; /* If "reboot" was requested but the device does not support it, * we will remap to "off", so check timeout for "off" instead */ if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_trace("%s doesn't support reboot, using timeout for off instead", device->id); action = PCMK_ACTION_OFF; } /* If the device config specified an action-specific timeout, use it */ snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (value) { long long timeout_ms = crm_get_msec(value); return (int) QB_MIN(pcmk__timeout_ms2s(timeout_ms), INT_MAX); } } return default_timeout; } /*! * \internal * \brief Get the currently executing device for a fencing operation * * \param[in] cmd Fencing operation to check * * \return Currently executing device for \p cmd if any, otherwise NULL */ static stonith_device_t * cmd_device(const async_command_t *cmd) { if ((cmd == NULL) || (cmd->device == NULL) || (device_list == NULL)) { return NULL; } return g_hash_table_lookup(device_list, cmd->device); } /*! * \internal * \brief Return the configured reboot action for a given device * * \param[in] device_id Device ID * * \return Configured reboot action for \p device_id */ const char * fenced_device_reboot_action(const char *device_id) { const char *action = NULL; if ((device_list != NULL) && (device_id != NULL)) { stonith_device_t *device = g_hash_table_lookup(device_list, device_id); if ((device != NULL) && (device->params != NULL)) { action = g_hash_table_lookup(device->params, "pcmk_reboot_action"); } } return pcmk__s(action, PCMK_ACTION_REBOOT); } /*! * \internal * \brief Check whether a given device supports the "on" action * * \param[in] device_id Device ID * * \return true if \p device_id supports "on", otherwise false */ bool fenced_device_supports_on(const char *device_id) { if ((device_list != NULL) && (device_id != NULL)) { stonith_device_t *device = g_hash_table_lookup(device_list, device_id); if (device != NULL) { return pcmk_is_set(device->flags, st_device_supports_on); } } return false; } static void free_async_command(async_command_t * cmd) { if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } cmd_list = g_list_remove(cmd_list, cmd); g_list_free_full(cmd->device_list, free); free(cmd->device); free(cmd->action); free(cmd->target); free(cmd->remote_op_id); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->op); free(cmd); } /*! * \internal * \brief Create a new asynchronous fencing operation from request XML * * \param[in] msg Fencing request XML (from IPC or CPG) * * \return Newly allocated fencing operation on success, otherwise NULL * * \note This asserts on memory errors, so a NULL return indicates an * unparseable message. 
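* (The only hard requirements, checked first in the function body, are the device action, the operation type, and the client ID; call ID, timeout, delay, and the origin-related fields are all optional.)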
*/ static async_command_t * create_async_command(xmlNode *msg) { xmlNode *op = NULL; async_command_t *cmd = NULL; int rc = pcmk_rc_ok; if (msg == NULL) { return NULL; } op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg, LOG_ERR); if (op == NULL) { return NULL; } cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); // All messages must include these cmd->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION); cmd->op = crm_element_value_copy(msg, PCMK__XA_ST_OP); cmd->client = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTID); if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) { free_async_command(cmd); return NULL; } crm_element_value_int(msg, PCMK__XA_ST_CALLID, &(cmd->id)); crm_element_value_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay)); crm_element_value_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; rc = pcmk__xe_get_flags(msg, PCMK__XA_ST_CALLOPT, &(cmd->options), st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } cmd->origin = crm_element_value_copy(msg, PCMK__XA_SRC); cmd->remote_op_id = crm_element_value_copy(msg, PCMK__XA_ST_REMOTE_OP); cmd->client_name = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTNAME); cmd->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET); cmd->device = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ID); cmd->done_cb = st_child_done; // Track in global command list cmd_list = g_list_append(cmd_list, cmd); return cmd; } static int get_action_limit(stonith_device_t * device) { const char *value = NULL; int action_limit = 1; value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT); if ((value == NULL) || (pcmk__scan_min_int(value, &action_limit, INT_MIN) != pcmk_rc_ok) || (action_limit == 0)) { action_limit = 1; } return action_limit; } static int get_active_cmds(stonith_device_t * device) { int counter = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return 0); for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd = gIter->data; gIterNext = gIter->next; if (cmd->active_on == device) { counter++; } } return counter; } static void fork_cb(int pid, void *user_data) { async_command_t *cmd = (async_command_t *) user_data; stonith_device_t * device = /* in case of a retry we've done the move from activating_on to active_on already */ cmd->activating_on?cmd->activating_on:cmd->active_on; pcmk__assert(device != NULL); crm_debug("Operation '%s' [%d]%s%s using %s now running with %ds timeout", cmd->action, pid, ((cmd->target == NULL)? "" : " targeting "), pcmk__s(cmd->target, ""), device->id, cmd->timeout); cmd->active_on = device; cmd->activating_on = NULL; } static int get_agent_metadata_cb(gpointer data) { stonith_device_t *device = data; guint period_ms; switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); stonith__device_parameter_flags(&(device->flags), device->id, device->agent_metadata); } return G_SOURCE_REMOVE; case EAGAIN: period_ms = pcmk__mainloop_timer_get_period(device->timer); if (period_ms < 160 * 1000) { mainloop_timer_set_period(device->timer, 2 * period_ms); } return G_SOURCE_CONTINUE; default: return G_SOURCE_REMOVE; } } /*! 
* \internal * \brief Call a command's action callback for an internal (not library) result * * \param[in,out] cmd Command to report result for * \param[in] execution_status Execution status to use for result * \param[in] exit_status Exit status to use for result * \param[in] exit_reason Exit reason to use for result */ static void report_internal_result(async_command_t *cmd, int exit_status, int execution_status, const char *exit_reason) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__set_result(&result, exit_status, execution_status, exit_reason); cmd->done_cb(0, &result, cmd); pcmk__reset_result(&result); } static gboolean stonith_device_execute(stonith_device_t * device) { int exec_rc = 0; const char *action_str = NULL; const char *host_arg = NULL; async_command_t *cmd = NULL; stonith_action_t *action = NULL; int active_cmds = 0; int action_limit = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return FALSE); active_cmds = get_active_cmds(device); action_limit = get_action_limit(device); if (action_limit > -1 && active_cmds >= action_limit) { crm_trace("%s is over its action limit of %d (%u active action%s)", device->id, action_limit, active_cmds, pcmk__plural_s(active_cmds)); return TRUE; } for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) { async_command_t *pending_op = gIter->data; gIterNext = gIter->next; if (pending_op && pending_op->delay_id) { crm_trace("Operation '%s'%s%s using %s was asked to run too early, " "waiting for start delay of %ds", pending_op->action, ((pending_op->target == NULL)? "" : " targeting "), pcmk__s(pending_op->target, ""), device->id, pending_op->start_delay); continue; } device->pending_ops = g_list_remove_link(device->pending_ops, gIter); g_list_free_1(gIter); cmd = pending_op; break; } if (cmd == NULL) { crm_trace("No actions using %s are needed", device->id); return TRUE; } if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { if (pcmk__is_fencing_action(cmd->action)) { if (node_does_watchdog_fencing(fenced_get_local_node())) { pcmk__panic("Watchdog self-fencing required"); goto done; } } else { crm_info("Faking success for %s watchdog operation", cmd->action); report_internal_result(cmd, CRM_EX_OK, PCMK_EXEC_DONE, NULL); goto done; } } #if PCMK__ENABLE_CIBSECRETS exec_rc = pcmk__substitute_secrets(device->id, device->params); if (exec_rc != pcmk_rc_ok) { if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { crm_info("Proceeding with stop operation for %s " "despite being unable to load CIB secrets (%s)", device->id, pcmk_rc_str(exec_rc)); } else { crm_err("Considering %s unconfigured " "because unable to load CIB secrets: %s", device->id, pcmk_rc_str(exec_rc)); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_SECRETS, "Failed to get CIB secrets"); goto done; } } #endif action_str = cmd->action; if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_notice("Remapping 'reboot' action%s%s using %s to 'off' " "because agent '%s' does not support reboot", ((cmd->target == NULL)? 
"" : " targeting "), pcmk__s(cmd->target, ""), device->id, device->agent); action_str = PCMK_ACTION_OFF; } if (pcmk_is_set(device->flags, st_device_supports_parameter_port)) { host_arg = "port"; } else if (pcmk_is_set(device->flags, st_device_supports_parameter_plug)) { host_arg = "plug"; } action = stonith__action_create(device->agent, action_str, cmd->target, cmd->target_nodeid, cmd->timeout, device->params, device->aliases, host_arg); /* for async exec, exec_rc is negative for early error exit otherwise handling of success/errors is done via callbacks */ cmd->activating_on = device; exec_rc = stonith__execute_async(action, (void *)cmd, cmd->done_cb, fork_cb); if (exec_rc < 0) { cmd->activating_on = NULL; cmd->done_cb(0, stonith__action_result(action), cmd); stonith__destroy_action(action); } done: /* Device might get triggered to work by multiple fencing commands * simultaneously. Trigger the device again to make sure any * remaining concurrent commands get executed. */ if (device->pending_ops) { mainloop_set_trigger(device->work); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static gboolean start_delay_helper(gpointer data) { async_command_t *cmd = data; stonith_device_t *device = cmd_device(cmd); cmd->delay_id = 0; if (device) { mainloop_set_trigger(device->work); } return FALSE; } static void schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) { int delay_max = 0; int delay_base = 0; int requested_delay = cmd->start_delay; CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } if (device->include_nodeid && (cmd->target != NULL)) { pcmk__node_status_t *node = pcmk__get_node(0, cmd->target, NULL, pcmk__node_search_cluster_member); cmd->target_nodeid = node->cluster_layer_id; } cmd->device = pcmk__str_copy(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { crm_debug("Scheduling '%s' action%s%s using %s for remote peer %s " "with op id %.8s and timeout %ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->origin, cmd->remote_op_id, cmd->timeout); } else { crm_debug("Scheduling '%s' action%s%s using %s for %s with timeout %ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->client, cmd->timeout); } device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); // Value -1 means disable any static/random fencing delays if (requested_delay < 0) { return; } delay_max = get_action_delay_max(device, cmd->action); delay_base = get_action_delay_base(device, cmd->action, cmd->target); if (delay_max == 0) { delay_max = delay_base; } if (delay_max < delay_base) { crm_warn(PCMK_STONITH_DELAY_BASE " (%ds) is larger than " PCMK_STONITH_DELAY_MAX " (%ds) for %s using %s " "(limiting to maximum delay)", delay_base, delay_max, cmd->action, device->id); delay_base = delay_max; } if (delay_max > 0) { // coverity[dontcall] It doesn't matter here if rand() is predictable cmd->start_delay += ((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0) + delay_base; } if (cmd->start_delay > 0) { crm_notice("Delaying '%s' action%s%s using %s for %ds " QB_XS " timeout=%ds requested_delay=%ds base=%ds max=%ds", cmd->action, (cmd->target == NULL)? 
"" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->start_delay, cmd->timeout, requested_delay, delay_base, delay_max); cmd->delay_id = pcmk__create_timer(cmd->start_delay * 1000, start_delay_helper, cmd); } } static void free_device(gpointer data) { GList *gIter = NULL; stonith_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Device was removed before action could be executed"); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); if (device->timer) { mainloop_timer_stop(device->timer); mainloop_timer_del(device->timer); } mainloop_destroy_trigger(device->work); pcmk__xml_free(device->agent_metadata); free(device->namespace); if (device->on_target_actions != NULL) { g_string_free(device->on_target_actions, TRUE); } free(device->agent); free(device->id); free(device); } void free_device_list(void) { if (device_list != NULL) { g_hash_table_destroy(device_list); device_list = NULL; } } void init_device_list(void) { if (device_list == NULL) { device_list = pcmk__strkey_table(NULL, free_device); } } static GHashTable * build_port_aliases(const char *hostmap, GList ** targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = pcmk__strikey_table(free, free); if (hostmap == NULL) { return aliases; } max = strlen(hostmap); for (; lpc <= max; lpc++) { switch (hostmap[lpc]) { /* Skip escaped chars */ case '\\': lpc++; break; /* Assignment chars */ case '=': case ':': if (lpc > last) { free(name); name = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimeter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if (name) { char *value = NULL; int k = 0; value = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last); for (int i = 0; value[i] != '\0'; i++) { if (value[i] != '\\') { value[k++] = value[i]; } } value[k] = '\0'; crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { *targets = g_list_append(*targets, pcmk__str_copy(value)); } value = NULL; name = NULL; added++; } else if (lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last); } last = lpc + 1; break; } if (hostmap[lpc] == 0) { break; } } if (added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } GHashTable *metadata_cache = NULL; void free_metadata_cache(void) { if (metadata_cache != NULL) { g_hash_table_destroy(metadata_cache); metadata_cache = NULL; } } static void init_metadata_cache(void) { if (metadata_cache == NULL) { metadata_cache = pcmk__strkey_table(free, free); } } int get_agent_metadata(const char *agent, xmlNode ** metadata) { char *buffer = NULL; if (metadata == NULL) { return EINVAL; } *metadata = NULL; if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) { return pcmk_rc_ok; } init_metadata_cache(); buffer = g_hash_table_lookup(metadata_cache, agent); if (buffer == NULL) { stonith_t *st = stonith_api_new(); int rc; if (st == NULL) { crm_warn("Could not get agent meta-data: " "API memory allocation failed"); return EAGAIN; } rc = 
st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); stonith_api_delete(st); if (rc || !buffer) { crm_err("Could not retrieve metadata for fencing agent %s", agent); return EAGAIN; } g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer); } *metadata = pcmk__xml_parse(buffer); return pcmk_rc_ok; } static gboolean is_nodeid_required(xmlNode * xml) { xmlXPathObject *xpath = NULL; if (!xml) { return FALSE; } xpath = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PARAMETER "[@" PCMK_XA_NAME "='nodeid']"); if (pcmk__xpath_num_results(xpath) <= 0) { - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); return FALSE; } - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); return TRUE; } static void read_action_metadata(stonith_device_t *device) { xmlXPathObject *xpath = NULL; int max = 0; int lpc = 0; if (device->agent_metadata == NULL) { return; } xpath = pcmk__xpath_search(device->agent_metadata->doc, "//" PCMK_XE_ACTION); max = pcmk__xpath_num_results(xpath); if (max <= 0) { - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *action = NULL; xmlNode *match = pcmk__xpath_result(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; action = crm_element_value(match, PCMK_XA_NAME); if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_list); } else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_status); } else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_reboot); } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { /* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it * joins. * * @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for * PCMK_XA_AUTOMATIC. 
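* * An agent typically advertises this in its metadata with something like <action name="on" automatic="1"/> (example illustrative; the attribute spelling is PCMK_XA_AUTOMATIC).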
*/ if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC) || pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) { device->automatic_unfencing = TRUE; } stonith__set_device_flags(device->flags, device->id, st_device_supports_on); } if ((action != NULL) && pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) { pcmk__add_word(&(device->on_target_actions), 64, action); } } - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); } static const char * target_list_type(stonith_device_t * dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); if (check_type == NULL) { if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { check_type = PCMK_VALUE_DYNAMIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_status)) { check_type = PCMK_VALUE_STATUS; } else { check_type = PCMK_VALUE_NONE; } } return check_type; } static stonith_device_t * build_device_from_xml(xmlNode *dev) { const char *value; stonith_device_t *device = NULL; char *agent = crm_element_value_copy(dev, PCMK_XA_AGENT); CRM_CHECK(agent != NULL, return device); device = pcmk__assert_alloc(1, sizeof(stonith_device_t)); device->id = crm_element_value_copy(dev, PCMK_XA_ID); device->agent = agent; device->namespace = crm_element_value_copy(dev, PCMK__XA_NAMESPACE); device->params = xml2list(dev); value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST); if (value) { device->targets = stonith__parse_targets(value); } value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); device->aliases = build_port_aliases(value, &(device->targets)); value = target_list_type(device); if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei) && (device->targets != NULL)) { // device->targets is necessary only with PCMK_VALUE_STATIC_LIST g_list_free_full(device->targets, free); device->targets = NULL; } switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); stonith__device_parameter_flags(&(device->flags), device->id, device->agent_metadata); } break; case EAGAIN: if (device->timer == NULL) { device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, TRUE, get_agent_metadata_cb, device); } if (!mainloop_timer_running(device->timer)) { mainloop_timer_start(device->timer); } break; default: break; } value = g_hash_table_lookup(device->params, "nodeid"); if (!value) { device->include_nodeid = is_nodeid_required(device->agent_metadata); } value = crm_element_value(dev, PCMK__XA_RSC_PROVIDES); if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { device->automatic_unfencing = TRUE; } if (is_action_required(PCMK_ACTION_ON, device)) { crm_info("Fencing device '%s' requires unfencing", device->id); } if (device->on_target_actions != NULL) { crm_info("Fencing device '%s' requires actions (%s) to be executed " "on target", device->id, (const char *) device->on_target_actions->str); } device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); /* TODO: Hook up priority */ return device; } static void schedule_internal_command(const char *origin, stonith_device_t * device, const char *action, const char *target, int timeout, void *internal_user_data, void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data)) { 
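/* Internally generated commands (e.g. the status and list probes used while searching for capable devices) have no IPC client, so crm_system_name stands in for the client fields and the command goes straight onto the device's queue. */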
async_command_t *cmd = NULL; cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; cmd->action = pcmk__str_copy(action); cmd->target = pcmk__str_copy(target); cmd->device = pcmk__str_copy(device->id); cmd->origin = pcmk__str_copy(origin); cmd->client = pcmk__str_copy(crm_system_name); cmd->client_name = pcmk__str_copy(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ schedule_stonith_command(cmd, device); } // Fence agent status commands use custom exit status codes enum fence_status_code { fence_status_invalid = -1, fence_status_active = 0, fence_status_unknown = 1, fence_status_inactive = 2, }; static void status_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd_device(cmd); gboolean can = FALSE; free_async_command(cmd); if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (result->execution_status != PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because status could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); search_devices_record_result(search, dev->id, FALSE); return; } switch (result->exit_status) { case fence_status_unknown: crm_trace("%s reported it cannot fence %s", dev->id, search->host); break; case fence_status_active: case fence_status_inactive: crm_trace("%s reported it can fence %s", dev->id, search->host); can = TRUE; break; default: crm_warn("Assuming %s cannot fence %s " "(status returned unknown code %d)", dev->id, search->host, result->exit_status); break; } search_devices_record_result(search, dev->id, can); } static void dynamic_list_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd_device(cmd); gboolean can_fence = FALSE; free_async_command(cmd); /* Host/alias must be in the list output to be eligible to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (pcmk__result_ok(result)) { crm_info("Refreshing target list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = stonith__parse_targets(result->action_stdout); dev->targets_age = time(NULL); } else if (dev->targets != NULL) { if (result->execution_status == PCMK_EXEC_DONE) { crm_info("Reusing most recent target list for %s " "because list returned error code %d", dev->id, result->exit_status); } else { crm_info("Reusing most recent target list for %s " "because list could not be executed: %s%s%s%s", dev->id, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? 
"" : ")")); } } else { // We have never successfully executed list if (result->execution_status == PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because list returned error code %d", dev->id, search->host, result->exit_status); } else { crm_warn("Assuming %s cannot fence %s " "because list could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); } /* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't * explicitly specify PCMK_VALUE_DYNAMIC_LIST */ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) { crm_notice("Switching to pcmk_host_check='status' for %s", dev->id); pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK, PCMK_VALUE_STATUS); } } if (dev->targets) { const char *alias = g_hash_table_lookup(dev->aliases, search->host); if (!alias) { alias = search->host; } if (pcmk__str_in_list(alias, dev->targets, pcmk__str_casei)) { can_fence = TRUE; } } search_devices_record_result(search, dev->id, can_fence); } /*! * \internal * \brief Returns true if any key in first is not in second or second has a different value for key */ static int device_params_diff(GHashTable *first, GHashTable *second) { char *key = NULL; char *value = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, first); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) { if(strstr(key, "CRM_meta") == key) { continue; } else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) { continue; } else { char *other_value = g_hash_table_lookup(second, key); if (!other_value || !pcmk__str_eq(other_value, value, pcmk__str_casei)) { crm_trace("Different value for %s: %s != %s", key, other_value, value); return 1; } } } return 0; } /*! * \internal * \brief Checks to see if an identical device already exists in the device_list */ static stonith_device_t * device_has_duplicate(const stonith_device_t *device) { stonith_device_t *dup = g_hash_table_lookup(device_list, device->id); if (!dup) { crm_trace("No match for %s", device->id); return NULL; } else if (!pcmk__str_eq(dup->agent, device->agent, pcmk__str_casei)) { crm_trace("Different agent: %s != %s", dup->agent, device->agent); return NULL; } // Use pcmk__digest_operation() here? if (device_params_diff(device->params, dup->params) || device_params_diff(dup->params, device->params)) { return NULL; } crm_trace("Match"); return dup; } int stonith_device_register(xmlNode *dev, gboolean from_cib) { stonith_device_t *dup = NULL; stonith_device_t *device = build_device_from_xml(dev); guint ndevices = 0; int rv = pcmk_ok; CRM_CHECK(device != NULL, return -ENOMEM); /* do we have a watchdog-device? 
*/ if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) || pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do { if (stonith_watchdog_timeout_ms <= 0) { crm_err("Ignoring watchdog fence device without " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set."); rv = -ENODEV; /* fall through to cleanup & return */ } else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { crm_err("Ignoring watchdog fence device with unknown " "agent '%s' instead of '" STONITH_WATCHDOG_AGENT "'.", device->agent?device->agent:""); rv = -ENODEV; /* fall through to cleanup & return */ } else if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { crm_err("Ignoring watchdog fence device " "named '%s' instead of '" STONITH_WATCHDOG_ID "'.", device->id?device->id:""); rv = -ENODEV; /* fall through to cleanup & return */ } else { const char *local_node_name = fenced_get_local_node(); if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { /* this either has an empty list or the targets configured for watchdog-fencing */ g_list_free_full(stonith_watchdog_targets, free); stonith_watchdog_targets = device->targets; device->targets = NULL; } if (node_does_watchdog_fencing(local_node_name)) { g_list_free_full(device->targets, free); device->targets = stonith__parse_targets(local_node_name); pcmk__insert_dup(device->params, PCMK_STONITH_HOST_LIST, local_node_name); /* proceed as with any other stonith-device */ break; } crm_debug("Skipping registration of watchdog fence device on node not in host list."); /* clean up and fall through to return */ device->targets = NULL; stonith_device_remove(device->id, from_cib); } free_device(device); return rv; } while (0); dup = device_has_duplicate(device); if (dup) { ndevices = g_hash_table_size(device_list); crm_debug("Device '%s' already in device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); free_device(device); device = dup; dup = g_hash_table_lookup(device_list, device->id); dup->dirty = FALSE; } else { stonith_device_t *old = g_hash_table_lookup(device_list, device->id); if (from_cib && old && old->api_registered) { /* If the CIB is writing over an entry that is shared with a stonith client, * copy any pending ops that currently exist on the old entry to the new one.
* Otherwise the pending ops will be reported as failures */ crm_info("Overwriting existing entry for %s from CIB", device->id); device->pending_ops = old->pending_ops; device->api_registered = TRUE; old->pending_ops = NULL; if (device->pending_ops) { mainloop_set_trigger(device->work); } } g_hash_table_replace(device_list, device->id, device); ndevices = g_hash_table_size(device_list); crm_notice("Added '%s' to device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); } if (from_cib) { device->cib_registered = TRUE; } else { device->api_registered = TRUE; } return pcmk_ok; } void stonith_device_remove(const char *id, bool from_cib) { stonith_device_t *device = g_hash_table_lookup(device_list, id); guint ndevices = 0; if (!device) { ndevices = g_hash_table_size(device_list); crm_info("Device '%s' not found (%d active device%s)", id, ndevices, pcmk__plural_s(ndevices)); return; } if (from_cib) { device->cib_registered = FALSE; } else { device->verified = FALSE; device->api_registered = FALSE; } if (!device->cib_registered && !device->api_registered) { g_hash_table_remove(device_list, id); ndevices = g_hash_table_size(device_list); crm_info("Removed '%s' from device list (%d active device%s)", id, ndevices, pcmk__plural_s(ndevices)); } else { crm_trace("Not removing '%s' from device list (%d active) because " "still registered via:%s%s", id, g_hash_table_size(device_list), (device->cib_registered? " cib" : ""), (device->api_registered? " api" : "")); } } /*! * \internal * \brief Return the number of stonith levels registered for a node * * \param[in] tp Node's topology table entry * * \return Number of non-NULL levels in topology entry * \note This function is used only for log messages. */ static int count_active_levels(const stonith_topology_t *tp) { int lpc = 0; int count = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { count++; } } return count; } static void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->target); free(tp->target_value); free(tp->target_pattern); free(tp->target_attribute); free(tp); } void free_topology_list(void) { if (topology != NULL) { g_hash_table_destroy(topology); topology = NULL; } } void init_topology_list(void) { if (topology == NULL) { topology = pcmk__strkey_table(NULL, free_topology_entry); } } char * stonith_level_key(const xmlNode *level, enum fenced_target_by mode) { if (mode == fenced_target_by_unknown) { mode = unpack_level_kind(level); } switch (mode) { case fenced_target_by_name: return crm_element_value_copy(level, PCMK_XA_TARGET); case fenced_target_by_pattern: return crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN); case fenced_target_by_attribute: return crm_strdup_printf("%s=%s", crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE), crm_element_value(level, PCMK_XA_TARGET_VALUE)); default: return crm_strdup_printf("unknown-%s", pcmk__xe_id(level)); } } /*! 
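 * \note stonith_level_key() above produces keys such as "node1" for a
 *       by-name level or "rack=1" for a by-attribute level (illustrative
 *       values); these keys index the global \c topology table.
 *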
* \internal * \brief Parse target identification from topology level XML * * \param[in] level Topology level XML to parse * * \return How to identify target of \p level */ static enum fenced_target_by unpack_level_kind(const xmlNode *level) { if (crm_element_value(level, PCMK_XA_TARGET) != NULL) { return fenced_target_by_name; } if (crm_element_value(level, PCMK_XA_TARGET_PATTERN) != NULL) { return fenced_target_by_pattern; } if ((crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL) && (crm_element_value(level, PCMK_XA_TARGET_VALUE) != NULL)) { return fenced_target_by_attribute; } return fenced_target_by_unknown; } static stonith_key_value_t * parse_device_list(const char *devices) { int lpc = 0; int max = 0; int last = 0; stonith_key_value_t *output = NULL; if (devices == NULL) { return output; } max = strlen(devices); for (lpc = 0; lpc <= max; lpc++) { if (devices[lpc] == ',' || devices[lpc] == 0) { char *line = strndup(devices + last, lpc - last); output = stonith_key_value_add(output, NULL, line); free(line); last = lpc + 1; } } return output; } /*! * \internal * \brief Unpack essential information from topology request XML * * \param[in] xml Request XML to search * \param[out] mode If not NULL, where to store level kind * \param[out] target If not NULL, where to store representation of target * \param[out] id If not NULL, where to store level number * \param[out] desc If not NULL, where to store log-friendly level description * * \return Topology level XML from within \p xml, or NULL if not found * \note The caller is responsible for freeing \p *target and \p *desc if set. */ static xmlNode * unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, int *id, char **desc) { enum fenced_target_by local_mode = fenced_target_by_unknown; char *local_target = NULL; int local_id = 0; /* The level element can be the top element or lower. If top level, don't * search by xpath, because it might give multiple hits if the XML is the * CIB. */ if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) { xml = get_xpath_object("//" PCMK_XE_FENCING_LEVEL, xml, LOG_WARNING); } if (xml == NULL) { if (desc != NULL) { *desc = crm_strdup_printf("missing"); } } else { local_mode = unpack_level_kind(xml); local_target = stonith_level_key(xml, local_mode); crm_element_value_int(xml, PCMK_XA_INDEX, &local_id); if (desc != NULL) { *desc = crm_strdup_printf("%s[%d]", local_target, local_id); } } if (mode != NULL) { *mode = local_mode; } if (id != NULL) { *id = local_id; } if (target != NULL) { *target = local_target; } else { free(local_target); } return xml; } /*! * \internal * \brief Register a fencing topology level for a target * * Given an XML request specifying the target name, level index, and device IDs * for the level, this will create an entry for the target in the global topology * table if one does not already exist, then append the specified device IDs to * the entry's device list for the specified level. 
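 *
 * For example (illustrative sketch; the literal attribute spellings
 * correspond to the PCMK_XA_* constants used below), a request carrying
 * this level element registers devices "fence1" and "fence2" as level 2
 * for node1:
 * \code
 * <fencing-level id="fl-node1-2" target="node1" index="2"
 *                devices="fence1,fence2"/>
 * \endcode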
* * \param[in] msg XML request for STONITH level registration * \param[out] desc If not NULL, set to string representation "TARGET[LEVEL]" * \param[out] result Where to set result of registration */ void fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) { int id = 0; xmlNode *level; enum fenced_target_by mode; char *target; stonith_topology_t *tp; stonith_key_value_t *dIter = NULL; stonith_key_value_t *devices = NULL; CRM_CHECK((msg != NULL) && (result != NULL), return); level = unpack_level_request(msg, &mode, &target, &id, desc); if (level == NULL) { fenced_set_protocol_error(result); return; } // Ensure an ID was given (even the client API adds an ID) if (pcmk__str_empty(pcmk__xe_id(level))) { crm_warn("Ignoring registration for topology level without ID"); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Topology level is invalid without ID"); return; } // Ensure a valid target was specified if (mode == fenced_target_by_unknown) { crm_warn("Ignoring registration for topology level '%s' " "without valid target", pcmk__xe_id(level)); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid target for topology level '%s'", pcmk__xe_id(level)); return; } // Ensure level ID is in allowed range if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) { crm_warn("Ignoring topology registration for %s with invalid level %d", target, id); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level '%s'", pcmk__s(crm_element_value(level, PCMK_XA_INDEX), ""), pcmk__xe_id(level)); return; } /* Find or create topology table entry */ tp = g_hash_table_lookup(topology, target); if (tp == NULL) { tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t)); tp->kind = mode; tp->target = target; tp->target_value = crm_element_value_copy(level, PCMK_XA_TARGET_VALUE); tp->target_pattern = crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN); tp->target_attribute = crm_element_value_copy(level, PCMK_XA_TARGET_ATTRIBUTE); g_hash_table_replace(topology, tp->target, tp); crm_trace("Added %s (%d) to the topology (%d active entries)", target, (int) mode, g_hash_table_size(topology)); } else { free(target); } if (tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry", tp->target, id); } devices = parse_device_list(crm_element_value(level, PCMK_XA_DEVICES)); for (dIter = devices; dIter; dIter = dIter->next) { const char *device = dIter->value; crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id); tp->levels[id] = g_list_append(tp->levels[id], pcmk__str_copy(device)); } stonith_key_value_freeall(devices, 1, 1); { int nlevels = count_active_levels(tp); crm_info("Target %s has %d active fencing level%s", tp->target, nlevels, pcmk__plural_s(nlevels)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } /*! * \internal * \brief Unregister a fencing topology level for a target * * Given an XML request specifying the target name and level index (or 0 for all * levels), this will remove any corresponding entry for the target from the * global topology table. 
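 *
 * For example (illustrative sketch, matching the registration example
 * above), a request carrying
 * \code
 * <fencing-level id="fl-node1-2" target="node1" index="2"/>
 * \endcode
 * removes level 2 for node1, while an index of 0 removes all of node1's
 * levels at once.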
 *
 * \param[in]  msg     XML request for STONITH level unregistration
 * \param[out] desc    If not NULL, set to string representation "TARGET[LEVEL]"
 * \param[out] result  Where to set result of unregistration
 */
void
fenced_unregister_level(xmlNode *msg, char **desc,
                        pcmk__action_result_t *result)
{
    int id = -1;
    stonith_topology_t *tp;
    char *target;
    xmlNode *level = NULL;

    CRM_CHECK(result != NULL, return);

    level = unpack_level_request(msg, NULL, &target, &id, desc);
    if (level == NULL) {
        fenced_set_protocol_error(result);
        return;
    }

    // Ensure level ID is in allowed range (0 means all levels)
    if ((id < 0) || (id >= ST__LEVEL_COUNT)) {
        crm_warn("Ignoring topology unregistration for %s with invalid "
                 "level %d", target, id);
        free(target);
        crm_log_xml_trace(level, "Bad level");
        pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
                            "Invalid level number '%s' for topology level %s",
                            pcmk__s(crm_element_value(level, PCMK_XA_INDEX),
                                    ""),

                            // Client API doesn't add ID to unregistration XML
                            pcmk__s(pcmk__xe_id(level), ""));
        return;
    }

    tp = g_hash_table_lookup(topology, target);
    if (tp == NULL) {
        guint nentries = g_hash_table_size(topology);

        crm_info("No fencing topology found for %s (%d active %s)",
                 target, nentries,
                 pcmk__plural_alt(nentries, "entry", "entries"));

    } else if (id == 0 && g_hash_table_remove(topology, target)) {
        guint nentries = g_hash_table_size(topology);

        crm_info("Removed all fencing topology entries related to %s "
                 "(%d active %s remaining)", target, nentries,
                 pcmk__plural_alt(nentries, "entry", "entries"));

    } else if (tp->levels[id] != NULL) {
        guint nlevels;

        g_list_free_full(tp->levels[id], free);
        tp->levels[id] = NULL;

        nlevels = count_active_levels(tp);
        crm_info("Removed level %d from fencing topology for %s "
                 "(%d active level%s remaining)",
                 id, target, nlevels, pcmk__plural_s(nlevels));
    }

    free(target);
    pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
}

static char *
list_to_string(GList *list, const char *delim, gboolean terminate_with_delim)
{
    int max = g_list_length(list);
    size_t delim_len = delim?strlen(delim):0;

    /* Start with room for the terminator, plus one delimiter between each
     * pair of items (and one more trailing delimiter if requested)
     */
    size_t alloc_size = 1 + (max?
                             ((max - 1 + (terminate_with_delim? 1 : 0))
                              * delim_len) : 0);
    char *rv;
    GList *gIter;
    char *pos = NULL;
    const char *lead_delim = "";

    // Add room for the items themselves
    for (gIter = list; gIter != NULL; gIter = gIter->next) {
        const char *value = (const char *) gIter->data;

        alloc_size += strlen(value);
    }

    rv = pcmk__assert_alloc(alloc_size, sizeof(char));
    pos = rv;

    for (gIter = list; gIter != NULL; gIter = gIter->next) {
        const char *value = (const char *) gIter->data;

        pos = &pos[sprintf(pos, "%s%s", lead_delim, value)];
        lead_delim = delim;
    }

    if (max && terminate_with_delim) {
        sprintf(pos, "%s", delim);
    }

    return rv;
}

/*!
 * \internal
 * \brief Execute a fence agent action directly (and asynchronously)
 *
 * Handle a STONITH_OP_EXEC API message by scheduling a requested agent action
 * directly on a specified device. Only list, monitor, and status actions are
 * expected to use this call, though it should work with any agent command.
 *
 * \param[in]  msg     Request XML specifying action
 * \param[out] result  Where to store result of action
 *
 * \note If the action is monitor, the device must be registered via the API
 *       (CIB registration is not sufficient), because monitor should not be
 *       possible unless the device is "started" (API registered).
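 *
 * \note An example request fragment handled here (illustrative sketch; the
 *       element and attribute spellings stand in for the PCMK__XE_ST_* and
 *       PCMK__XA_ST_* constants used below):
 * \code
 * <st_device_id st_device_id="fence1" st_device_action="status"/>
 * \endcode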
*/ static void execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) { xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, msg, LOG_ERR); xmlNode *op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg, LOG_ERR); const char *id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); const char *action = crm_element_value(op, PCMK__XA_ST_DEVICE_ACTION); async_command_t *cmd = NULL; stonith_device_t *device = NULL; if ((id == NULL) || (action == NULL)) { crm_info("Malformed API action request: device %s, action %s", (id? id : "not specified"), (action? action : "not specified")); fenced_set_protocol_error(result); return; } if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) { // Watchdog agent actions are implemented internally if (stonith_watchdog_timeout_ms <= 0) { pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Watchdog fence device not configured"); return; } else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_result_output(result, list_to_string(stonith_watchdog_targets, "\n", TRUE), NULL); return; } else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return; } } device = g_hash_table_lookup(device_list, id); if (device == NULL) { crm_info("Ignoring API '%s' action request because device %s not found", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not found", id); return; } else if (!device->api_registered && (strcmp(action, PCMK_ACTION_MONITOR) == 0)) { // Monitors may run only on "started" (API-registered) devices crm_info("Ignoring API '%s' action request because device %s not active", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not active", id); return; } cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); fenced_set_protocol_error(result); return; } schedule_stonith_command(cmd, device); pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence) { search->replies_received++; if (can_fence && device) { if (search->support_action_only != st_device_supports_none) { stonith_device_t *dev = g_hash_table_lookup(device_list, device); if (dev && !pcmk_is_set(dev->flags, search->support_action_only)) { return; } } search->capable = g_list_append(search->capable, pcmk__str_copy(device)); } if (search->replies_needed == search->replies_received) { guint ndevices = g_list_length(search->capable); crm_debug("Search found %d device%s that can perform '%s' targeting %s", ndevices, pcmk__plural_s(ndevices), (search->action? search->action : "unknown action"), (search->host? search->host : "any node")); search->callback(search->capable, search->user_data); free(search->host); free(search->action); free(search); } } /*! 
* \internal * \brief Check whether the local host is allowed to execute a fencing action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Hostname of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return TRUE if local host is allowed to execute action, FALSE otherwise */ static gboolean localhost_is_eligible(const stonith_device_t *device, const char *action, const char *target, gboolean allow_self) { gboolean localhost_is_target = pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei); if ((device != NULL) && (action != NULL) && (device->on_target_actions != NULL) && (strstr((const char*) device->on_target_actions->str, action) != NULL)) { if (!localhost_is_target) { crm_trace("Operation '%s' using %s can only be executed for local " "host, not %s", action, device->id, target); return FALSE; } } else if (localhost_is_target && !allow_self) { crm_trace("'%s' operation does not support self-fencing", action); return FALSE; } return TRUE; } /*! * \internal * \brief Check if local node is allowed to execute (possibly remapped) action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Node name of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return true if local node is allowed to execute \p action or any actions it * might be remapped to, otherwise false */ static bool localhost_is_eligible_with_remap(const stonith_device_t *device, const char *action, const char *target, gboolean allow_self) { // Check exact action if (localhost_is_eligible(device, action, target, allow_self)) { return true; } // Check potential remaps if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* "reboot" might get remapped to "off" then "on", so even if reboot is * disallowed, return true if either of those is allowed. We'll report * the disallowed actions with the results. We never allow self-fencing * for remapped "on" actions because the target is off at that point. */ if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self) || localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) { return true; } } return false; } /*! * \internal * \brief Check whether we can use a device's cached target list * * \param[in] dev Fencing device to check * * \return \c true if \p dev cached its targets less than a minute ago, * otherwise \c false */ static inline bool can_use_target_cache(const stonith_device_t *dev) { return (dev->targets != NULL) && (time(NULL) < (dev->targets_age + 60)); } static void can_fence_host_with_device(stonith_device_t *dev, struct device_search_s *search) { gboolean can = FALSE; const char *check_type = "Internal bug"; const char *target = NULL; const char *alias = NULL; const char *dev_id = "Unspecified device"; const char *action = (search == NULL)? 
NULL : search->action; CRM_CHECK((dev != NULL) && (action != NULL), goto search_report_results); if (dev->id != NULL) { dev_id = dev->id; } target = search->host; if (target == NULL) { can = TRUE; check_type = "No target"; goto search_report_results; } /* Answer immediately if the device does not support the action * or the local node is not allowed to perform it */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none) && !pcmk_is_set(dev->flags, st_device_supports_on)) { check_type = "Agent does not support 'on'"; goto search_report_results; } else if (!localhost_is_eligible_with_remap(dev, action, target, search->allow_self)) { check_type = "This node is not allowed to execute action"; goto search_report_results; } // Check eligibility as specified by pcmk_host_check check_type = target_list_type(dev); alias = g_hash_table_lookup(dev->aliases, target); if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) { can = TRUE; } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST, pcmk__str_casei)) { if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) && g_hash_table_lookup(dev->aliases, target)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST, pcmk__str_casei)) { if (!can_use_target_cache(dev)) { int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s " "is larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ return; } if (pcmk__str_in_list(((alias == NULL)? target : alias), dev->targets, pcmk__str_casei)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) { int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is " "larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), " "timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; } else { crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type); check_type = "Invalid " PCMK_STONITH_HOST_CHECK; } search_report_results: crm_info("%s is%s eligible to fence (%s) %s%s%s%s: %s", dev_id, (can? "" : " not"), pcmk__s(action, "unspecified action"), pcmk__s(target, "unspecified target"), (alias == NULL)? "" : " (as '", pcmk__s(alias, ""), (alias == NULL)? "" : "')", check_type); search_devices_record_result(search, ((dev == NULL)? 
                                 NULL : dev_id), can);
}

static void
search_devices(gpointer key, gpointer value, gpointer user_data)
{
    stonith_device_t *dev = value;
    struct device_search_s *search = user_data;

    can_fence_host_with_device(dev, search);
}

#define DEFAULT_QUERY_TIMEOUT 20

static void
get_capable_devices(const char *host, const char *action, int timeout,
                    bool allow_self, void *user_data,
                    void (*callback) (GList * devices, void *user_data),
                    uint32_t support_action_only)
{
    struct device_search_s *search;
    guint ndevices = g_hash_table_size(device_list);

    if (ndevices == 0) {
        callback(NULL, user_data);
        return;
    }

    search = pcmk__assert_alloc(1, sizeof(struct device_search_s));
    search->host = pcmk__str_copy(host);
    search->action = pcmk__str_copy(action);
    search->per_device_timeout = timeout;
    search->allow_self = allow_self;
    search->callback = callback;
    search->user_data = user_data;
    search->support_action_only = support_action_only;

    /* We are guaranteed this many replies, even if a device is
     * unregistered while the search is in progress.
     */
    search->replies_needed = ndevices;

    crm_debug("Searching %d device%s to see which can execute '%s' targeting %s",
              ndevices, pcmk__plural_s(ndevices),
              (search->action? search->action : "unknown action"),
              (search->host? search->host : "any node"));
    g_hash_table_foreach(device_list, search_devices, search);
}

struct st_query_data {
    xmlNode *reply;
    char *remote_peer;
    char *client_id;
    char *target;
    char *action;
    int call_options;
};

/*!
 * \internal
 * \brief Add action-specific attributes to query reply XML
 *
 * \param[in,out] xml     XML to add attributes to
 * \param[in]     action  Fence action
 * \param[in]     device  Fence device
 * \param[in]     target  Fence target
 */
static void
add_action_specific_attributes(xmlNode *xml, const char *action,
                               const stonith_device_t *device,
                               const char *target)
{
    int action_specific_timeout;
    int delay_max;
    int delay_base;

    CRM_CHECK(xml && action && device, return);

    // PCMK__XA_ST_REQUIRED is currently used only for unfencing
    if (is_action_required(action, device)) {
        crm_trace("Action '%s' is required using %s", action, device->id);
        crm_xml_add_int(xml, PCMK__XA_ST_REQUIRED, 1);
    }

    // pcmk_<action>_timeout if configured
    action_specific_timeout = get_action_timeout(device, action, 0);
    if (action_specific_timeout) {
        crm_trace("Action '%s' has timeout %ds using %s",
                  action, action_specific_timeout, device->id);
        crm_xml_add_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
                        action_specific_timeout);
    }

    delay_max = get_action_delay_max(device, action);
    if (delay_max > 0) {
        crm_trace("Action '%s' has maximum random delay %ds using %s",
                  action, delay_max, device->id);
        crm_xml_add_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max);
    }

    delay_base = get_action_delay_base(device, action, target);
    if (delay_base > 0) {
        crm_xml_add_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base);
    }

    /* For example (illustrative): with pcmk_delay_base=5s and
     * pcmk_delay_max=30s, the reply advertises both values, and the executor
     * waits between 5s and 30s before acting.
     */
    if ((delay_max > 0) && (delay_base == 0)) {
        crm_trace("Action '%s' has maximum random delay %ds using %s",
                  action, delay_max, device->id);
    } else if ((delay_max == 0) && (delay_base > 0)) {
        crm_trace("Action '%s' has a static delay of %ds using %s",
                  action, delay_base, device->id);
    } else if ((delay_max > 0) && (delay_base > 0)) {
        crm_trace("Action '%s' has a minimum delay of %ds and a randomly "
                  "chosen maximum delay of %ds using %s",
                  action, delay_base, delay_max, device->id);
    }
}

/*!
 * \internal
 * \brief Add "disallowed" attribute to query reply XML if appropriate
 *
 * \param[in,out] xml         XML to add attribute to
 * \param[in]     action      Fence action
 * \param[in]     device      Fence device
 * \param[in]     target      Fence target
 * \param[in]     allow_self  Whether self-fencing is allowed
 */
static void
add_disallowed(xmlNode *xml, const char *action,
               const stonith_device_t *device, const char *target,
               gboolean allow_self)
{
    if (!localhost_is_eligible(device, action, target, allow_self)) {
        crm_trace("Action '%s' using %s is disallowed for local host",
                  action, device->id);
        pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true);
    }
}

/*!
 * \internal
 * \brief Add child element with action-specific values to query reply XML
 *
 * \param[in,out] xml         XML to add child element to
 * \param[in]     action      Fence action
 * \param[in]     device      Fence device
 * \param[in]     target      Fence target
 * \param[in]     allow_self  Whether self-fencing is allowed
 */
static void
add_action_reply(xmlNode *xml, const char *action,
                 const stonith_device_t *device, const char *target,
                 gboolean allow_self)
{
    xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION);

    crm_xml_add(child, PCMK_XA_ID, action);
    add_action_specific_attributes(child, action, device, target);
    add_disallowed(child, action, device, target, allow_self);
}

/*!
 * \internal
 * \brief Send a reply to a CPG peer or IPC client
 *
 * \param[in]     reply         XML reply to send
 * \param[in]     call_options  Send synchronously if st_opt_sync_call is set
 * \param[in]     remote_peer   If not NULL, name of peer node to send CPG reply
 * \param[in,out] client        If not NULL, client to send IPC reply
 */
static void
stonith_send_reply(const xmlNode *reply, int call_options,
                   const char *remote_peer, pcmk__client_t *client)
{
    CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)),
              return);

    if (remote_peer == NULL) {
        do_local_reply(reply, client, call_options);
    } else {
        const pcmk__node_status_t *node =
            pcmk__get_node(0, remote_peer, NULL,
                           pcmk__node_search_cluster_member);

        pcmk__cluster_send_message(node, pcmk_ipc_fenced, reply);
    }
}

static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
    struct st_query_data *query = user_data;
    int available_devices = 0;
    xmlNode *wrapper = NULL;
    xmlNode *list = NULL;
    GList *lpc = NULL;
    pcmk__client_t *client = NULL;

    if (query->client_id != NULL) {
        client = pcmk__find_client_by_id(query->client_id);
        if ((client == NULL) && (query->remote_peer == NULL)) {
            crm_trace("Skipping reply to %s: no longer a client",
                      query->client_id);
            goto done;
        }
    }

    // Pack the results into XML
    wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA);
    list = pcmk__xe_create(wrapper, __func__);
    crm_xml_add(list, PCMK__XA_ST_TARGET, query->target);

    for (lpc = devices; lpc != NULL; lpc = lpc->next) {
        stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
        const char *action = query->action;
        xmlNode *dev = NULL;

        if (!device) {
            /* It is possible the device got unregistered while
             * determining who can fence the target */
            continue;
        }

        available_devices++;

        dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID);
        crm_xml_add(dev, PCMK_XA_ID, device->id);
        crm_xml_add(dev, PCMK__XA_NAMESPACE, device->namespace);
        crm_xml_add(dev, PCMK_XA_AGENT, device->agent);

        // Has had successful monitor, list, or status on this node
        crm_xml_add_int(dev, PCMK__XA_ST_MONITOR_VERIFIED, device->verified);

        crm_xml_add_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, device->flags);

        /* If the originating fencer wants to reboot the node, and we have a
         * capable device that
doesn't support "reboot", remap to "off" instead. */ if (!pcmk_is_set(device->flags, st_device_supports_reboot) && pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { crm_trace("%s doesn't support reboot, using values for off instead", device->id); action = PCMK_ACTION_OFF; } /* Add action-specific values if available */ add_action_specific_attributes(dev, action, device, query->target); if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A "reboot" *might* get remapped to "off" then "on", so after * sending the "reboot"-specific values in the main element, we add * sub-elements for "off" and "on" values. * * We short-circuited earlier if "reboot", "off" and "on" are all * disallowed for the local host. However if only one or two are * disallowed, we send back the results and mark which ones are * disallowed. If "reboot" is disallowed, this might cause problems * with older fencer versions, which won't check for it. Older * versions will ignore "off" and "on", so they are not a problem. */ add_disallowed(dev, action, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_OFF, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE); } /* A query without a target wants device parameters */ if (query->target == NULL) { xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES); g_hash_table_foreach(device->params, hash2field, attrs); } } crm_xml_add_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices); if (query->target) { crm_debug("Found %d matching device%s for target '%s'", available_devices, pcmk__plural_s(available_devices), query->target); } else { crm_debug("%d device%s installed", available_devices, pcmk__plural_s(available_devices)); } crm_log_xml_trace(list, "query-result"); stonith_send_reply(query->reply, query->call_options, query->remote_peer, client); done: pcmk__xml_free(query->reply); free(query->remote_peer); free(query->client_id); free(query->target); free(query->action); free(query); g_list_free_full(devices, free); } /*! * \internal * \brief Log the result of an asynchronous command * * \param[in] cmd Command the result is for * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] next Alternate device that will be tried if command failed * \param[in] op_merged Whether this command was merged with an earlier one */ static void log_async_result(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, const char *next, bool op_merged) { int log_level = LOG_ERR; int output_log_level = LOG_NEVER; guint devices_remaining = g_list_length(cmd->next_device_iter); GString *msg = g_string_sized_new(80); // Reasonable starting size // Choose log levels appropriately if we have a result if (pcmk__result_ok(result)) { log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_DEBUG; } next = NULL; } else { log_level = (cmd->target == NULL)? 
LOG_NOTICE : LOG_ERR; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_WARNING; } } // Build the log message piece by piece pcmk__g_strcat(msg, "Operation '", cmd->action, "' ", NULL); if (pid != 0) { g_string_append_printf(msg, "[%d] ", pid); } if (cmd->target != NULL) { pcmk__g_strcat(msg, "targeting ", cmd->target, " ", NULL); } if (cmd->device != NULL) { pcmk__g_strcat(msg, "using ", cmd->device, " ", NULL); } // Add exit status or execution status as appropriate if (result->execution_status == PCMK_EXEC_DONE) { g_string_append_printf(msg, "returned %d", result->exit_status); } else { pcmk__g_strcat(msg, "could not be executed: ", pcmk_exec_status_str(result->execution_status), NULL); } // Add exit reason and next device if appropriate if (result->exit_reason != NULL) { pcmk__g_strcat(msg, " (", result->exit_reason, ")", NULL); } if (next != NULL) { pcmk__g_strcat(msg, ", retrying with ", next, NULL); } if (devices_remaining > 0) { g_string_append_printf(msg, " (%u device%s remaining)", (unsigned int) devices_remaining, pcmk__plural_s(devices_remaining)); } g_string_append_printf(msg, " " QB_XS " %scall %d from %s", (op_merged? "merged " : ""), cmd->id, cmd->client_name); // Log the result do_crm_log(log_level, "%s", msg->str); g_string_free(msg, TRUE); // Log the output (which may have multiple lines), if appropriate if (output_log_level != LOG_NEVER) { char *prefix = crm_strdup_printf("%s[%d]", cmd->device, pid); crm_log_output(output_log_level, prefix, result->action_stdout); free(prefix); } } /*! * \internal * \brief Reply to requester after asynchronous command completion * * \param[in] cmd Command that completed * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] merged If true, command was merged with another, not executed */ static void send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, bool merged) { xmlNode *reply = NULL; pcmk__client_t *client = NULL; CRM_CHECK((cmd != NULL) && (result != NULL), return); log_async_result(cmd, result, pid, NULL, merged); if (cmd->client != NULL) { client = pcmk__find_client_by_id(cmd->client); if ((client == NULL) && (cmd->origin == NULL)) { crm_trace("Skipping reply to %s: no longer a client", cmd->client); return; } } reply = construct_async_reply(cmd, result); if (merged) { pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true); } if (pcmk__is_fencing_action(cmd->action) && pcmk__str_eq(cmd->origin, cmd->target, pcmk__str_casei)) { /* The target was also the originator, so broadcast the result on its * behalf (since it will be unable to). */ crm_trace("Broadcast '%s' result for %s (target was also originator)", cmd->action, cmd->target); crm_xml_add(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); crm_xml_add(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, reply); } else { // Reply only to the originator stonith_send_reply(reply, cmd->options, cmd->origin, client); } crm_log_xml_trace(reply, "Reply"); pcmk__xml_free(reply); } static void cancel_stonith_command(async_command_t * cmd) { stonith_device_t *device = cmd_device(cmd); if (device) { crm_trace("Cancel scheduled '%s' action using %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } /*! 
* \internal * \brief Cancel and reply to any duplicates of a just-completed operation * * Check whether any fencing operations are scheduled to do the same thing as * one that just succeeded. If so, rather than performing the same operation * twice, return the result of this operation for all matching pending commands. * * \param[in,out] cmd Fencing operation that just succeeded * \param[in] result Result of \p cmd * \param[in] pid If nonzero, process ID of agent invocation (for logs) * * \note Duplicate merging will do the right thing for either type of remapped * reboot. If the executing fencer remapped an unsupported reboot to off, * then cmd->action will be "reboot" and will be merged with any other * reboot requests. If the originating fencer remapped a topology reboot * to off then on, we will get here once with cmd->action "off" and once * with "on", and they will be merged separately with similar requests. */ static void reply_to_duplicates(async_command_t *cmd, const pcmk__action_result_t *result, int pid) { GList *next = NULL; for (GList *iter = cmd_list; iter != NULL; iter = next) { async_command_t *cmd_other = iter->data; next = iter->next; // We might delete this entry, so grab next now if (cmd == cmd_other) { continue; } /* A pending operation matches if: * 1. The client connections are different. * 2. The target is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. */ if (pcmk__str_eq(cmd->client, cmd_other->client, pcmk__str_casei) || !pcmk__str_eq(cmd->target, cmd_other->target, pcmk__str_casei) || !pcmk__str_eq(cmd->action, cmd_other->action, pcmk__str_none) || !pcmk__str_eq(cmd->device, cmd_other->device, pcmk__str_casei)) { continue; } crm_notice("Merging fencing action '%s'%s%s originating from " "client %s with identical fencing request from client %s", cmd_other->action, (cmd_other->target == NULL)? "" : " targeting ", pcmk__s(cmd_other->target, ""), cmd_other->client_name, cmd->client_name); // Stop tracking the duplicate, send its result, and cancel it cmd_list = g_list_remove_link(cmd_list, iter); send_async_reply(cmd_other, result, pid, true); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(iter); } } /*! * \internal * \brief Return the next required device (if any) for an operation * * \param[in,out] cmd Fencing operation that just succeeded * * \return Next device required for action if any, otherwise NULL */ static stonith_device_t * next_required_device(async_command_t *cmd) { for (GList *iter = cmd->next_device_iter; iter != NULL; iter = iter->next) { stonith_device_t *next_device = g_hash_table_lookup(device_list, iter->data); if (is_action_required(cmd->action, next_device)) { /* This is only called for successful actions, so it's OK to skip * non-required devices. 
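 * (Currently only unfencing is required; see the PCMK__XA_ST_REQUIRED
 * handling in add_action_specific_attributes().)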
*/ cmd->next_device_iter = iter->next; return next_device; } } return NULL; } static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; stonith_device_t *next_device = NULL; CRM_CHECK(cmd != NULL, return); device = cmd_device(cmd); cmd->active_on = NULL; /* The device is ready to do something else now */ if (device) { if (!device->verified && pcmk__result_ok(result) && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, NULL)) { device->verified = TRUE; } mainloop_set_trigger(device->work); } if (pcmk__result_ok(result)) { next_device = next_required_device(cmd); } else if ((cmd->next_device_iter != NULL) && !is_action_required(cmd->action, device)) { /* if this device didn't work out, see if there are any others we can try. * if the failed device was 'required', we can't pick another device. */ next_device = g_hash_table_lookup(device_list, cmd->next_device_iter->data); cmd->next_device_iter = cmd->next_device_iter->next; } if (next_device == NULL) { send_async_reply(cmd, result, pid, false); if (pcmk__result_ok(result)) { reply_to_duplicates(cmd, result, pid); } free_async_command(cmd); } else { // This operation requires more fencing log_async_result(cmd, result, pid, next_device->id, false); schedule_stonith_command(cmd, next_device); } } static gint sort_device_priority(gconstpointer a, gconstpointer b) { const stonith_device_t *dev_a = a; const stonith_device_t *dev_b = b; if (dev_a->priority > dev_b->priority) { return -1; } else if (dev_a->priority < dev_b->priority) { return 1; } return 0; } static void stonith_fence_get_devices_cb(GList * devices, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; guint ndevices = g_list_length(devices); crm_info("Found %d matching device%s for target '%s'", ndevices, pcmk__plural_s(ndevices), cmd->target); if (devices != NULL) { /* Order based on priority */ devices = g_list_sort(devices, sort_device_priority); device = g_hash_table_lookup(device_list, devices->data); } if (device == NULL) { // No device found pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__format_result(&result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "No device configured for target '%s'", cmd->target); send_async_reply(cmd, &result, 0, false); pcmk__reset_result(&result); free_async_command(cmd); g_list_free_full(devices, free); } else { // Device found, schedule it for fencing cmd->device_list = devices; cmd->next_device_iter = devices->next; schedule_stonith_command(cmd, device); } } /*! 
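 * \note A request handled by fence_locally() below either names a specific
 *       device (its PCMK__XA_ST_DEVICE_ID attribute) or, if it does not,
 *       lets the fencer search all capable devices for the target via
 *       get_capable_devices().
 *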
* \internal * \brief Execute a fence action via the local node * * \param[in] msg Fencing request * \param[out] result Where to store result of fence action */ static void fence_locally(xmlNode *msg, pcmk__action_result_t *result) { const char *device_id = NULL; stonith_device_t *device = NULL; async_command_t *cmd = NULL; xmlNode *dev = NULL; CRM_CHECK((msg != NULL) && (result != NULL), return); dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR); cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); fenced_set_protocol_error(result); return; } device_id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); if (device_id != NULL) { device = g_hash_table_lookup(device_list, device_id); if (device == NULL) { crm_err("Requested device '%s' is not available", device_id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Requested device '%s' not found", device_id); return; } schedule_stonith_command(cmd, device); } else { const char *host = crm_element_value(dev, PCMK__XA_ST_TARGET); if (pcmk_is_set(cmd->options, st_opt_cs_nodeid)) { int nodeid = 0; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(host, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); if (node != NULL) { host = node->name; } } /* If we get to here, then self-fencing is implicitly allowed */ get_capable_devices(host, cmd->action, cmd->default_timeout, TRUE, cmd, stonith_fence_get_devices_cb, fenced_support_flag(cmd->action)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } /*! * \internal * \brief Build an XML reply for a fencing operation * * \param[in] request Request that reply is for * \param[in] data If not NULL, add to reply as call data * \param[in] result Full result of fencing operation * * \return Newly created XML reply * \note The caller is responsible for freeing the result. * \note This has some overlap with construct_async_reply(), but that copies * values from an async_command_t, whereas this one copies them from the * request. */ xmlNode * fenced_construct_reply(const xmlNode *request, xmlNode *data, const pcmk__action_result_t *result) { xmlNode *reply = NULL; reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); stonith__xe_set_result(reply, result); if (request == NULL) { /* Most likely, this is the result of a stonith operation that was * initiated before we came up. Unfortunately that means we lack enough * information to provide clients with a full result. * * @TODO Maybe synchronize this information at start-up? */ crm_warn("Missing request information for client notifications for " "operation with result '%s' (initiated before we came up?)", pcmk_exec_status_str(result->execution_status)); } else { const char *name = NULL; const char *value = NULL; // Attributes to copy from request to reply const char *names[] = { PCMK__XA_ST_OP, PCMK__XA_ST_CALLID, PCMK__XA_ST_CLIENTID, PCMK__XA_ST_CLIENTNAME, PCMK__XA_ST_REMOTE_OP, PCMK__XA_ST_CALLOPT, }; for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) { name = names[lpc]; value = crm_element_value(request, name); crm_xml_add(reply, name, value); } if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } } return reply; } /*! 
* \internal * \brief Build an XML reply to an asynchronous fencing command * * \param[in] cmd Fencing command that reply is for * \param[in] result Command result */ static xmlNode * construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(reply, PCMK__XA_ST_OP, cmd->op); crm_xml_add(reply, PCMK__XA_ST_DEVICE_ID, cmd->device); crm_xml_add(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id); crm_xml_add(reply, PCMK__XA_ST_CLIENTID, cmd->client); crm_xml_add(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name); crm_xml_add(reply, PCMK__XA_ST_TARGET, cmd->target); crm_xml_add(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, cmd->origin); crm_xml_add_int(reply, PCMK__XA_ST_CALLID, cmd->id); crm_xml_add_int(reply, PCMK__XA_ST_CALLOPT, cmd->options); stonith__xe_set_result(reply, result); return reply; } bool fencing_peer_active(pcmk__node_status_t *peer) { return (peer != NULL) && (peer->name != NULL) && pcmk_is_set(peer->processes, crm_get_cluster_proc()); } void set_fencing_completed(remote_fencing_op_t *op) { struct timespec tv; qb_util_timespec_from_epoch_get(&tv); op->completed = tv.tv_sec; op->completed_nsec = tv.tv_nsec; } /*! * \internal * \brief Look for alternate node needed if local node shouldn't fence target * * \param[in] target Node that must be fenced * * \return Name of an alternate node that should fence \p target if any, * or NULL otherwise */ static const char * check_alternate_host(const char *target) { if (pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { GHashTableIter gIter; pcmk__node_status_t *entry = NULL; g_hash_table_iter_init(&gIter, pcmk__peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if (fencing_peer_active(entry) && !pcmk__str_eq(entry->name, target, pcmk__str_casei)) { crm_notice("Forwarding self-fencing request to %s", entry->name); return entry->name; } } crm_warn("Will handle own fencing because no peer can"); } return NULL; } static void remove_relay_op(xmlNode * request) { xmlNode *dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request, LOG_TRACE); const char *relay_op_id = NULL; const char *op_id = NULL; const char *client_name = NULL; const char *target = NULL; remote_fencing_op_t *relay_op = NULL; if (dev) { target = crm_element_value(dev, PCMK__XA_ST_TARGET); } relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP_RELAY); op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP); client_name = crm_element_value(request, PCMK__XA_ST_CLIENTNAME); /* Delete RELAY operation. */ if ((relay_op_id != NULL) && (target != NULL) && pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { relay_op = g_hash_table_lookup(stonith_remote_op_list, relay_op_id); if (relay_op) { GHashTableIter iter; remote_fencing_op_t *list_op = NULL; g_hash_table_iter_init(&iter, stonith_remote_op_list); /* If the operation to be deleted is registered as a duplicate, delete the registration. 
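 * (That is, remove relay_op from every other operation's duplicates list
 * before dropping it from stonith_remote_op_list below.)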
             */
            while (g_hash_table_iter_next(&iter, NULL, (void **)&list_op)) {
                GList *dup_iter = NULL;

                if (list_op != relay_op) {
                    for (dup_iter = list_op->duplicates; dup_iter != NULL;
                         dup_iter = dup_iter->next) {
                        remote_fencing_op_t *other = dup_iter->data;

                        if (other == relay_op) {
                            // Drop the relay op from this op's duplicates
                            list_op->duplicates =
                                g_list_remove(list_op->duplicates, relay_op);
                            break;
                        }
                    }
                }
            }
            crm_debug("Deleting relay op %s ('%s'%s%s for %s), "
                      "replaced by op %s ('%s'%s%s for %s)",
                      relay_op->id, relay_op->action,
                      (relay_op->target == NULL)? "" : " targeting ",
                      pcmk__s(relay_op->target, ""),
                      relay_op->client_name, op_id, relay_op->action,
                      (target == NULL)? "" : " targeting ",
                      pcmk__s(target, ""), client_name);

            g_hash_table_remove(stonith_remote_op_list, relay_op_id);
        }
    }
}

/*!
 * \internal
 * \brief Check whether an API request was sent by a privileged user
 *
 * API commands related to fencing configuration may be done only by privileged
 * IPC users (i.e. root or hacluster), because all other users should go
 * through the CIB to have ACLs applied. If no client was given, this is a peer
 * request, which is always allowed.
 *
 * \param[in] c   IPC client that sent request (or NULL if sent by CPG peer)
 * \param[in] op  Requested API operation (for logging only)
 *
 * \return true if sender is peer or privileged client, otherwise false
 */
static inline bool
is_privileged(const pcmk__client_t *c, const char *op)
{
    if ((c == NULL) || pcmk_is_set(c->flags, pcmk__client_privileged)) {
        return true;
    } else {
        crm_warn("Rejecting IPC request '%s' from unprivileged client %s",
                 pcmk__s(op, ""), pcmk__client_name(c));
        return false;
    }
}

// CRM_OP_REGISTER
static xmlNode *
handle_register_request(pcmk__request_t *request)
{
    xmlNode *reply = pcmk__xe_create(NULL, "reply");

    pcmk__assert(request->ipc_client != NULL);
    crm_xml_add(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER);
    crm_xml_add(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id);
    pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
    pcmk__set_request_flags(request, pcmk__request_reuse_options);
    return reply;
}

// STONITH_OP_EXEC
static xmlNode *
handle_agent_request(pcmk__request_t *request)
{
    execute_agent_action(request->xml, &request->result);
    if (request->result.execution_status == PCMK_EXEC_PENDING) {
        return NULL;
    }
    return fenced_construct_reply(request->xml, NULL, &request->result);
}

// STONITH_OP_TIMEOUT_UPDATE
static xmlNode *
handle_update_timeout_request(pcmk__request_t *request)
{
    const char *call_id = crm_element_value(request->xml, PCMK__XA_ST_CALLID);
    const char *client_id = crm_element_value(request->xml,
                                              PCMK__XA_ST_CLIENTID);
    int op_timeout = 0;

    crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout);
    do_stonith_async_timeout_update(client_id, call_id, op_timeout);
    pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
    return NULL;
}

// STONITH_OP_QUERY
static xmlNode *
handle_query_request(pcmk__request_t *request)
{
    int timeout = 0;
    xmlNode *dev = NULL;
    const char *action = NULL;
    const char *target = NULL;
    const char *client_id = crm_element_value(request->xml,
                                              PCMK__XA_ST_CLIENTID);
    struct st_query_data *query = NULL;

    if (request->peer != NULL) {
        // Record it for the future notification
        create_remote_stonith_op(client_id, request->xml, TRUE);
    }

    /* Delete the DC node's RELAY operation, if any.
*/ remove_relay_op(request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request->xml, LOG_NEVER); if (dev != NULL) { const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) { return NULL; // No query or reply necessary } target = crm_element_value(dev, PCMK__XA_ST_TARGET); action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION); } crm_log_xml_trace(request->xml, "Query"); query = pcmk__assert_alloc(1, sizeof(struct st_query_data)); query->reply = fenced_construct_reply(request->xml, NULL, &request->result); query->remote_peer = pcmk__str_copy(request->peer); query->client_id = pcmk__str_copy(client_id); query->target = pcmk__str_copy(target); query->action = pcmk__str_copy(action); query->call_options = request->call_options; crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout); get_capable_devices(target, action, timeout, pcmk_is_set(query->call_options, st_opt_allow_self_fencing), query, stonith_query_capable_device_cb, st_device_supports_none); return NULL; } // STONITH_OP_NOTIFY static xmlNode * handle_notify_request(pcmk__request_t *request) { const char *flag_name = NULL; pcmk__assert(request->ipc_client != NULL); flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE); if (flag_name != NULL) { crm_debug("Enabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__set_client_flags(request->ipc_client, get_stonith_flag(flag_name)); } flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE); if (flag_name != NULL) { crm_debug("Disabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__clear_client_flags(request->ipc_client, get_stonith_flag(flag_name)); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_request_flags(request, pcmk__request_reuse_options); return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_OK); } // STONITH_OP_RELAY static xmlNode * handle_relay_request(pcmk__request_t *request) { xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml, LOG_TRACE); crm_notice("Received forwarded fencing request from " "%s %s to fence (%s) peer %s", pcmk__request_origin_type(request), pcmk__request_origin(request), crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION), crm_element_value(dev, PCMK__XA_ST_TARGET)); if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) { fenced_set_protocol_error(&request->result); return fenced_construct_reply(request->xml, NULL, &request->result); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); return NULL; } // STONITH_OP_FENCE static xmlNode * handle_fence_request(pcmk__request_t *request) { if (request->peer != NULL) { fence_locally(request->xml, &request->result); } else if (pcmk_is_set(request->call_options, st_opt_manual_ack)) { switch (fenced_handle_manual_confirmation(request->ipc_client, request->xml)) { case pcmk_rc_ok: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); break; case EINPROGRESS: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); break; default: fenced_set_protocol_error(&request->result); break; } } else { const char *alternate_host = NULL; xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml, LOG_TRACE); const char *target = crm_element_value(dev, PCMK__XA_ST_TARGET); const char *action = crm_element_value(dev, 
PCMK__XA_ST_DEVICE_ACTION); const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); if (request->ipc_client != NULL) { int tolerance = 0; crm_notice("Client %s wants to fence (%s) %s using %s", pcmk__request_origin(request), action, target, (device? device : "any device")); crm_element_value_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance); if (stonith_check_fence_tolerance(tolerance, target, action)) { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return fenced_construct_reply(request->xml, NULL, &request->result); } alternate_host = check_alternate_host(target); } else { crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'", request->peer, action, target, (device == NULL)? "(any)" : device); } if (alternate_host != NULL) { const char *client_id = NULL; remote_fencing_op_t *op = NULL; pcmk__node_status_t *node = pcmk__get_node(0, alternate_host, NULL, pcmk__node_search_cluster_member); if (request->ipc_client->id == 0) { client_id = crm_element_value(request->xml, PCMK__XA_ST_CLIENTID); } else { client_id = request->ipc_client->id; } /* Create a duplicate fencing operation to relay with the client ID. * When a query response is received, this operation should be * deleted to avoid keeping the duplicate around. */ op = create_remote_stonith_op(client_id, request->xml, FALSE); crm_xml_add(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY); crm_xml_add(request->xml, PCMK__XA_ST_CLIENTID, request->ipc_client->id); crm_xml_add(request->xml, PCMK__XA_ST_REMOTE_OP, op->id); // @TODO On failure, fail request immediately, or maybe panic pcmk__cluster_send_message(node, pcmk_ipc_fenced, request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } else if (initiate_remote_stonith_op(request->ipc_client, request->xml, FALSE) == NULL) { fenced_set_protocol_error(&request->result); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } } if (request->result.execution_status == PCMK_EXEC_PENDING) { return NULL; } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_FENCE_HISTORY static xmlNode * handle_history_request(pcmk__request_t *request) { xmlNode *reply = NULL; xmlNode *data = NULL; stonith_fence_history(request->xml, &data, request->peer, request->call_options); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (!pcmk_is_set(request->call_options, st_opt_discard_reply)) { /* When the local node broadcasts its history, it sets * st_opt_discard_reply and doesn't need a reply. */ reply = fenced_construct_reply(request->xml, data, &request->result); } pcmk__xml_free(data); return reply; } // STONITH_OP_DEVICE_ADD static xmlNode * handle_device_add_request(pcmk__request_t *request) { const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP); xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml, LOG_ERR); if (is_privileged(request->ipc_client, op)) { int rc = stonith_device_register(dev, FALSE); pcmk__set_result(&request->result, ((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR), stonith__legacy2status(rc), ((rc == pcmk_ok)? NULL : pcmk_strerror(rc))); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must register device via CIB"); } fenced_send_config_notification(op, &request->result, (dev == NULL)? 
NULL : pcmk__xe_id(dev)); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_DEVICE_DEL static xmlNode * handle_device_delete_request(pcmk__request_t *request) { xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml, LOG_ERR); const char *device_id = crm_element_value(dev, PCMK_XA_ID); const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { stonith_device_remove(device_id, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete device via CIB"); } fenced_send_config_notification(op, &request->result, device_id); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_ADD static xmlNode * handle_level_add_request(pcmk__request_t *request) { char *desc = NULL; const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_register_level(request->xml, &desc, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL, &desc); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must add level via CIB"); } fenced_send_config_notification(op, &request->result, desc); free(desc); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_DEL static xmlNode * handle_level_delete_request(pcmk__request_t *request) { char *desc = NULL; const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_unregister_level(request->xml, &desc, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL, &desc); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete level via CIB"); } fenced_send_config_notification(op, &request->result, desc); free(desc); return fenced_construct_reply(request->xml, NULL, &request->result); } // CRM_OP_RM_NODE_CACHE static xmlNode * handle_cache_request(pcmk__request_t *request) { int node_id = 0; const char *name = NULL; crm_element_value_int(request->xml, PCMK_XA_ID, &node_id); name = crm_element_value(request->xml, PCMK_XA_UNAME); pcmk__cluster_forget_cluster_node(node_id, name); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { crm_err("Unknown IPC request %s from %s %s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request)); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", request->op); return fenced_construct_reply(request->xml, NULL, &request->result); } static void fenced_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_REGISTER, handle_register_request }, { STONITH_OP_EXEC, handle_agent_request }, { STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request }, { STONITH_OP_QUERY, handle_query_request }, { STONITH_OP_NOTIFY, handle_notify_request }, { STONITH_OP_RELAY, handle_relay_request }, { STONITH_OP_FENCE, handle_fence_request }, { STONITH_OP_FENCE_HISTORY, handle_history_request }, { STONITH_OP_DEVICE_ADD, handle_device_add_request }, { STONITH_OP_DEVICE_DEL, handle_device_delete_request }, { STONITH_OP_LEVEL_ADD, handle_level_add_request }, { STONITH_OP_LEVEL_DEL, 
handle_level_delete_request }, { CRM_OP_RM_NODE_CACHE, handle_cache_request }, { NULL, handle_unknown_request }, }; fenced_handlers = pcmk__register_handlers(handlers); } void fenced_unregister_handlers(void) { if (fenced_handlers != NULL) { g_hash_table_destroy(fenced_handlers); fenced_handlers = NULL; } } static void handle_request(pcmk__request_t *request) { xmlNode *reply = NULL; const char *reason = NULL; if (fenced_handlers == NULL) { fenced_register_handlers(); } reply = pcmk__process_request(request, fenced_handlers); if (reply != NULL) { if (pcmk_is_set(request->flags, pcmk__request_reuse_options) && (request->ipc_client != NULL)) { /* Certain IPC-only commands must reuse the call options from the * original request rather than the ones set by stonith_send_reply() * -> do_local_reply(). */ pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply, request->ipc_flags); request->ipc_client->request_id = 0; } else { stonith_send_reply(reply, request->call_options, request->peer, request->ipc_client); } pcmk__xml_free(reply); } reason = request->result.exit_reason; crm_debug("Processed %s request from %s %s: %s%s%s%s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk_exec_status_str(request->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } static void handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer) { // Copy, because request might be freed before we want to log this char *op = crm_element_value_copy(request, PCMK__XA_ST_OP); if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) { process_remote_stonith_query(request); } else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE, NULL)) { fenced_process_fencing_reply(request); } else { crm_err("Ignoring unknown %s reply from %s %s", pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); crm_log_xml_warn(request, "UnknownOp"); free(op); return; } crm_debug("Processed %s reply from %s %s", op, ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); free(op); } /*! * \internal * \brief Handle a message from an IPC client or CPG peer * * \param[in,out] client If not NULL, IPC client that sent message * \param[in] id If from IPC client, IPC message ID * \param[in] flags Message flags * \param[in,out] message Message XML * \param[in] remote_peer If not NULL, CPG peer that sent message */ void stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags, xmlNode *message, const char *remote_peer) { uint32_t call_options = st_opt_none; int rc = pcmk_rc_ok; bool is_reply = false; CRM_CHECK(message != NULL, return); if (get_xpath_object("//" PCMK__XE_ST_REPLY, message, LOG_NEVER) != NULL) { is_reply = true; } rc = pcmk__xe_get_flags(message, PCMK__XA_ST_CALLOPT, &call_options, st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from message: %s", pcmk_rc_str(rc)); } crm_debug("Processing %ssynchronous %s %s %u from %s %s", pcmk_is_set(call_options, st_opt_sync_call)? "" : "a", crm_element_value(message, PCMK__XA_ST_OP), (is_reply? "reply" : "request"), id, ((client == NULL)? "peer" : "client"), ((client == NULL)? 
remote_peer : pcmk__client_name(client))); if (pcmk_is_set(call_options, st_opt_sync_call)) { pcmk__assert((client == NULL) || (client->request_id == id)); } if (is_reply) { handle_reply(client, message, remote_peer); } else { pcmk__request_t request = { .ipc_client = client, .ipc_id = id, .ipc_flags = flags, .peer = remote_peer, .xml = message, .call_options = call_options, .result = PCMK__UNKNOWN_RESULT, }; request.op = crm_element_value_copy(request.xml, PCMK__XA_ST_OP); CRM_CHECK(request.op != NULL, return); if (pcmk_is_set(request.call_options, st_opt_sync_call)) { pcmk__set_request_flags(&request, pcmk__request_sync); } handle_request(&request); pcmk__reset_request(&request); } } diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c index 0c7d9815b0..4d542b2a0c 100644 --- a/lib/cib/cib_ops.c +++ b/lib/cib/cib_ops.c @@ -1,813 +1,813 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include // @TODO: Free this via crm_exit() when libcib gets merged with libcrmcommon static GHashTable *operation_table = NULL; static const cib__operation_t cib_ops[] = { { PCMK__CIB_REQUEST_ABS_DELETE, cib__op_abs_delete, cib__op_attr_modifies|cib__op_attr_privileged }, { PCMK__CIB_REQUEST_APPLY_PATCH, cib__op_apply_patch, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_BUMP, cib__op_bump, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_COMMIT_TRANSACT, cib__op_commit_transact, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_writes_through }, { PCMK__CIB_REQUEST_CREATE, cib__op_create, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_DELETE, cib__op_delete, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_ERASE, cib__op_erase, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_IS_PRIMARY, cib__op_is_primary, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_MODIFY, cib__op_modify, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_NOOP, cib__op_noop, cib__op_attr_none }, { CRM_OP_PING, cib__op_ping, cib__op_attr_none }, { // @COMPAT: Drop cib__op_attr_modifies when we drop legacy mode support PCMK__CIB_REQUEST_PRIMARY, cib__op_primary, cib__op_attr_modifies|cib__op_attr_privileged|cib__op_attr_local }, { PCMK__CIB_REQUEST_QUERY, cib__op_query, cib__op_attr_none }, { PCMK__CIB_REQUEST_REPLACE, cib__op_replace, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_writes_through |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_SECONDARY, cib__op_secondary, cib__op_attr_privileged|cib__op_attr_local }, { PCMK__CIB_REQUEST_SHUTDOWN, cib__op_shutdown, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_SYNC_TO_ALL, cib__op_sync_all, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_SYNC_TO_ONE, cib__op_sync_one, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_UPGRADE, cib__op_upgrade, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_writes_through 
|cib__op_attr_transaction }, { PCMK__CIB_REQUEST_SCHEMAS, cib__op_schemas, cib__op_attr_local } }; /*! * \internal * \brief Get the \c cib__operation_t object for a given CIB operation name * * \param[in] op CIB operation name * \param[out] operation Where to store CIB operation object * * \return Standard Pacemaker return code */ int cib__get_operation(const char *op, const cib__operation_t **operation) { pcmk__assert((op != NULL) && (operation != NULL)); if (operation_table == NULL) { operation_table = pcmk__strkey_table(NULL, NULL); for (int lpc = 0; lpc < PCMK__NELEM(cib_ops); lpc++) { const cib__operation_t *oper = &(cib_ops[lpc]); g_hash_table_insert(operation_table, (gpointer) oper->name, (gpointer) oper); } } *operation = g_hash_table_lookup(operation_table, op); if (*operation == NULL) { crm_err("Operation %s is invalid", op); return EINVAL; } return pcmk_rc_ok; } int cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; int result = pcmk_ok; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } CRM_CHECK(*answer == NULL, pcmk__xml_free(*answer)); *answer = NULL; if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } obj_root = pcmk_find_cib_element(existing_cib, section); if (obj_root == NULL) { result = -ENXIO; } else if (options & cib_no_children) { xmlNode *shallow = pcmk__xe_create(*answer, (const char *) obj_root->name); pcmk__xe_copy_attrs(shallow, obj_root, pcmk__xaf_none); *answer = shallow; } else { *answer = obj_root; } if (result == pcmk_ok && *answer == NULL) { crm_err("Error creating query response"); result = -ENOMSG; } return result; } static int update_counter(xmlNode *xml_obj, const char *field, bool reset) { char *new_value = NULL; char *old_value = NULL; int int_value = -1; if (!reset && crm_element_value(xml_obj, field) != NULL) { old_value = crm_element_value_copy(xml_obj, field); } if (old_value != NULL) { int_value = atoi(old_value); new_value = pcmk__itoa(++int_value); } else { new_value = pcmk__str_copy("1"); } crm_trace("Update %s from %s to %s", field, pcmk__s(old_value, "unset"), new_value); crm_xml_add(xml_obj, field, new_value); free(new_value); free(old_value); return pcmk_ok; } int cib_process_erase(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing \"%s\" event", op); if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = createEmptyCib(0); pcmk__xe_copy_attrs(*result_cib, existing_cib, pcmk__xaf_none); update_counter(*result_cib, PCMK_XA_ADMIN_EPOCH, false); *answer = NULL; return result; } int cib_process_upgrade(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int rc = 0; const char *max_schema = crm_element_value(req, PCMK__XA_CIB_SCHEMA_MAX); const char *original_schema = NULL; const char *new_schema = NULL; *answer = NULL; crm_trace("Processing \"%s\" event with max=%s", op, max_schema); original_schema = crm_element_value(existing_cib, PCMK_XA_VALIDATE_WITH); rc = pcmk__update_schema(result_cib, max_schema, true, !pcmk_is_set(options, cib_verbose)); rc = 
pcmk_rc2legacy(rc); new_schema = crm_element_value(*result_cib, PCMK_XA_VALIDATE_WITH); if (pcmk__cmp_schemas_by_name(new_schema, original_schema) > 0) { update_counter(*result_cib, PCMK_XA_ADMIN_EPOCH, false); update_counter(*result_cib, PCMK_XA_EPOCH, true); update_counter(*result_cib, PCMK_XA_NUM_UPDATES, true); return pcmk_ok; } return rc; } int cib_process_bump(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing %s for epoch='%s'", op, pcmk__s(crm_element_value(existing_cib, PCMK_XA_EPOCH), "")); *answer = NULL; update_counter(*result_cib, PCMK_XA_EPOCH, false); return result; } int cib_process_replace(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } *answer = NULL; if (input == NULL) { return -EINVAL; } if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } else if (pcmk__xe_is(input, section)) { section = NULL; } if (pcmk__xe_is(input, PCMK_XE_CIB)) { int updates = 0; int epoch = 0; int admin_epoch = 0; int replace_updates = 0; int replace_epoch = 0; int replace_admin_epoch = 0; const char *reason = NULL; const char *peer = crm_element_value(req, PCMK__XA_SRC); const char *digest = crm_element_value(req, PCMK__XA_DIGEST); if (digest) { char *digest_verify = pcmk__digest_xml(input, true); if (!pcmk__str_eq(digest_verify, digest, pcmk__str_casei)) { crm_err("Digest mismatch on replace from %s: %s vs. 
%s (expected)", peer, digest_verify, digest); reason = "digest mismatch"; } else { crm_info("Digest matched on replace from %s: %s", peer, digest); } free(digest_verify); } else { crm_trace("No digest to verify"); } cib_version_details(existing_cib, &admin_epoch, &epoch, &updates); cib_version_details(input, &replace_admin_epoch, &replace_epoch, &replace_updates); if (replace_admin_epoch < admin_epoch) { reason = PCMK_XA_ADMIN_EPOCH; } else if (replace_admin_epoch > admin_epoch) { /* no more checks */ } else if (replace_epoch < epoch) { reason = PCMK_XA_EPOCH; } else if (replace_epoch > epoch) { /* no more checks */ } else if (replace_updates < updates) { reason = PCMK_XA_NUM_UPDATES; } if (reason != NULL) { crm_info("Replacement %d.%d.%d from %s not applied to %d.%d.%d:" " current %s is greater than the replacement", replace_admin_epoch, replace_epoch, replace_updates, peer, admin_epoch, epoch, updates, reason); result = -pcmk_err_old_data; } else { crm_info("Replaced %d.%d.%d with %d.%d.%d from %s", admin_epoch, epoch, updates, replace_admin_epoch, replace_epoch, replace_updates, peer); } if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = pcmk__xml_copy(NULL, input); } else { xmlNode *obj_root = NULL; obj_root = pcmk_find_cib_element(*result_cib, section); result = pcmk__xe_replace_match(obj_root, input); result = pcmk_rc2legacy(result); if (result != pcmk_ok) { crm_trace("No matching object to replace"); } } return result; } static int delete_child(xmlNode *child, void *userdata) { xmlNode *obj_root = userdata; if (pcmk__xe_delete_match(obj_root, child) != pcmk_rc_ok) { crm_trace("No matching object to delete: %s=%s", child->name, pcmk__xe_id(child)); } return pcmk_rc_ok; } int cib_process_delete(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; crm_trace("Processing \"%s\" event", op); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } obj_root = pcmk_find_cib_element(*result_cib, section); if (pcmk__xe_is(input, section)) { pcmk__xe_foreach_child(input, NULL, delete_child, obj_root); } else { delete_child(input, obj_root); } return pcmk_ok; } int cib_process_modify(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; uint32_t flags = pcmk__xaf_none; crm_trace("Processing \"%s\" event", op); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } obj_root = pcmk_find_cib_element(*result_cib, section); if (obj_root == NULL) { xmlNode *tmp_section = NULL; const char *path = pcmk_cib_parent_name_for(section); if (path == NULL) { return -EINVAL; } tmp_section = pcmk__xe_create(NULL, section); cib_process_xpath(PCMK__CIB_REQUEST_CREATE, 0, path, NULL, tmp_section, NULL, result_cib, answer); pcmk__xml_free(tmp_section); obj_root = pcmk_find_cib_element(*result_cib, section); } CRM_CHECK(obj_root != NULL, return -EINVAL); if (pcmk_is_set(options, cib_score_update)) { flags |= pcmk__xaf_score_update; } if (pcmk__xe_update_match(obj_root, input, flags) != pcmk_rc_ok) { if (options & cib_can_create) { 
pcmk__xml_copy(obj_root, input); } else { return -ENXIO; } } return pcmk_ok; } static int add_cib_object(xmlNode * parent, xmlNode * new_obj) { const char *object_name = NULL; const char *object_id = NULL; if ((parent == NULL) || (new_obj == NULL)) { return -EINVAL; } object_name = (const char *) new_obj->name; if (object_name == NULL) { return -EINVAL; } object_id = pcmk__xe_id(new_obj); if (pcmk__xe_first_child(parent, object_name, ((object_id != NULL)? PCMK_XA_ID : NULL), object_id)) { return -EEXIST; } if (object_id != NULL) { crm_trace("Processing creation of <%s " PCMK_XA_ID "='%s'>", object_name, object_id); } else { crm_trace("Processing creation of <%s>", object_name); } /* @COMPAT PCMK__XA_REPLACE is deprecated since 2.1.6. Due to a legacy use * case, PCMK__XA_REPLACE has special meaning and should not be included in * the newly created object until we can break behavioral backward * compatibility. * * At a compatibility break, drop this and drop the definition of * PCMK__XA_REPLACE. Treat it like any other attribute. */ pcmk__xml_tree_foreach(new_obj, pcmk__xe_remove_attr_cb, (void *) PCMK__XA_REPLACE); pcmk__xml_copy(parent, new_obj); return pcmk_ok; } static bool update_results(xmlNode *failed, xmlNode *target, const char *operation, int return_code) { xmlNode *xml_node = NULL; bool was_error = false; const char *error_msg = NULL; if (return_code != pcmk_ok) { error_msg = pcmk_strerror(return_code); was_error = true; xml_node = pcmk__xe_create(failed, PCMK__XE_FAILED_UPDATE); pcmk__xml_copy(xml_node, target); crm_xml_add(xml_node, PCMK_XA_ID, pcmk__xe_id(target)); crm_xml_add(xml_node, PCMK_XA_OBJECT_TYPE, (const char *) target->name); crm_xml_add(xml_node, PCMK_XA_OPERATION, operation); crm_xml_add(xml_node, PCMK_XA_REASON, error_msg); crm_warn("Action %s failed: %s (code=%d)", operation, error_msg, return_code); } return was_error; } int cib_process_create(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *failed = NULL; int result = pcmk_ok; xmlNode *update_section = NULL; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } else if (pcmk__str_eq(section, PCMK_XE_CIB, pcmk__str_casei)) { section = NULL; } else if (pcmk__xe_is(input, PCMK_XE_CIB)) { section = NULL; } CRM_CHECK(strcmp(op, PCMK__CIB_REQUEST_CREATE) == 0, return -EINVAL); if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } if (section == NULL) { return cib_process_modify(op, options, section, req, input, existing_cib, result_cib, answer); } // @COMPAT Deprecated since 2.1.8 failed = pcmk__xe_create(NULL, PCMK__XE_FAILED); update_section = pcmk_find_cib_element(*result_cib, section); if (pcmk__xe_is(input, section)) { xmlNode *a_child = NULL; for (a_child = pcmk__xml_first_child(input); a_child != NULL; a_child = pcmk__xml_next(a_child)) { result = add_cib_object(update_section, a_child); if (update_results(failed, a_child, op, result)) { break; } } } else { result = add_cib_object(update_section, input); update_results(failed, input, op, result); } if ((result == pcmk_ok) && (failed->children != NULL)) { result = -EINVAL; } if (result != pcmk_ok) { crm_log_xml_err(failed, "CIB Update failures"); *answer = failed; } else { pcmk__xml_free(failed); } return result; } int cib_process_diff(const char *op, int options, const char *section, xmlNode * req, xmlNode * 
input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { const char *originator = NULL; if (req != NULL) { originator = crm_element_value(req, PCMK__XA_SRC); } crm_trace("Processing \"%s\" event from %s%s", op, originator, (pcmk_is_set(options, cib_force_diff)? " (global update)" : "")); if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = pcmk__xml_copy(NULL, existing_cib); return xml_apply_patchset(*result_cib, input, TRUE); } int cib_process_xpath(const char *op, int options, const char *section, const xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { int lpc = 0; int max = 0; int rc = pcmk_ok; bool is_query = pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none); xmlXPathObject *xpathObj = NULL; crm_trace("Processing \"%s\" event", op); if (is_query) { xpathObj = pcmk__xpath_search(existing_cib->doc, section); } else { xpathObj = pcmk__xpath_search((*result_cib)->doc, section); } max = pcmk__xpath_num_results(xpathObj); if ((max < 1) && pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none)) { crm_debug("%s was already removed", section); } else if (max < 1) { crm_debug("%s: %s does not exist", op, section); rc = -ENXIO; } else if (is_query) { if (max > 1) { *answer = pcmk__xe_create(NULL, PCMK__XE_XPATH_QUERY); } } if (pcmk_is_set(options, cib_multiple) && pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none)) { dedupXpathResults(xpathObj); } for (lpc = 0; lpc < max; lpc++) { xmlChar *path = NULL; xmlNode *match = pcmk__xpath_result(xpathObj, lpc); if (match == NULL) { continue; } path = xmlGetNodePath(match); crm_debug("Processing %s op for %s with %s", op, section, path); free(path); if (pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none)) { if (match == *result_cib) { /* Attempting to delete the whole "/cib" */ crm_warn("Cannot perform %s for %s: The xpath is addressing the whole /cib", op, section); rc = -EINVAL; break; } pcmk__xml_free(match); if ((options & cib_multiple) == 0) { break; } } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_MODIFY, pcmk__str_none)) { uint32_t flags = pcmk__xaf_none; if (pcmk_is_set(options, cib_score_update)) { flags |= pcmk__xaf_score_update; } if (pcmk__xe_update_match(match, input, flags) != pcmk_rc_ok) { rc = -ENXIO; } else if ((options & cib_multiple) == 0) { break; } } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_CREATE, pcmk__str_none)) { pcmk__xml_copy(match, input); break; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) { if (options & cib_no_children) { xmlNode *shallow = pcmk__xe_create(*answer, (const char *) match->name); pcmk__xe_copy_attrs(shallow, match, pcmk__xaf_none); if (*answer == NULL) { *answer = shallow; } } else if (options & cib_xpath_address) { char *path = NULL; xmlNode *parent = match; while (parent && parent->type == XML_ELEMENT_NODE) { const char *id = crm_element_value(parent, PCMK_XA_ID); char *new_path = NULL; if (id) { new_path = crm_strdup_printf("/%s[@" PCMK_XA_ID "='%s']" "%s", parent->name, id, pcmk__s(path, "")); } else { new_path = crm_strdup_printf("/%s%s", parent->name, pcmk__s(path, "")); } free(path); path = new_path; parent = parent->parent; } crm_trace("Got: %s", path); if (*answer == NULL) { *answer = pcmk__xe_create(NULL, PCMK__XE_XPATH_QUERY); } parent = pcmk__xe_create(*answer, PCMK__XE_XPATH_QUERY_PATH); crm_xml_add(parent, PCMK_XA_ID, path); free(path); } else if (*answer) { pcmk__xml_copy(*answer, match); } else { *answer = match; } } else if (pcmk__str_eq(op, 
PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { xmlNode *parent = match->parent; pcmk__xml_free(match); pcmk__xml_copy(parent, input); if ((options & cib_multiple) == 0) { break; } } } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); return rc; } diff --git a/lib/common/acl.c b/lib/common/acl.c index 3ffbd4ba41..6ce2369163 100644 --- a/lib/common/acl.c +++ b/lib/common/acl.c @@ -1,917 +1,917 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include "crmcommon_private.h" typedef struct xml_acl_s { enum xml_private_flags mode; gchar *xpath; } xml_acl_t; static void free_acl(void *data) { if (data) { xml_acl_t *acl = data; g_free(acl->xpath); free(acl); } } void pcmk__free_acls(GList *acls) { g_list_free_full(acls, free_acl); } static GList * create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode) { xml_acl_t *acl = NULL; const char *tag = crm_element_value(xml, PCMK_XA_OBJECT_TYPE); const char *ref = crm_element_value(xml, PCMK_XA_REFERENCE); const char *xpath = crm_element_value(xml, PCMK_XA_XPATH); const char *attr = crm_element_value(xml, PCMK_XA_ATTRIBUTE); if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) { // Schema should prevent this, but to be safe ... crm_trace("Ignoring ACL <%s> element without selection criteria", xml->name); return NULL; } acl = pcmk__assert_alloc(1, sizeof (xml_acl_t)); acl->mode = mode; if (xpath) { acl->xpath = g_strdup(xpath); crm_trace("Unpacked ACL <%s> element using xpath: %s", xml->name, acl->xpath); } else { GString *buf = g_string_sized_new(128); if ((ref != NULL) && (attr != NULL)) { // NOTE: schema currently does not allow this pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@" PCMK_XA_ID "='", ref, "' and @", attr, "]", NULL); } else if (ref != NULL) { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@" PCMK_XA_ID "='", ref, "']", NULL); } else if (attr != NULL) { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@", attr, "]", NULL); } else { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), NULL); } acl->xpath = buf->str; g_string_free(buf, FALSE); crm_trace("Unpacked ACL <%s> element as xpath: %s", xml->name, acl->xpath); } return g_list_append(acls, acl); }
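/* The selection-to-XPath mapping above can be condensed into the following
 * standalone sketch. This is a hypothetical helper for illustration only
 * (not part of this patch); it assumes <glib.h> and hard-codes the "id"
 * attribute name that PCMK_XA_ID expands to.
 */
static char *
acl_selector_to_xpath(const char *tag, const char *ref, const char *attr)
{
    GString *buf = g_string_sized_new(128);

    // With no object-type, fall back to matching any element
    g_string_append_printf(buf, "//%s", (tag != NULL)? tag : "*");

    if ((ref != NULL) && (attr != NULL)) {
        // e.g. //primitive[@id='rsc1' and @target-role]
        g_string_append_printf(buf, "[@id='%s' and @%s]", ref, attr);
    } else if (ref != NULL) {
        // e.g. //primitive[@id='rsc1']
        g_string_append_printf(buf, "[@id='%s']", ref);
    } else if (attr != NULL) {
        // e.g. //*[@score]
        g_string_append_printf(buf, "[@%s]", attr);
    }
    return g_string_free(buf, FALSE); // caller owns the returned buffer
}

/*!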
* \internal * \brief Unpack a user, group, or role subtree of the ACLs section * * \param[in] acl_top XML of entire ACLs section * \param[in] acl_entry XML of ACL element being unpacked * \param[in,out] acls List of ACLs unpacked so far * * \return New head of (possibly modified) acls * * \note This function is recursive */ static GList * parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls) { for (const xmlNode *child = pcmk__xe_first_child(acl_entry, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { if (pcmk__xe_is(child, PCMK_XE_ACL_PERMISSION)) { const char *kind = crm_element_value(child, PCMK_XA_KIND); pcmk__assert(kind != NULL); crm_trace("Unpacking <" PCMK_XE_ACL_PERMISSION "> element of " "kind '%s'", kind); if (pcmk__str_eq(kind, PCMK_VALUE_READ, pcmk__str_none)) { acls = create_acl(child, acls, pcmk__xf_acl_read); } else if (pcmk__str_eq(kind, PCMK_VALUE_WRITE, pcmk__str_none)) { acls = create_acl(child, acls, pcmk__xf_acl_write); } else if (pcmk__str_eq(kind, PCMK_VALUE_DENY, pcmk__str_none)) { acls = create_acl(child, acls, pcmk__xf_acl_deny); } else { crm_warn("Ignoring unknown ACL kind '%s'", kind); } } else if (pcmk__xe_is(child, PCMK_XE_ROLE)) { const char *ref_role = crm_element_value(child, PCMK_XA_ID); crm_trace("Unpacking <" PCMK_XE_ROLE "> element"); if (ref_role == NULL) { continue; } for (xmlNode *role = pcmk__xe_first_child(acl_top, NULL, NULL, NULL); role != NULL; role = pcmk__xe_next(role, NULL)) { const char *role_id = NULL; if (!pcmk__xe_is(role, PCMK_XE_ACL_ROLE)) { continue; } role_id = crm_element_value(role, PCMK_XA_ID); if (pcmk__str_eq(ref_role, role_id, pcmk__str_none)) { crm_trace("Unpacking referenced role '%s' in <%s> element", role_id, acl_entry->name); acls = parse_acl_entry(acl_top, role, acls); break; } } } } return acls; } /* */ static const char * acl_to_text(enum xml_private_flags flags) { if (pcmk_is_set(flags, pcmk__xf_acl_deny)) { return "deny"; } else if (pcmk_any_flags_set(flags, pcmk__xf_acl_write|pcmk__xf_acl_create)) { return "read/write"; } else if (pcmk_is_set(flags, pcmk__xf_acl_read)) { return "read"; } return "none"; } void pcmk__apply_acl(xmlNode *xml) { GList *aIter = NULL; xml_doc_private_t *docpriv = xml->doc->_private; xml_node_private_t *nodepriv; xmlXPathObject *xpathObj = NULL; if (!xml_acl_enabled(xml)) { crm_trace("Skipping ACLs for user '%s' because not enabled for this XML", docpriv->user); return; } for (aIter = docpriv->acls; aIter != NULL; aIter = aIter->next) { int max = 0, lpc = 0; xml_acl_t *acl = aIter->data; xpathObj = pcmk__xpath_search(xml->doc, acl->xpath); max = pcmk__xpath_num_results(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); if (match == NULL) { continue; } /* @COMPAT If the ACL's XPath matches a node that is neither an * element nor a document, we apply the ACL to the parent element * rather than to the matched node. For example, if the XPath * matches a "score" attribute, then it applies to every element * that contains a "score" attribute. That is, the XPath expression * "//@score" matches all attributes named "score", but we apply the * ACL to all elements containing such an attribute. * * This behavior is incorrect from an XPath standpoint and is thus * confusing and counterintuitive. The correct way to match all * elements containing a "score" attribute is to use an XPath * predicate: "// *[@score]". (Space inserted after slashes so that * GCC doesn't throw an error about nested comments.) 
* * Additionally, if an XPath expression matches the entire document * (for example, "/"), then the ACL applies to the document's root * element if it exists. * * These behaviors should be changed so that the ACL applies to the * nodes matched by the XPath expression, or so that it doesn't * apply at all if applying an ACL to an attribute doesn't make * sense. * * Unfortunately, we document in Pacemaker Explained that matching * attributes is a valid way to match elements: "Attributes may be * specified in the XPath to select particular elements, but the * permissions apply to the entire element." * * So we have to keep this behavior at least until a compatibility * break. Even then, it's not feasible in the general case to * transform such XPath expressions using XSLT. */ match = pcmk__xpath_match_element(match); if (match == NULL) { continue; } nodepriv = match->_private; pcmk__set_xml_flags(nodepriv, acl->mode); // Build a GString only if tracing is enabled pcmk__if_tracing( { GString *path = pcmk__element_xpath(match); crm_trace("Applying %s ACL to %s matched by %s", acl_to_text(acl->mode), path->str, acl->xpath); g_string_free(path, TRUE); }, {} ); } crm_trace("Applied %s ACL %s (%d match%s)", acl_to_text(acl->mode), acl->xpath, max, ((max == 1)? "" : "es")); - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } } /*! * \internal * \brief Unpack ACLs for a given user into the * metadata of the target XML tree * * Take the description of ACLs from the source XML tree and mark up the * target XML tree with access information for the given user by attaching * it to the relevant nodes * * \param[in] source XML with ACL definitions * \param[in,out] target XML that ACLs will be applied to * \param[in] user Username whose ACLs need to be unpacked */ void pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user) { xml_doc_private_t *docpriv = NULL; if ((target == NULL) || (target->doc == NULL) || (target->doc->_private == NULL)) { return; } docpriv = target->doc->_private; if (!pcmk_acl_required(user)) { crm_trace("Not unpacking ACLs because not required for user '%s'", user); } else if (docpriv->acls == NULL) { xmlNode *acls = get_xpath_object("//" PCMK_XE_ACLS, source, LOG_NEVER); pcmk__str_update(&docpriv->user, user); if (acls) { xmlNode *child = NULL; for (child = pcmk__xe_first_child(acls, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { if (pcmk__xe_is(child, PCMK_XE_ACL_TARGET)) { const char *id = crm_element_value(child, PCMK_XA_NAME); if (id == NULL) { id = crm_element_value(child, PCMK_XA_ID); } if (id && strcmp(id, user) == 0) { crm_debug("Unpacking ACLs for user '%s'", id); docpriv->acls = parse_acl_entry(acls, child, docpriv->acls); } } else if (pcmk__xe_is(child, PCMK_XE_ACL_GROUP)) { const char *id = crm_element_value(child, PCMK_XA_NAME); if (id == NULL) { id = crm_element_value(child, PCMK_XA_ID); } if (id && pcmk__is_user_in_group(user,id)) { crm_debug("Unpacking ACLs for group '%s'", id); docpriv->acls = parse_acl_entry(acls, child, docpriv->acls); } } } } } }
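/* For reference, a minimal <acls> section of the kind unpacked above, with
 * illustrative IDs only (not taken from this patch). For user "alice", the
 * <acl_target> entry matches, and the referenced role's permissions are
 * unpacked recursively by parse_acl_entry():
 *
 *   <acls>
 *     <acl_role id="monitor">
 *       <acl_permission id="monitor-read" kind="read" xpath="/cib"/>
 *     </acl_role>
 *     <acl_target id="alice">
 *       <role id="monitor"/>
 *     </acl_target>
 *   </acls>
 */

/*!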
* \internal * \brief Copy source to target and set xf_acl_enabled flag in target * * \param[in] acl_source XML with ACL definitions * \param[in,out] target XML that ACLs will be applied to * \param[in] user Username whose ACLs need to be set */ void pcmk__enable_acl(xmlNode *acl_source, xmlNode *target, const char *user) { pcmk__unpack_acl(acl_source, target, user); pcmk__set_xml_doc_flag(target, pcmk__xf_acl_enabled); pcmk__apply_acl(target); } static inline bool test_acl_mode(enum xml_private_flags allowed, enum xml_private_flags requested) { if (pcmk_is_set(allowed, pcmk__xf_acl_deny)) { return false; } else if (pcmk_all_flags_set(allowed, requested)) { return true; } else if (pcmk_is_set(requested, pcmk__xf_acl_read) && pcmk_is_set(allowed, pcmk__xf_acl_write)) { return true; } else if (pcmk_is_set(requested, pcmk__xf_acl_create) && pcmk_any_flags_set(allowed, pcmk__xf_acl_write|pcmk__xf_created)) { return true; } return false; } /*! * \internal * \brief Rid XML tree of all unreadable nodes and node properties * * \param[in,out] xml Root XML node to be purged of attributes * * \return true if this node or any of its children are readable * if false is returned, xml will be freed * * \note This function is recursive */ static bool purge_xml_attributes(xmlNode *xml) { xmlNode *child = NULL; xmlAttr *xIter = NULL; bool readable_children = false; xml_node_private_t *nodepriv = xml->_private; if (test_acl_mode(nodepriv->flags, pcmk__xf_acl_read)) { crm_trace("%s[@" PCMK_XA_ID "=%s] is readable", xml->name, pcmk__xe_id(xml)); return true; } xIter = xml->properties; while (xIter != NULL) { xmlAttr *tmp = xIter; const char *prop_name = (const char *)xIter->name; xIter = xIter->next; if (strcmp(prop_name, PCMK_XA_ID) == 0) { continue; } pcmk__xa_remove(tmp, true); } child = pcmk__xml_first_child(xml); while ( child != NULL ) { xmlNode *tmp = child; child = pcmk__xml_next(child); readable_children |= purge_xml_attributes(tmp); } if (!readable_children) { // Nothing readable under here, so purge completely pcmk__xml_free(xml); } return readable_children; } /*! 
* \brief Copy ACL-allowed portions of specified XML * * \param[in] user Username whose ACLs should be used * \param[in] acl_source XML containing ACLs * \param[in] xml XML to be copied * \param[out] result Copy of XML portions readable via ACLs * * \return true if xml exists and ACLs are required for user, false otherwise * \note If this returns true, caller should use \p result rather than \p xml */ bool xml_acl_filtered_copy(const char *user, xmlNode *acl_source, xmlNode *xml, xmlNode **result) { GList *aIter = NULL; xmlNode *target = NULL; xml_doc_private_t *docpriv = NULL; *result = NULL; if ((xml == NULL) || !pcmk_acl_required(user)) { crm_trace("Not filtering XML because ACLs not required for user '%s'", user); return false; } crm_trace("Filtering XML copy using user '%s' ACLs", user); target = pcmk__xml_copy(NULL, xml); if (target == NULL) { return true; } pcmk__enable_acl(acl_source, target, user); docpriv = target->doc->_private; for(aIter = docpriv->acls; aIter != NULL && target; aIter = aIter->next) { int max = 0; xml_acl_t *acl = aIter->data; if (acl->mode != pcmk__xf_acl_deny) { /* Nothing to do */ } else if (acl->xpath) { int lpc = 0; xmlXPathObject *xpathObj = pcmk__xpath_search(target->doc, acl->xpath); max = pcmk__xpath_num_results(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); if (match == NULL) { continue; } // @COMPAT See COMPAT comment in pcmk__apply_acl() match = pcmk__xpath_match_element(match); if (match == NULL) { continue; } if (!purge_xml_attributes(match) && (match == target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); return true; } } crm_trace("ACLs deny user '%s' access to %s (%d %s)", user, acl->xpath, max, pcmk__plural_alt(max, "match", "matches")); - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } } if (!purge_xml_attributes(target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); return true; } if (docpriv->acls) { g_list_free_full(docpriv->acls, free_acl); docpriv->acls = NULL; } else { crm_trace("User '%s' without ACLs denied access to entire XML document", user); pcmk__xml_free(target); target = NULL; } if (target) { *result = target; } return true; } /*! * \internal * \brief Check whether creation of an XML element is implicitly allowed * * Check whether XML is a "scaffolding" element whose creation is implicitly * allowed regardless of ACLs (that is, it is not in the ACL section and has * no attributes other than \c PCMK_XA_ID). * * \param[in] xml XML element to check * * \return true if XML element is implicitly allowed, false otherwise */ static bool implicitly_allowed(const xmlNode *xml) { GString *path = NULL; for (xmlAttr *prop = xml->properties; prop != NULL; prop = prop->next) { if (strcmp((const char *) prop->name, PCMK_XA_ID) != 0) { return false; } } path = pcmk__element_xpath(xml); pcmk__assert(path != NULL); if (strstr((const char *) path->str, "/" PCMK_XE_ACLS "/") != NULL) { g_string_free(path, TRUE); return false; } g_string_free(path, TRUE); return true; } #define display_id(xml) pcmk__s(pcmk__xe_id(xml), "") /*! * \internal * \brief Drop XML nodes created in violation of ACLs * * Given an XML element, free all of its descendant nodes created in violation * of ACLs, with the exception of allowing "scaffolding" elements (i.e. those * that aren't in the ACL section and don't have any attributes other than * \c PCMK_XA_ID). 
* * \param[in,out] xml XML to check * \param[in] check_top Whether to apply checks to argument itself * (if true, xml might get freed) * * \note This function is recursive */ void pcmk__apply_creation_acl(xmlNode *xml, bool check_top) { xml_node_private_t *nodepriv = xml->_private; if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { if (implicitly_allowed(xml)) { crm_trace("Creation of <%s> scaffolding with " PCMK_XA_ID "=\"%s\"" " is implicitly allowed", xml->name, display_id(xml)); } else if (pcmk__check_acl(xml, NULL, pcmk__xf_acl_write)) { crm_trace("ACLs allow creation of <%s> with " PCMK_XA_ID "=\"%s\"", xml->name, display_id(xml)); } else if (check_top) { /* is_root=true should be impossible with check_top=true, but check * for sanity */ bool is_root = (xmlDocGetRootElement(xml->doc) == xml); xml_doc_private_t *docpriv = xml->doc->_private; crm_trace("ACLs disallow creation of %s<%s> with " PCMK_XA_ID "=\"%s\"", (is_root? "root element " : ""), xml->name, display_id(xml)); // pcmk__xml_free() checks ACLs if enabled, which would fail pcmk__clear_xml_flags(docpriv, pcmk__xf_acl_enabled); pcmk__xml_free(xml); if (!is_root) { // If root, the document was freed. Otherwise re-enable ACLs. pcmk__set_xml_flags(docpriv, pcmk__xf_acl_enabled); } return; } else { crm_notice("ACLs would disallow creation of %s<%s> with " PCMK_XA_ID "=\"%s\"", ((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""), xml->name, display_id(xml)); } } for (xmlNode *cIter = pcmk__xml_first_child(xml); cIter != NULL; ) { xmlNode *child = cIter; cIter = pcmk__xml_next(cIter); /* In case it is free'd */ pcmk__apply_creation_acl(child, true); } } /*! * \brief Check whether or not an XML node is ACL-denied * * \param[in] xml node to check * * \return true if XML node exists and is ACL-denied, false otherwise */ bool xml_acl_denied(const xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_doc_private_t *docpriv = xml->doc->_private; return pcmk_is_set(docpriv->flags, pcmk__xf_acl_denied); } return false; } void xml_acl_disable(xmlNode *xml) { if (xml_acl_enabled(xml)) { xml_doc_private_t *docpriv = xml->doc->_private; /* Catch anything that was created but shouldn't have been */ pcmk__apply_acl(xml); pcmk__apply_creation_acl(xml, false); pcmk__clear_xml_flags(docpriv, pcmk__xf_acl_enabled); } } /*! 
* \brief Check whether or not an XML node is ACL-enabled * * \param[in] xml node to check * * \return true if XML node exists and is ACL-enabled, false otherwise */ bool xml_acl_enabled(const xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_doc_private_t *docpriv = xml->doc->_private; return pcmk_is_set(docpriv->flags, pcmk__xf_acl_enabled); } return false; } bool pcmk__check_acl(xmlNode *xml, const char *name, enum xml_private_flags mode) { pcmk__assert((xml != NULL) && (xml->doc != NULL) && (xml->doc->_private != NULL)); if (pcmk__tracking_xml_changes(xml, false) && xml_acl_enabled(xml)) { xmlNode *parent = xml; xml_doc_private_t *docpriv = xml->doc->_private; GString *xpath = NULL; if (docpriv->acls == NULL) { pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "User '%s' without ACLs denied %s " "access to %s", LOG_TRACE, __LINE__, 0, docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } /* Walk the tree upwards looking for xml_acl_* flags * - Creating an attribute requires write permissions for the node * - Creating a child requires write permissions for the parent */ if (name) { xmlAttr *attr = xmlHasProp(xml, (pcmkXmlStr) name); if (attr && mode == pcmk__xf_acl_create) { mode = pcmk__xf_acl_write; } } while (parent && parent->_private) { xml_node_private_t *nodepriv = parent->_private; if (test_acl_mode(nodepriv->flags, mode)) { return true; } else if (pcmk_is_set(nodepriv->flags, pcmk__xf_acl_deny)) { pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "%sACL denies user '%s' %s access " "to %s", LOG_TRACE, __LINE__, 0, (parent != xml)? "Parent ": "", docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } parent = parent->parent; } pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "Default ACL denies user '%s' %s access to " "%s", LOG_TRACE, __LINE__, 0, docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } return true; } /*! * \brief Check whether ACLs are required for a given user * * \param[in] user User name to check * * \return true if the user requires ACLs, false otherwise */ bool pcmk_acl_required(const char *user) { if (pcmk__str_empty(user)) { crm_trace("ACLs not required because no user set"); return false; } else if (!strcmp(user, CRM_DAEMON_USER) || !strcmp(user, "root")) { crm_trace("ACLs not required for privileged user %s", user); return false; } crm_trace("ACLs required for %s", user); return true; } char * pcmk__uid2username(uid_t uid) { struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_INFO, "Cannot get user details for user ID %d", uid); return NULL; } return pcmk__str_copy(pwent->pw_name); }
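/* A minimal usage sketch (hypothetical caller, not part of this patch):
 * per pcmk_acl_required() above, root and CRM_DAEMON_USER bypass ACLs
 * entirely, so callers gate all ACL work on that check.
 */
static void
enable_acls_example(xmlNode *acl_source, xmlNode *target, const char *user)
{
    if (!pcmk_acl_required(user)) {
        return; // privileged or unset user: nothing to unpack or apply
    }
    // Unpack the user's ACLs into target, mark it ACL-enabled, apply them
    pcmk__enable_acl(acl_source, target, user);
}

/*!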
* \internal * \brief Set the ACL user field properly on an XML request * * Multiple user names are potentially involved in an XML request: the effective * user of the current process; the user name known from an IPC client * connection; and the user name obtained from the request itself, whether by * the current standard XML attribute name or an older legacy attribute name. * This function chooses the appropriate one that should be used for ACLs, sets * it in the request (using the standard attribute name, and the legacy name if * given), and returns it. * * \param[in,out] request XML request to update * \param[in] field Alternate name for ACL user name XML attribute * \param[in] peer_user User name as known from IPC connection * * \return ACL user name actually used */ const char * pcmk__update_acl_user(xmlNode *request, const char *field, const char *peer_user) { static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if (effective_user == NULL) { effective_user = pcmk__uid2username(geteuid()); if (effective_user == NULL) { effective_user = pcmk__str_copy("#unprivileged"); crm_err("Unable to determine effective user, assuming unprivileged for ACLs"); } } requested_user = crm_element_value(request, PCMK__XA_ACL_TARGET); if (requested_user == NULL) { /* Currently, different XML attribute names are used for the ACL user in * different contexts (PCMK__XA_ATTR_USER, PCMK__XA_CIB_USER, etc.). * The caller may specify that name as the field argument. * * @TODO Standardize on PCMK__XA_ACL_TARGET and eventually drop the * others once rolling upgrades from versions older than that are no * longer supported. */ requested_user = crm_element_value(request, field); } if (!pcmk__is_privileged(effective_user)) { /* We're not running as a privileged user, set or overwrite any existing * value for PCMK__XA_ACL_TARGET */ user = effective_user; } else if (peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is * set for the request */ user = effective_user; } else if (peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (!pcmk__is_privileged(peer_user)) { /* The peer is not a privileged user, set or overwrite any existing * value for PCMK__XA_ACL_TARGET */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } // This requires pointer comparison, not string comparison if (user != crm_element_value(request, PCMK__XA_ACL_TARGET)) { crm_xml_add(request, PCMK__XA_ACL_TARGET, user); } if (field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } diff --git a/lib/common/xpath.c b/lib/common/xpath.c index 9fe1848146..671dac14a0 100644 --- a/lib/common/xpath.c +++ b/lib/common/xpath.c @@ -1,510 +1,510 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "crmcommon_private.h" #include // xmlXPathObject, etc. 
/* * From xpath2.c * * All the elements returned by an XPath query are pointers to * elements from the tree *except* namespace nodes where the XPath * semantic is different from the implementation in libxml2 tree. * As a result, when a returned node set is freed by calling * xmlXPathFreeObject(), that routine must check the * element type. But a node from the returned set may have been removed * by xmlNodeSetContent() resulting in access to freed data. * * This can be exercised by running * valgrind xpath2 test3.xml '//discarded' discarded * * There are two ways around it: * - make a copy of the pointers to the nodes from the result set * then call xmlXPathFreeObject() and then modify the nodes * or * - remove the references from the node set, if they are not namespace nodes, before calling xmlXPathFreeObject(). */ void freeXpathObject(xmlXPathObjectPtr xpathObj) { int max = pcmk__xpath_num_results(xpathObj); if (xpathObj == NULL) { return; } for (int lpc = 0; lpc < max; lpc++) { if (xpathObj->nodesetval->nodeTab[lpc] && xpathObj->nodesetval->nodeTab[lpc]->type != XML_NAMESPACE_DECL) { xpathObj->nodesetval->nodeTab[lpc] = NULL; } } /* _Now_ it's safe to free it */ xmlXPathFreeObject(xpathObj); } /*! * \internal * \brief Get a node from the result set of evaluating an XPath expression * * Evaluating an XPath expression stores the list of matching nodes in an * \c xmlXPathObject. This function gets the node at a particular index within * that list. * * \param[in,out] xpath_obj XPath object containing result nodes * \param[in] index Index of result node to get * * \return Result node at the given index if possible, or \c NULL otherwise * * \note This has a side effect: it sets the result node at \p index to NULL * within \p xpath_obj, so the result at a given index can be retrieved * only once. This is a workaround to prevent a use-after-free error. * * All elements returned by an XPath query are pointers to elements from * the tree, except namespace nodes (which are allocated separately for * the XPath object's node set). Accordingly, only namespace nodes and the * node set itself are freed when libxml2 frees a node set. * * This logic requires checking the type of every node in the node set. * However, a node may have been freed already while processing an XPath * object -- either directly (for example, with \c xmlFreeNode()) or * indirectly (for example, with \c xmlNodeSetContent()). In that case, * checking the freed node's type while freeing the XPath object is a * use-after-free error. * * To reduce the likelihood of this, when we access a node in the XPath * object, we remove it from the XPath object's node set by setting it to * \c NULL. This approach is adapted from \c xpath2.c in libxml2's * examples. That file also describes a way to reproduce the * use-after-free error. * * However, there are still ways that a use-after-free can occur. For * example, freeing the entire XML tree before freeing an XPath object * that contains pointers to it would be an error. It's dangerous to mix * processing XPath search results with modifications to a tree, and it * must be done with care. */ xmlNode * pcmk__xpath_result(xmlXPathObject *xpath_obj, int index) { xmlNode *match = NULL; CRM_CHECK((xpath_obj != NULL) && (index >= 0), return NULL); match = xmlXPathNodeSetItem(xpath_obj->nodesetval, index); if (match == NULL) { // Previously requested or out of range return NULL; } if (match->type != XML_NAMESPACE_DECL) { xpath_obj->nodesetval->nodeTab[index] = NULL; } return match; }
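/* A minimal usage sketch of the pattern above (illustrative only; the
 * document pointer and XPath expression are assumed, not from this patch):
 */
static void
xpath_iteration_example(xmlDoc *doc)
{
    // Hypothetical expression; any XPath expression works the same way
    xmlXPathObject *xpath_obj = pcmk__xpath_search(doc, "//node_state");
    int max = pcmk__xpath_num_results(xpath_obj);

    for (int i = 0; i < max; i++) {
        // Retrieves the match and NULLs its node-set entry (see note above)
        xmlNode *match = pcmk__xpath_result(xpath_obj, i);

        if (match == NULL) {
            continue; // already retrieved, or a NULL entry
        }
        /* ... read-only processing of match; avoid freeing or rewriting the
         * tree while xpath_obj is still in use ... */
    }
    // Safe now: every retrieved non-namespace entry was cleared above
    xmlXPathFreeObject(xpath_obj);
}

/*!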
* \internal * \brief Get an element node corresponding to an XPath match node * * Each node in an XPath object's result node set may be of an arbitrary type. * This function is guaranteed to return an element node (or \c NULL). * * \param[in] match XML node that matched some XPath expression * * \retval \p match if \p match is an element * \retval Root element of \p match if \p match is a document * \retval match->parent if \p match is not an element but its parent * is an element * \retval \c NULL otherwise * * \todo Phase this out. Code that relies on this behavior is likely buggy. */ xmlNode * pcmk__xpath_match_element(xmlNode *match) { pcmk__assert(match != NULL); switch (match->type) { case XML_ELEMENT_NODE: return match; case XML_DOCUMENT_NODE: // Happens if XPath expression is "/"; return root element instead return xmlDocGetRootElement((xmlDoc *) match); default: if ((match->parent != NULL) && (match->parent->type == XML_ELEMENT_NODE)) { // Probably an attribute; return parent element instead return match->parent; } crm_err("Cannot get element from XPath expression match of type %s", pcmk__xml_element_type_text(match->type)); return NULL; } } void dedupXpathResults(xmlXPathObjectPtr xpathObj) { int max = pcmk__xpath_num_results(xpathObj); if (xpathObj == NULL) { return; } for (int lpc = 0; lpc < max; lpc++) { xmlNode *xml = NULL; gboolean dedup = FALSE; if (xpathObj->nodesetval->nodeTab[lpc] == NULL) { continue; } xml = xpathObj->nodesetval->nodeTab[lpc]->parent; for (; xml; xml = xml->parent) { int lpc2 = 0; for (lpc2 = 0; lpc2 < max; lpc2++) { if (xpathObj->nodesetval->nodeTab[lpc2] == xml) { xpathObj->nodesetval->nodeTab[lpc] = NULL; dedup = TRUE; break; } } if (dedup) { break; } } } } /*! * \internal * \brief Search an XML document using an XPath expression * * \param[in] doc XML document to search * \param[in] path XPath expression to evaluate in the context of \p doc * * \return XPath object containing result of evaluating \p path against \p doc */ xmlXPathObject * pcmk__xpath_search(xmlDoc *doc, const char *path) { pcmkXmlStr xpath_expr = (pcmkXmlStr) path; xmlXPathContext *xpath_context = NULL; xmlXPathObject *xpath_obj = NULL; CRM_CHECK((doc != NULL) && !pcmk__str_empty(path), return NULL); xpath_context = xmlXPathNewContext(doc); pcmk__mem_assert(xpath_context); xpath_obj = xmlXPathEval(xpath_expr, xpath_context); xmlXPathFreeContext(xpath_context); return xpath_obj; } /*! * \brief Run a supplied function for each result of an xpath search * * \param[in,out] xml XML to search * \param[in] xpath XPath search string * \param[in] helper Function to call for each result * \param[in,out] user_data Data to pass to supplied function * * \note The helper function will be passed the XML node of the result, * and the supplied user_data. This function does not otherwise * use user_data. 
*/ void crm_foreach_xpath_result(xmlNode *xml, const char *xpath, void (*helper)(xmlNode*, void*), void *user_data) { xmlXPathObject *xpathObj = NULL; int nresults = 0; CRM_CHECK(xml != NULL, return); xpathObj = pcmk__xpath_search(xml->doc, xpath); nresults = pcmk__xpath_num_results(xpathObj); for (int i = 0; i < nresults; i++) { xmlNode *result = pcmk__xpath_result(xpathObj, i); CRM_LOG_ASSERT(result != NULL); if (result != NULL) { result = pcmk__xpath_match_element(result); CRM_LOG_ASSERT(result != NULL); if (result != NULL) { (*helper)(result, user_data); } } } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); } xmlNode * get_xpath_object(const char *xpath, xmlNode * xml_obj, int error_level) { int max; xmlNode *result = NULL; xmlXPathObject *xpathObj = NULL; char *nodePath = NULL; char *matchNodePath = NULL; if (xpath == NULL) { return xml_obj; /* or return NULL? */ } xpathObj = pcmk__xpath_search(xml_obj->doc, xpath); nodePath = (char *)xmlGetNodePath(xml_obj); max = pcmk__xpath_num_results(xpathObj); if (max < 1) { if (error_level < LOG_NEVER) { do_crm_log(error_level, "No match for %s in %s", xpath, pcmk__s(nodePath, "unknown path")); crm_log_xml_explicit(xml_obj, "Unexpected Input"); } } else if (max > 1) { if (error_level < LOG_NEVER) { int lpc = 0; do_crm_log(error_level, "Too many matches for %s in %s", xpath, pcmk__s(nodePath, "unknown path")); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if (match != NULL) { match = pcmk__xpath_match_element(match); CRM_LOG_ASSERT(match != NULL); if (match != NULL) { matchNodePath = (char *) xmlGetNodePath(match); do_crm_log(error_level, "%s[%d] = %s", xpath, lpc, pcmk__s(matchNodePath, "unrecognizable match")); free(matchNodePath); } } } crm_log_xml_explicit(xml_obj, "Bad Input"); } } else { result = pcmk__xpath_result(xpathObj, 0); if (result != NULL) { result = pcmk__xpath_match_element(result); } } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); free(nodePath); return result; } /*! * \internal * \brief Get an XPath string that matches an XML element as closely as possible * * \param[in] xml The XML element for which to build an XPath string * * \return A \p GString that matches \p xml, or \p NULL if \p xml is \p NULL. * * \note The caller is responsible for freeing the string using * \p g_string_free(). */ GString * pcmk__element_xpath(const xmlNode *xml) { const xmlNode *parent = NULL; GString *xpath = NULL; const char *id = NULL; if (xml == NULL) { return NULL; } parent = xml->parent; xpath = pcmk__element_xpath(parent); if (xpath == NULL) { xpath = g_string_sized_new(256); } // Build xpath like "/" -> "/cib" -> "/cib/configuration" if (parent == NULL) { g_string_append_c(xpath, '/'); } else if (parent->parent == NULL) { g_string_append(xpath, (const gchar *) xml->name); } else { pcmk__g_strcat(xpath, "/", (const char *) xml->name, NULL); } id = pcmk__xe_id(xml); if (id != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", id, "']", NULL); } return xpath; }
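/* A short usage sketch (hypothetical element, not from this patch): for a
 * <primitive id="rsc1"> element under /cib/configuration/resources, the
 * recursion above adds an id predicate at each element that has one.
 */
static void
element_xpath_example(const xmlNode *rsc)
{
    // Yields "/cib/configuration/resources/primitive[@id='rsc1']"
    GString *path = pcmk__element_xpath(rsc);

    crm_trace("Element is at %s", path->str);
    g_string_free(path, TRUE); // per the note above, the caller frees it
}

/*!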
 * \internal
 * \brief Extract a node's ID from an XPath expression string
 *
 * \param[in] xpath  XPath expression string to search
 * \param[in] node   Name of the node whose ID should be extracted
 *
 * \return ID attribute of \p node as it appears in \p xpath (for example,
 *         given xpath "/cib/status/node_state[@id='1']" and node
 *         "node_state", this returns "1")
 */
char *
pcmk__xpath_node_id(const char *xpath, const char *node)
{
    char *retval = NULL;
    char *patt = NULL;
    char *start = NULL;
    char *end = NULL;

    if (node == NULL || xpath == NULL) {
        return retval;
    }

    patt = crm_strdup_printf("/%s[@" PCMK_XA_ID "=", node);
    start = strstr(xpath, patt);

    if (!start) {
        free(patt);
        return retval;
    }

    start += strlen(patt);
    start++;
    end = strstr(start, "\'");
    pcmk__assert(end != NULL);

    retval = strndup(start, end - start);
    free(patt);
    return retval;
}

static int
output_attr_child(xmlNode *child, void *userdata)
{
    pcmk__output_t *out = userdata;

    out->info(out, " Value: %s \t(id=%s)",
              crm_element_value(child, PCMK_XA_VALUE),
              pcmk__s(pcmk__xe_id(child), ""));
    return pcmk_rc_ok;
}

/*!
 * \internal
 * \brief Warn if an XPath query returned multiple nodes with the same ID
 *
 * \param[in,out] out     Output object
 * \param[in]     search  XPath search result, most typically the result of
 *                        calling cib->cmds->query()
 * \param[in]     name    Name searched for
 */
void
pcmk__warn_multiple_name_matches(pcmk__output_t *out, xmlNode *search,
                                 const char *name)
{
    if (out == NULL || name == NULL || search == NULL
        || search->children == NULL) {
        return;
    }

    out->info(out, "Multiple attributes match " PCMK_XA_NAME "=%s", name);
    pcmk__xe_foreach_child(search, NULL, output_attr_child, out);
}

// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START

#include

xmlXPathObjectPtr
xpath_search(const xmlNode *xml_top, const char *path)
{
    CRM_CHECK(xml_top != NULL, return NULL);
    return pcmk__xpath_search(xml_top->doc, path);
}

xmlNode *
getXpathResult(xmlXPathObjectPtr xpathObj, int index)
{
    xmlNode *match = NULL;
    int max = pcmk__xpath_num_results(xpathObj);

    CRM_CHECK(index >= 0, return NULL);
    CRM_CHECK(xpathObj != NULL, return NULL);

    if (index >= max) {
        crm_err("Requested index %d of only %d items", index, max);
        return NULL;

    } else if (xpathObj->nodesetval->nodeTab[index] == NULL) {
        /* Previously requested */
        return NULL;
    }

    match = xpathObj->nodesetval->nodeTab[index];
    CRM_CHECK(match != NULL, return NULL);

    if (xpathObj->nodesetval->nodeTab[index]->type != XML_NAMESPACE_DECL) {
        // See the comment for pcmk__xpath_result()
        xpathObj->nodesetval->nodeTab[index] = NULL;
    }

    switch (match->type) {
        case XML_ELEMENT_NODE:
            return match;

        case XML_DOCUMENT_NODE: // Searched for '/'
            return match->children;

        default:
            if ((match->parent != NULL)
                && (match->parent->type == XML_ELEMENT_NODE)) {
                return match->parent;
            }
            crm_warn("Unsupported XPath match type %d (bug?)", match->type);
            return NULL;
    }
}

// LCOV_EXCL_STOP
// End deprecated API

diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index e43908754a..08d1e71427 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,2731 +1,2731 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include <libxml/xpath.h> // xmlXPathObject
+#include <libxml/xpath.h> // xmlXPathObject, etc.
#include
#include
#include
#include
#include

#include "fencing_private.h"

CRM_TRACE_INIT_DATA(stonith);

// Used as stonith_t:st_private
typedef struct stonith_private_s {
    char *token;
    crm_ipc_t *ipc;
    mainloop_io_t *source;
    GHashTable *stonith_op_callback_table;
    GList *notify_list;
    int notify_refcnt;
    bool notify_deletes;

    void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
} stonith_private_t;

// Used as stonith_event_t:opaque
struct event_private {
    pcmk__action_result_t result;
};

typedef struct stonith_notify_client_s {
    const char *event;
    const char *obj_id;   /* implement one day */
    const char *obj_type; /* implement one day */
    void (*notify) (stonith_t * st, stonith_event_t * e);
    bool delete;
} stonith_notify_client_t;

typedef struct stonith_callback_client_s {
    void (*callback) (stonith_t * st, stonith_callback_data_t * data);
    const char *id;
    void *user_data;
    gboolean only_success;
    gboolean allow_timeout_updates;
    struct timer_rec_s *timer;
} stonith_callback_client_t;

struct notify_blob_s {
    stonith_t *stonith;
    xmlNode *xml;
};

struct timer_rec_s {
    int call_id;
    int timeout;
    guint ref;
    stonith_t *stonith;
};

typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *,
                             xmlNode *, xmlNode *, xmlNode **, xmlNode **);

bool stonith_dispatch(stonith_t * st);
xmlNode *stonith_create_op(int call_id, const char *token, const char *op,
                           xmlNode * data, int call_options);
static int stonith_send_command(stonith_t *stonith, const char *op,
                                xmlNode *data, xmlNode **output_data,
                                int call_options, int timeout);

static void stonith_connection_destroy(gpointer user_data);
static void stonith_send_notification(gpointer data, gpointer user_data);
static int stonith_api_del_notification(stonith_t *stonith,
                                        const char *event);

/*!
 * \brief Get agent namespace by name
 *
 * \param[in] namespace_s  Name of namespace as string
 *
 * \return Namespace as enum value
 */
enum stonith_namespace
stonith_text2namespace(const char *namespace_s)
{
    if (pcmk__str_eq(namespace_s, "any", pcmk__str_null_matches)) {
        return st_namespace_any;

    } else if (!strcmp(namespace_s, "redhat")
               || !strcmp(namespace_s, "stonith-ng")) {
        return st_namespace_rhcs;

    } else if (!strcmp(namespace_s, "internal")) {
        return st_namespace_internal;

    } else if (!strcmp(namespace_s, "heartbeat")) {
        return st_namespace_lha;
    }
    return st_namespace_invalid;
}

/*!
 * \brief Get agent namespace name
 *
 * \param[in] st_namespace  Namespace as enum value
 *
 * \return Namespace name as string
 */
const char *
stonith_namespace2text(enum stonith_namespace st_namespace)
{
    switch (st_namespace) {
        case st_namespace_any:
            return "any";
        case st_namespace_rhcs:
            return "stonith-ng";
        case st_namespace_internal:
            return "internal";
        case st_namespace_lha:
            return "heartbeat";
        default:
            break;
    }
    return "unsupported";
}

/*!
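 */

/* Illustrative round trip (sketch, not part of this patch):
 * stonith_text2namespace("redhat") returns st_namespace_rhcs, and
 * stonith_namespace2text(st_namespace_rhcs) returns "stonith-ng".
 */

/*!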
 * \brief Determine namespace of a fence agent
 *
 * \param[in] agent        Fence agent type
 * \param[in] namespace_s  Name of agent namespace as string, if known
 *
 * \return Namespace of specified agent, as enum value
 */
enum stonith_namespace
stonith_get_namespace(const char *agent, const char *namespace_s)
{
    if (pcmk__str_eq(namespace_s, "internal", pcmk__str_none)) {
        return st_namespace_internal;
    }

    if (stonith__agent_is_rhcs(agent)) {
        return st_namespace_rhcs;
    }

#if HAVE_STONITH_STONITH_H
    if (stonith__agent_is_lha(agent)) {
        return st_namespace_lha;
    }
#endif

    return st_namespace_invalid;
}

gboolean
stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node)
{
    gboolean rv = FALSE;
    stonith_t *stonith_api = st? st : stonith_api_new();
    char *list = NULL;

    if (stonith_api) {
        if (stonith_api->state == stonith_disconnected) {
            int rc = stonith_api->cmds->connect(stonith_api, "stonith-api",
                                                NULL);

            if (rc != pcmk_ok) {
                crm_err("Failed connecting to Stonith-API for watchdog-fencing-query.");
            }
        }

        if (stonith_api->state != stonith_disconnected) {
            /* caveat!!!
             * this might fail when stonithd is just updating the device-list;
             * probably something we should fix as well for other api-calls
             */
            int rc = stonith_api->cmds->list(stonith_api, st_opt_sync_call,
                                             STONITH_WATCHDOG_ID, &list, 0);

            if ((rc != pcmk_ok) || (list == NULL)) {
                /* due to the race described above it can happen that
                 * we drop in here - so as not to make remote nodes
                 * panic on that answer
                 */
                if (rc == -ENODEV) {
                    crm_notice("Cluster does not have watchdog fencing device");
                } else {
                    crm_warn("Could not check for watchdog fencing device: %s",
                             pcmk_strerror(rc));
                }
            } else if (list[0] == '\0') {
                rv = TRUE;
            } else {
                GList *targets = stonith__parse_targets(list);

                rv = pcmk__str_in_list(node, targets, pcmk__str_casei);
                g_list_free_full(targets, free);
            }
            free(list);
            if (!st) {
                /* if we're provided the api we still might have done the
                 * connection - but let's assume the caller won't bother
                 */
                stonith_api->cmds->disconnect(stonith_api);
            }
        }
        if (!st) {
            stonith_api_delete(stonith_api);
        }
    } else {
        crm_err("Stonith-API for watchdog-fencing-query couldn't be created.");
    }
    crm_trace("Pacemaker assumes node %s %sto do watchdog-fencing.",
              node, rv? "" : "not ");
    return rv;
}

gboolean
stonith__watchdog_fencing_enabled_for_node(const char *node)
{
    return stonith__watchdog_fencing_enabled_for_node_api(NULL, node);
}

/* When cycling through the list, we don't want to delete items, so we just
 * mark them. Once we know nobody is using the list, we loop over it to remove
 * the marked items.
 */
static void
foreach_notify_entry(stonith_private_t *private, GFunc func,
                     gpointer user_data)
{
    private->notify_refcnt++;
    g_list_foreach(private->notify_list, func, user_data);
    private->notify_refcnt--;
    if ((private->notify_refcnt == 0) && private->notify_deletes) {
        GList *list_item = private->notify_list;

        private->notify_deletes = FALSE;
        while (list_item != NULL) {
            stonith_notify_client_t *list_client = list_item->data;
            GList *next = g_list_next(list_item);

            if (list_client->delete) {
                free(list_client);
                private->notify_list =
                    g_list_delete_link(private->notify_list, list_item);
            }
            list_item = next;
        }
    }
}

static void
stonith_connection_destroy(gpointer user_data)
{
    stonith_t *stonith = user_data;
    stonith_private_t *native = NULL;
    struct notify_blob_s blob;

    crm_trace("Sending destroyed notification");
    blob.stonith = stonith;
    blob.xml = pcmk__xe_create(NULL, PCMK__XE_NOTIFY);

    native = stonith->st_private;
    native->ipc = NULL;
    native->source = NULL;

    free(native->token);
    native->token =
NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); crm_xml_add(blob.xml, PCMK__XA_SUBT, PCMK__VALUE_ST_NOTIFY_DISCONNECT); foreach_notify_entry(native, stonith_send_notification, &blob); pcmk__xml_free(blob.xml); } xmlNode * create_device_registration_xml(const char *id, enum stonith_namespace standard, const char *agent, const stonith_key_value_t *params, const char *rsc_provides) { xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); #if HAVE_STONITH_STONITH_H if (standard == st_namespace_any) { standard = stonith_get_namespace(agent, NULL); } if (standard == st_namespace_lha) { hash2field((gpointer) "plugin", (gpointer) agent, args); agent = "fence_legacy"; } #endif crm_xml_add(data, PCMK_XA_ID, id); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK_XA_AGENT, agent); if ((standard != st_namespace_any) && (standard != st_namespace_invalid)) { crm_xml_add(data, PCMK__XA_NAMESPACE, stonith_namespace2text(standard)); } if (rsc_provides) { crm_xml_add(data, PCMK__XA_RSC_PROVIDES, rsc_provides); } for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int stonith_api_register_device(stonith_t *st, int call_options, const char *id, const char *namespace_s, const char *agent, const stonith_key_value_t *params) { int rc = 0; xmlNode *data = NULL; data = create_device_registration_xml(id, stonith_text2namespace(namespace_s), agent, params, NULL); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK_XA_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level) { int rc = 0; xmlNode *data = NULL; CRM_CHECK(node || pattern || (attr && value), return -EINVAL); data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); if (node) { crm_xml_add(data, PCMK_XA_TARGET, node); } else if (pattern) { crm_xml_add(data, PCMK_XA_TARGET_PATTERN, pattern); } else { crm_xml_add(data, PCMK_XA_TARGET_ATTRIBUTE, attr); crm_xml_add(data, PCMK_XA_TARGET_VALUE, value); } crm_xml_add_int(data, PCMK_XA_INDEX, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { return stonith_api_remove_level_full(st, options, node, NULL, NULL, NULL, level); } /*! 
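 */

/* Illustrative registration sketch (assumes "st" is a connected stonith_t
 * object; the device name, agent, and host list are examples only):
 *
 *     stonith_key_value_t *params = NULL;
 *
 *     params = stonith_key_value_add(params, "pcmk_host_list", "node1 node2");
 *     st->cmds->register_device(st, st_opt_sync_call, "my-fence-device",
 *                               "stonith-ng", "fence_xvm", params);
 *     stonith_key_value_freeall(params, 1, 1);
 */

/*!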
* \internal * \brief Create XML for fence topology level registration request * * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to register * \param[in] device_list List of devices in level * * \return Newly allocated XML tree on success, NULL otherwise * * \note The caller should set only one of node, pattern or attr/value. */ xmlNode * create_level_registration_xml(const char *node, const char *pattern, const char *attr, const char *value, int level, const stonith_key_value_t *device_list) { GString *list = NULL; xmlNode *data; CRM_CHECK(node || pattern || (attr && value), return NULL); data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add_int(data, PCMK_XA_ID, level); crm_xml_add_int(data, PCMK_XA_INDEX, level); if (node) { crm_xml_add(data, PCMK_XA_TARGET, node); } else if (pattern) { crm_xml_add(data, PCMK_XA_TARGET_PATTERN, pattern); } else { crm_xml_add(data, PCMK_XA_TARGET_ATTRIBUTE, attr); crm_xml_add(data, PCMK_XA_TARGET_VALUE, value); } for (; device_list; device_list = device_list->next) { pcmk__add_separated_word(&list, 1024, device_list->value, ","); } if (list != NULL) { crm_xml_add(data, PCMK_XA_DEVICES, (const char *) list->str); g_string_free(list, TRUE); } return data; } static int stonith_api_register_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, const stonith_key_value_t *device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, pattern, attr, value, level, device_list); CRM_CHECK(data != NULL, return -EINVAL); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, const stonith_key_value_t * device_list) { return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL, level, device_list); } static int stonith_api_device_list(stonith_t *stonith, int call_options, const char *namespace_s, stonith_key_value_t **devices, int timeout) { int count = 0; enum stonith_namespace ns = stonith_text2namespace(namespace_s); if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } #if HAVE_STONITH_STONITH_H // Include Linux-HA agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_lha)) { count += stonith__list_lha_agents(devices); } #endif // Include Red Hat agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) { count += stonith__list_rhcs_agents(devices); } return count; } // See stonith_api_operations_t:metadata() documentation static int stonith_api_device_metadata(stonith_t *stonith, int call_options, const char *agent, const char *namespace_s, char **output, int timeout_sec) { /* By executing meta-data directly, we can get it from stonith_admin when * the cluster is not running, which is important for higher-level tools. 
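     *
     * An illustrative call might look like this (sketch; "st" is assumed to
     * be a connected stonith_t object and fence_xvm is an example agent):
     *
     *     char *meta = NULL;
     *     int rc = st->cmds->metadata(st, st_opt_sync_call, "fence_xvm",
     *                                 NULL, &meta, 30);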
     */
    enum stonith_namespace ns = stonith_get_namespace(agent, namespace_s);

    if (timeout_sec <= 0) {
        // The default constant is in milliseconds
        timeout_sec = PCMK_DEFAULT_ACTION_TIMEOUT_MS / 1000;
    }

    crm_trace("Looking up metadata for %s agent %s",
              stonith_namespace2text(ns), agent);

    switch (ns) {
        case st_namespace_rhcs:
            return stonith__rhcs_metadata(agent, timeout_sec, output);

#if HAVE_STONITH_STONITH_H
        case st_namespace_lha:
            return stonith__lha_metadata(agent, timeout_sec, output);
#endif

        default:
            crm_err("Can't get fence agent '%s' meta-data: No such agent",
                    agent);
            break;
    }
    return -ENODEV;
}

static int
stonith_api_query(stonith_t * stonith, int call_options, const char *target,
                  stonith_key_value_t ** devices, int timeout)
{
    int rc = 0, lpc = 0, max = 0;

    xmlNode *data = NULL;
    xmlNode *output = NULL;
    xmlXPathObject *xpathObj = NULL;

    CRM_CHECK(devices != NULL, return -EINVAL);

    data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID);
    crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__);
    crm_xml_add(data, PCMK__XA_ST_TARGET, target);
    crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, PCMK_ACTION_OFF);
    rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output,
                              call_options, timeout);

    if (rc < 0) {
        pcmk__xml_free(data); // Don't leak the request on the error path
        return rc;
    }

    xpathObj = pcmk__xpath_search(output->doc, "//*[@" PCMK_XA_AGENT "]");
    if (xpathObj) {
        max = pcmk__xpath_num_results(xpathObj);

        for (lpc = 0; lpc < max; lpc++) {
            xmlNode *match = pcmk__xpath_result(xpathObj, lpc);

            CRM_LOG_ASSERT(match != NULL);
            if (match != NULL) {
                xmlChar *match_path = xmlGetNodePath(match);

                crm_info("//*[@" PCMK_XA_AGENT "][%d] = %s", lpc, match_path);
                free(match_path);
                *devices = stonith_key_value_add(*devices, NULL,
                                                 crm_element_value(match,
                                                                   PCMK_XA_ID));
            }
        }

-        freeXpathObject(xpathObj);
+        xmlXPathFreeObject(xpathObj);
    }

    pcmk__xml_free(output);
    pcmk__xml_free(data);
    return max;
}

/*!
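 */

/* Illustrative caller sketch (assumes "st" is a connected stonith_t object;
 * the target name is an example only):
 *
 *     stonith_key_value_t *devices = NULL;
 *     int n = st->cmds->query(st, st_opt_sync_call, "node1", &devices, 10);
 *
 *     // n is the number of devices able to fence node1 (or a negative
 *     // legacy error code)
 *     stonith_key_value_freeall(devices, 1, 1);
 */

/*!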
* \internal * \brief Make a STONITH_OP_EXEC request * * \param[in,out] stonith Fencer connection * \param[in] call_options Bitmask of \c stonith_call_options * \param[in] id Fence device ID that request is for * \param[in] action Agent action to request (list, status, monitor) * \param[in] target Name of target node for requested action * \param[in] timeout_sec Error if not completed within this many seconds * \param[out] output Where to set agent output */ static int stonith_api_call(stonith_t *stonith, int call_options, const char *id, const char *action, const char *target, int timeout_sec, xmlNode **output) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_ST_DEVICE_ID, id); crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, action); crm_xml_add(data, PCMK__XA_ST_TARGET, target); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout_sec); pcmk__xml_free(data); return rc; } static int stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, PCMK_ACTION_LIST, NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = crm_element_value(output, PCMK__XA_ST_OUTPUT); if (list_str) { *list_info = strdup(list_str); } } if (output) { pcmk__xml_free(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, PCMK_ACTION_MONITOR, NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return stonith_api_call(stonith, call_options, id, PCMK_ACTION_STATUS, port, timeout, NULL); } static int stonith_api_fence_with_delay(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance, int delay) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, __func__); crm_xml_add(data, PCMK__XA_ST_TARGET, node); crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, action); crm_xml_add_int(data, PCMK__XA_ST_TIMEOUT, timeout); crm_xml_add_int(data, PCMK__XA_ST_TOLERANCE, tolerance); crm_xml_add_int(data, PCMK__XA_ST_DELAY, delay); rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); pcmk__xml_free(data); return rc; } static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance) { return stonith_api_fence_with_delay(stonith, call_options, node, action, timeout, tolerance, 0); } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { stonith__set_call_options(call_options, target, st_opt_manual_ack); return stonith_api_fence(stonith, call_options, target, PCMK_ACTION_OFF, 0, 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = pcmk__xe_create(NULL, __func__); crm_xml_add(data, PCMK__XA_ST_TARGET, node); } stonith__set_call_options(call_options, node, st_opt_sync_call); rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options, timeout); pcmk__xml_free(data); if (rc == 0) { xmlNode *op = NULL; xmlNode 
*reply = get_xpath_object("//" PCMK__XE_ST_HISTORY, output, LOG_NEVER); for (op = pcmk__xe_first_child(reply, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op, NULL)) { stonith_history_t *kvp; long long completed; long long completed_nsec = 0L; kvp = pcmk__assert_alloc(1, sizeof(stonith_history_t)); kvp->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET); kvp->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION); kvp->origin = crm_element_value_copy(op, PCMK__XA_ST_ORIGIN); kvp->delegate = crm_element_value_copy(op, PCMK__XA_ST_DELEGATE); kvp->client = crm_element_value_copy(op, PCMK__XA_ST_CLIENTNAME); crm_element_value_ll(op, PCMK__XA_ST_DATE, &completed); kvp->completed = (time_t) completed; crm_element_value_ll(op, PCMK__XA_ST_DATE_NSEC, &completed_nsec); kvp->completed_nsec = completed_nsec; crm_element_value_int(op, PCMK__XA_ST_STATE, &kvp->state); kvp->exit_reason = crm_element_value_copy(op, PCMK_XA_EXIT_REASON); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } pcmk__xml_free(output); return rc; } void stonith_history_free(stonith_history_t *history) { stonith_history_t *hp, *hp_old; for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) { free(hp->target); free(hp->action); free(hp->origin); free(hp->delegate); free(hp->client); free(hp->exit_reason); } } static gint stonithlib_GCompareFunc(gconstpointer a, gconstpointer b) { int rc = 0; const stonith_notify_client_t *a_client = a; const stonith_notify_client_t *b_client = b; if (a_client->delete || b_client->delete) { /* make entries marked for deletion not findable */ return -1; } CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. 
%p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = NULL; CRM_CHECK(token != NULL, return NULL); op_msg = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); crm_xml_add(op_msg, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(op_msg, PCMK__XA_ST_OP, op); crm_xml_add_int(op_msg, PCMK__XA_ST_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, PCMK__XA_ST_CALLOPT, call_options); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(op_msg, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->st_private; crm_debug("Disconnecting from the fencer"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } free(native->token); native->token = NULL; stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->st_private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = pcmk__intkey_table(stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { pcmk__intkey_table_remove(private->stonith_op_callback_table, call_id); } return pcmk_ok; } /*! * \internal * \brief Invoke a (single) specified fence action callback * * \param[in,out] st Fencer API connection * \param[in] call_id If positive, call ID of completed fence action, * otherwise legacy return code for early failure * \param[in,out] result Full result for action * \param[in,out] userdata User data to pass to callback * \param[in] callback Fence action callback to invoke */ static void invoke_fence_action_callback(stonith_t *st, int call_id, pcmk__action_result_t *result, void *userdata, void (*callback) (stonith_t *st, stonith_callback_data_t *data)) { stonith_callback_data_t data = { 0, }; data.call_id = call_id; data.rc = pcmk_rc2legacy(stonith__result2rc(result)); data.userdata = userdata; data.opaque = (void *) result; callback(st, &data); } /*! * \internal * \brief Invoke any callbacks registered for a specified fence action result * * Given a fence action result from the fencer, invoke any callback registered * for that action, as well as any global callback registered. 
* * \param[in,out] stonith Fencer API connection * \param[in] msg If non-NULL, fencer reply * \param[in] call_id If \p msg is NULL, call ID of action that timed out */ static void invoke_registered_callbacks(stonith_t *stonith, const xmlNode *msg, int call_id) { stonith_private_t *private = NULL; stonith_callback_client_t *cb_info = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->st_private != NULL, return); private = stonith->st_private; if (msg == NULL) { // Fencer didn't reply in time pcmk__set_result(&result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, "Fencer accepted request but did not reply in time"); CRM_LOG_ASSERT(call_id > 0); } else { // We have the fencer reply if ((crm_element_value_int(msg, PCMK__XA_ST_CALLID, &call_id) != 0) || (call_id <= 0)) { crm_log_xml_warn(msg, "Bad fencer reply"); } stonith__xe_get_result(msg, &result); } if (call_id > 0) { cb_info = pcmk__intkey_table_lookup(private->stonith_op_callback_table, call_id); } if ((cb_info != NULL) && (cb_info->callback != NULL) && (pcmk__result_ok(&result) || !(cb_info->only_success))) { crm_trace("Invoking callback %s for call %d", pcmk__s(cb_info->id, "without ID"), call_id); invoke_fence_action_callback(stonith, call_id, &result, cb_info->user_data, cb_info->callback); } else if ((private->op_callback == NULL) && !pcmk__result_ok(&result)) { crm_warn("Fencing action without registered callback failed: %d (%s%s%s)", result.exit_status, pcmk_exec_status_str(result.execution_status), ((result.exit_reason == NULL)? "" : ": "), ((result.exit_reason == NULL)? "" : result.exit_reason)); crm_log_xml_debug(msg, "Failed fence update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); invoke_fence_action_callback(stonith, call_id, &result, NULL, private->op_callback); } if (cb_info != NULL) { stonith_api_del_callback(stonith, call_id, FALSE); } pcmk__reset_result(&result); } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); invoke_registered_callbacks(timer->stonith, NULL, timer->call_id); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static void set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id, int timeout) { struct timer_rec_s *async_timer = callback->timer; if (timeout <= 0) { return; } if (!async_timer) { async_timer = pcmk__assert_alloc(1, sizeof(struct timer_rec_s)); callback->timer = async_timer; } async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; if (async_timer->ref) { g_source_remove(async_timer->ref); } async_timer->ref = pcmk__create_timer(async_timer->timeout, stonith_async_timeout_handler, async_timer); } static void update_callback_timeout(int call_id, int timeout, stonith_t * st) { stonith_callback_client_t *callback = NULL; stonith_private_t *private = st->st_private; callback = pcmk__intkey_table_lookup(private->stonith_op_callback_table, call_id); if (!callback || !callback->allow_timeout_updates) { return; } set_callback_timeout(callback, st, call_id, timeout); } static int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t *st = 
userdata; stonith_private_t *private = NULL; pcmk__assert(st != NULL); private = st->st_private; blob.stonith = st; blob.xml = pcmk__xml_parse(buffer); if (blob.xml == NULL) { crm_warn("Received malformed message from fencer: %s", buffer); return 0; } /* do callbacks */ type = crm_element_value(blob.xml, PCMK__XA_T); crm_trace("Activating %s callbacks...", type); if (pcmk__str_eq(type, PCMK__VALUE_STONITH_NG, pcmk__str_none)) { invoke_registered_callbacks(st, blob.xml, 0); } else if (pcmk__str_eq(type, PCMK__VALUE_ST_NOTIFY, pcmk__str_none)) { foreach_notify_entry(private, stonith_send_notification, &blob); } else if (pcmk__str_eq(type, PCMK__VALUE_ST_ASYNC_TIMEOUT_VALUE, pcmk__str_none)) { int call_id = 0; int timeout = 0; crm_element_value_int(blob.xml, PCMK__XA_ST_TIMEOUT, &timeout); crm_element_value_int(blob.xml, PCMK__XA_ST_CALLID, &call_id); update_callback_timeout(call_id, timeout, st); } else { crm_err("Unknown message type: %s", type); crm_log_xml_warn(blob.xml, "BadReply"); } pcmk__xml_free(blob.xml); return 1; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = NULL; const char *display_name = name? name : "client"; struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; CRM_CHECK(stonith != NULL, return -EINVAL); native = stonith->st_private; pcmk__assert(native != NULL); crm_debug("Attempting fencer connection by %s with%s mainloop", display_name, (stonith_fd? "out" : "")); stonith->state = stonith_connected_command; if (stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if (native->ipc != NULL) { rc = pcmk__connect_generic_ipc(native->ipc); if (rc == pcmk_rc_ok) { rc = pcmk__ipc_fd(native->ipc, stonith_fd); if (rc != pcmk_rc_ok) { crm_debug("Couldn't get file descriptor for IPC: %s", pcmk_rc_str(rc)); } } if (rc != pcmk_rc_ok) { crm_ipc_close(native->ipc); crm_ipc_destroy(native->ipc); native->ipc = NULL; } } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { rc = -ENOTCONN; } else { xmlNode *reply = NULL; xmlNode *hello = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); crm_xml_add(hello, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(hello, PCMK__XA_ST_OP, CRM_OP_REGISTER); crm_xml_add(hello, PCMK__XA_ST_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply); if (rc < 0) { crm_debug("Couldn't register with the fencer: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); rc = -ECOMM; } else if (reply == NULL) { crm_debug("Couldn't register with the fencer: no reply"); rc = -EPROTO; } else { const char *msg_type = crm_element_value(reply, PCMK__XA_ST_OP); native->token = crm_element_value_copy(reply, PCMK__XA_ST_CLIENTID); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_none)) { crm_debug("Couldn't register with the fencer: invalid reply type '%s'", (msg_type? 
msg_type : "(missing)")); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else if (native->token == NULL) { crm_debug("Couldn't register with the fencer: no token in reply"); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else { crm_debug("Connection to fencer by %s succeeded (registration token: %s)", display_name, native->token); rc = pcmk_ok; } } pcmk__xml_free(reply); pcmk__xml_free(hello); } if (rc != pcmk_ok) { crm_debug("Connection attempt to fencer by %s failed: %s " QB_XS " rc=%d", display_name, pcmk_strerror(rc), rc); stonith->cmds->disconnect(stonith); } return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = pcmk__xe_create(NULL, __func__); stonith_private_t *native = stonith->st_private; if (stonith->state != stonith_disconnected) { crm_xml_add(notify_msg, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); if (enabled) { crm_xml_add(notify_msg, PCMK__XA_ST_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, PCMK__XA_ST_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } else { rc = pcmk_ok; } } pcmk__xml_free(notify_msg); return rc; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t * e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->st_private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = pcmk__assert_alloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static void del_notify_entry(gpointer data, gpointer user_data) { stonith_notify_client_t *entry = data; stonith_t * stonith = user_data; if (!entry->delete) { crm_debug("Removing callback for %s events", entry->event); stonith_api_del_notification(stonith, entry->event); } } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = stonith->st_private; if (event == NULL) { foreach_notify_entry(private, del_notify_entry, stonith); crm_trace("Removed callback"); return pcmk_ok; } crm_debug("Removing callback for %s events", event); new_client = pcmk__assert_alloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; if (private->notify_refcnt) { list_client->delete = TRUE; private->notify_deletes = TRUE; } else { private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); } crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return 
pcmk_ok;
}

static int
stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout,
                         int options, void *user_data,
                         const char *callback_name,
                         void (*callback) (stonith_t * st,
                                           stonith_callback_data_t * data))
{
    stonith_callback_client_t *blob = NULL;
    stonith_private_t *private = NULL;

    CRM_CHECK(stonith != NULL, return -EINVAL);
    CRM_CHECK(stonith->st_private != NULL, return -EINVAL);
    private = stonith->st_private;

    if (call_id == 0) { // Add global callback
        private->op_callback = callback;

    } else if (call_id < 0) { // Call failed immediately, so call callback now
        if (!(options & st_opt_report_only_success)) {
            pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;

            crm_trace("Call failed, calling %s: %s",
                      callback_name, pcmk_strerror(call_id));
            pcmk__set_result(&result, CRM_EX_ERROR,
                             stonith__legacy2status(call_id), NULL);
            invoke_fence_action_callback(stonith, call_id, &result,
                                         user_data, callback);
        } else {
            crm_warn("Fencer call failed: %s", pcmk_strerror(call_id));
        }
        return FALSE;
    }

    blob = pcmk__assert_alloc(1, sizeof(stonith_callback_client_t));
    blob->id = callback_name;
    blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE;
    blob->user_data = user_data;
    blob->callback = callback;
    blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE;

    if (timeout > 0) {
        set_callback_timeout(blob, stonith, call_id, timeout);
    }

    pcmk__intkey_table_insert(private->stonith_op_callback_table, call_id,
                              blob);
    crm_trace("Added callback to %s for call %d", callback_name, call_id);

    return TRUE;
}

static void
stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data)
{
    int call = GPOINTER_TO_INT(key);
    stonith_callback_client_t *blob = value;

    crm_debug("Call %d (%s): pending", call, pcmk__s(blob->id, "no ID"));
}

void
stonith_dump_pending_callbacks(stonith_t * stonith)
{
    stonith_private_t *private = stonith->st_private;

    if (private->stonith_op_callback_table == NULL) {
        return;
    }
    // g_hash_table_foreach() returns void, so don't return its "result"
    g_hash_table_foreach(private->stonith_op_callback_table,
                         stonith_dump_pending_op, NULL);
}

/*!
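 */

/* Illustrative asynchronous-fencing sketch (assumes "st" is a connected
 * stonith_t object attached to a mainloop; names are examples only):
 *
 *     static void
 *     fence_done(stonith_t *st, stonith_callback_data_t *data)
 *     {
 *         crm_info("Fence call %d finished: %s",
 *                  data->call_id, pcmk_strerror(data->rc));
 *     }
 *
 *     int call_id = st->cmds->fence(st, 0, "node1", PCMK_ACTION_REBOOT,
 *                                   120, 0);
 *
 *     st->cmds->register_callback(st, call_id, 120, st_opt_timeout_updates,
 *                                 NULL, "fence_done", fence_done);
 */

/*!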
 * \internal
 * \brief Get the data section of a fencer notification
 *
 * \param[in] msg    Notification XML
 * \param[in] ntype  Notification type
 */
static xmlNode *
get_event_data_xml(xmlNode *msg, const char *ntype)
{
    char *data_addr = crm_strdup_printf("//%s", ntype);
    xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG);

    free(data_addr);
    return data;
}

static stonith_event_t *
xml_to_event(xmlNode *msg)
{
    stonith_event_t *event = pcmk__assert_alloc(1, sizeof(stonith_event_t));
    struct event_private *event_private = NULL;

    event->opaque = pcmk__assert_alloc(1, sizeof(struct event_private));
    event_private = (struct event_private *) event->opaque;

    crm_log_xml_trace(msg, "stonith_notify");

    // All notification types have the operation result and notification subtype
    stonith__xe_get_result(msg, &event_private->result);
    event->operation = crm_element_value_copy(msg, PCMK__XA_ST_OP);

    // @COMPAT The API originally provided the result as a legacy return code
    event->result = pcmk_rc2legacy(stonith__result2rc(&event_private->result));

    // Some notification subtypes have additional information
    if (pcmk__str_eq(event->operation, PCMK__VALUE_ST_NOTIFY_FENCE,
                     pcmk__str_none)) {
        xmlNode *data = get_event_data_xml(msg, event->operation);

        if (data == NULL) {
            crm_err("No data for %s event", event->operation);
            crm_log_xml_notice(msg, "BadEvent");
        } else {
            event->origin = crm_element_value_copy(data, PCMK__XA_ST_ORIGIN);
            event->action = crm_element_value_copy(data,
                                                   PCMK__XA_ST_DEVICE_ACTION);
            event->target = crm_element_value_copy(data, PCMK__XA_ST_TARGET);
            event->executioner = crm_element_value_copy(data,
                                                        PCMK__XA_ST_DELEGATE);
            event->id = crm_element_value_copy(data, PCMK__XA_ST_REMOTE_OP);
            event->client_origin =
                crm_element_value_copy(data, PCMK__XA_ST_CLIENTNAME);
            event->device = crm_element_value_copy(data,
                                                   PCMK__XA_ST_DEVICE_ID);
        }

    } else if (pcmk__str_any_of(event->operation,
                                STONITH_OP_DEVICE_ADD, STONITH_OP_DEVICE_DEL,
                                STONITH_OP_LEVEL_ADD, STONITH_OP_LEVEL_DEL,
                                NULL)) {
        xmlNode *data = get_event_data_xml(msg, event->operation);

        if (data == NULL) {
            crm_err("No data for %s event", event->operation);
            crm_log_xml_notice(msg, "BadEvent");
        } else {
            event->device = crm_element_value_copy(data,
                                                   PCMK__XA_ST_DEVICE_ID);
        }
    }

    return event;
}

static void
event_free(stonith_event_t * event)
{
    struct event_private *event_private = event->opaque;

    free(event->id);
    free(event->operation);
    free(event->origin);
    free(event->action);
    free(event->target);
    free(event->executioner);
    free(event->device);
    free(event->client_origin);
    pcmk__reset_result(&event_private->result);
    free(event->opaque);
    free(event);
}

static void
stonith_send_notification(gpointer data, gpointer user_data)
{
    struct notify_blob_s *blob = user_data;
    stonith_notify_client_t *entry = data;
    stonith_event_t *st_event = NULL;
    const char *event = NULL;

    if (blob->xml == NULL) {
        crm_warn("Skipping callback - NULL message");
        return;
    }

    event = crm_element_value(blob->xml, PCMK__XA_SUBT);

    if (entry == NULL) {
        crm_warn("Skipping callback - NULL callback client");
        return;

    } else if (entry->delete) {
        crm_trace("Skipping callback - marked for deletion");
        return;

    } else if (entry->notify == NULL) {
        crm_warn("Skipping callback - NULL callback");
        return;

    } else if (!pcmk__str_eq(entry->event, event, pcmk__str_none)) {
        crm_trace("Skipping callback - event mismatch %p/%s vs.
%s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); event_free(st_event); } /*! * \internal * \brief Create and send an API request * * \param[in,out] stonith Stonith connection * \param[in] op API operation to request * \param[in] data Data to attach to request * \param[out] output_data If not NULL, will be set to reply if synchronous * \param[in] call_options Bitmask of stonith_call_options to use * \param[in] timeout Error if not completed within this many seconds * * \return pcmk_ok (for synchronous requests) or positive call ID * (for asynchronous requests) on success, -errno otherwise */ static int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = NULL; pcmk__assert((stonith != NULL) && (stonith->st_private != NULL) && (op != NULL)); native = stonith->st_private; if (output_data != NULL) { *output_data = NULL; } if ((stonith->state == stonith_disconnected) || (native->token == NULL)) { return -ENOTCONN; } /* Increment the call ID, which must be positive to avoid conflicting with * error codes. This shouldn't be a problem unless the client mucked with * it or the counter wrapped around. */ stonith->call_id++; if (stonith->call_id < 1) { stonith->call_id = 1; } op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, PCMK__XA_ST_TIMEOUT, timeout); crm_trace("Sending %s message to fencer with timeout %ds", op, timeout); if (data) { const char *delay_s = crm_element_value(data, PCMK__XA_ST_DELAY); if (delay_s) { crm_xml_add(op_msg, PCMK__XA_ST_DELAY, delay_s); } } { enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; if (call_options & st_opt_sync_call) { pcmk__set_ipc_flags(ipc_flags, "stonith command", crm_ipc_client_response); } rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply); } pcmk__xml_free(op_msg); if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); pcmk__xml_free(op_reply); return stonith->call_id; } crm_element_value_int(op_reply, PCMK__XA_ST_CALLID, &reply_id); if (reply_id == stonith->call_id) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; crm_trace("Synchronous reply %d received", reply_id); stonith__xe_get_result(op_reply, &result); rc = pcmk_rc2legacy(stonith__result2rc(&result)); pcmk__reset_result(&result); if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); pcmk__xml_free(op_reply); op_reply = NULL; rc = -ENOMSG; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); pcmk__xml_free(op_reply); op_reply = NULL; rc = -ENOMSG; } done: if (!crm_ipc_connected(native->ipc)) { crm_err("Fencer disconnected"); free(native->token); native->token = NULL; stonith->state = 
stonith_disconnected; } pcmk__xml_free(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; pcmk__assert(st != NULL); private = st->st_private; while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if (!crm_ipc_connected(private->ipc)) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; crm_trace("Destroying %p", stonith); if (stonith->state != stonith_disconnected) { crm_trace("Unregistering notifications and disconnecting %p first", stonith); stonith->cmds->remove_notification(stonith, NULL); rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->st_private; crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table)); g_hash_table_destroy(private->stonith_op_callback_table); crm_trace("Destroying %d notification clients", g_list_length(private->notify_list)); g_list_free_full(private->notify_list, free); free(stonith->st_private); free(stonith->cmds); free(stonith); } else { crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc); } return rc; } void stonith_api_delete(stonith_t * stonith) { crm_trace("Destroying %p", stonith); if(stonith) { stonith->cmds->free(stonith); } } static gboolean is_stonith_param(gpointer key, gpointer value, gpointer user_data) { return pcmk_stonith_param(key); } int stonith__validate(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, GHashTable *params, int timeout_sec, char **output, char **error_output) { int rc = pcmk_rc_ok; /* Use a dummy node name in case the agent requires a target. We assume the * actual target doesn't matter for validation purposes (if in practice, * that is incorrect, we will need to allow the caller to pass the target). 
     */
    const char *target = "node1";
    char *host_arg = NULL;

    if (params != NULL) {
        host_arg = pcmk__str_copy(g_hash_table_lookup(params,
                                                      PCMK_STONITH_HOST_ARGUMENT));

        /* Remove special stonith params from the table before doing anything
         * else
         */
        g_hash_table_foreach_remove(params, is_stonith_param, NULL);
    }

#if PCMK__ENABLE_CIBSECRETS
    rc = pcmk__substitute_secrets(rsc_id, params);
    if (rc != pcmk_rc_ok) {
        crm_warn("Could not replace secret parameters for validation of %s: %s",
                 agent, pcmk_rc_str(rc));
        // rc is standard return value, don't return it in this function
    }
#endif

    if (output) {
        *output = NULL;
    }
    if (error_output) {
        *error_output = NULL;
    }

    if (timeout_sec <= 0) {
        // The default constant is in milliseconds
        timeout_sec = PCMK_DEFAULT_ACTION_TIMEOUT_MS / 1000;
    }

    switch (stonith_get_namespace(agent, namespace_s)) {
        case st_namespace_rhcs:
            rc = stonith__rhcs_validate(st, call_options, target, agent,
                                        params, host_arg, timeout_sec,
                                        output, error_output);
            rc = pcmk_legacy2rc(rc);
            break;

#if HAVE_STONITH_STONITH_H
        case st_namespace_lha:
            rc = stonith__lha_validate(st, call_options, target, agent,
                                       params, timeout_sec, output,
                                       error_output);
            rc = pcmk_legacy2rc(rc);
            break;
#endif

        case st_namespace_invalid:
            errno = ENOENT;
            rc = errno;

            if (error_output) {
                *error_output = crm_strdup_printf("Agent %s not found", agent);
            } else {
                crm_err("Agent %s not found", agent);
            }
            break;

        default:
            errno = EOPNOTSUPP;
            rc = errno;

            if (error_output) {
                *error_output =
                    crm_strdup_printf("Agent %s does not support validation",
                                      agent);
            } else {
                crm_err("Agent %s does not support validation", agent);
            }
            break;
    }

    free(host_arg);
    return rc;
}

static int
stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
                     const char *namespace_s, const char *agent,
                     const stonith_key_value_t *params, int timeout_sec,
                     char **output, char **error_output)
{
    /* Validation should be done directly via the agent, so we can get it from
     * stonith_admin when the cluster is not running, which is important for
     * higher-level tools.
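     *
     * An illustrative call might look like this (sketch; the resource ID,
     * agent, and params list are examples only):
     *
     *     char *out = NULL;
     *     char *err = NULL;
     *     int rc = st->cmds->validate(st, st_opt_sync_call, "vm-fencing",
     *                                 NULL, "fence_xvm", params, 30,
     *                                 &out, &err);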
*/ int rc = pcmk_ok; GHashTable *params_table = pcmk__strkey_table(free, free); // Convert parameter list to a hash table for (; params; params = params->next) { if (!pcmk_stonith_param(params->key)) { pcmk__insert_dup(params_table, params->key, params->value); } } rc = stonith__validate(st, call_options, rsc_id, namespace_s, agent, params_table, timeout_sec, output, error_output); g_hash_table_destroy(params_table); return rc; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); if (new_stonith == NULL) { return NULL; } private = calloc(1, sizeof(stonith_private_t)); if (private == NULL) { free(new_stonith); return NULL; } new_stonith->st_private = private; private->stonith_op_callback_table = pcmk__intkey_table(stonith_destroy_op_callback); private->notify_list = NULL; private->notify_refcnt = 0; private->notify_deletes = FALSE; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t)); if (new_stonith->cmds == NULL) { free(new_stonith->st_private); free(new_stonith); return NULL; } /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->fence_with_delay = stonith_api_fence_with_delay; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list; new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->remove_level_full = stonith_api_remove_level_full; new_stonith->cmds->register_level = stonith_api_register_level; new_stonith->cmds->register_level_full = stonith_api_register_level_full; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; new_stonith->cmds->validate = stonith_api_validate; /* *INDENT-ON* */ return new_stonith; } /*! 
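 */

/* Typical bootstrap sketch (illustrative; "my-client" is an example name):
 *
 *     stonith_t *st = stonith_api_new();
 *
 *     if ((st == NULL)
 *         || (st->cmds->connect(st, "my-client", NULL) != pcmk_ok)) {
 *         // handle allocation or connection failure
 *     }
 *     ...
 *     stonith_api_delete(st); // frees and, if needed, disconnects first
 */

/*!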
* \brief Make a blocking connection attempt to the fencer * * \param[in,out] st Fencer API object * \param[in] name Client name to use with fencer * \param[in] max_attempts Return error if this many attempts fail * * \return pcmk_ok on success, result of last attempt otherwise */ int stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts) { int rc = -EINVAL; // if max_attempts is not positive for (int attempt = 1; attempt <= max_attempts; attempt++) { rc = st->cmds->connect(st, name, NULL); if (rc == pcmk_ok) { return pcmk_ok; } else if (attempt < max_attempts) { crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s " QB_XS " rc=%d", attempt, max_attempts, pcmk_strerror(rc), rc); sleep(2); } } crm_notice("Could not connect to fencer: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = pcmk__assert_alloc(1, sizeof(stonith_key_value_t)); p->key = pcmk__str_copy(key); p->value = pcmk__str_copy(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } #define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON) #define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __func__, args) int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off) { int rc = pcmk_ok; stonith_t *st = stonith_api_new(); const char *action = off? PCMK_ACTION_OFF : PCMK_ACTION_REBOOT; api_log_open(); if (st == NULL) { api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s", action, nodeid, uname); return -EPROTO; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { char *name = (uname == NULL)? pcmk__itoa(nodeid) : strdup(uname); int opts = 0; stonith__set_call_options(opts, name, st_opt_sync_call|st_opt_allow_self_fencing); if ((uname == NULL) && (nodeid > 0)) { stonith__set_call_options(opts, name, st_opt_cs_nodeid); } rc = st->cmds->fence(st, opts, name, action, timeout, 0); free(name); if (rc != pcmk_ok) { api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action); } } stonith_api_delete(st); return rc; } time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress) { int rc = pcmk_ok; time_t when = 0; stonith_t *st = stonith_api_new(); stonith_history_t *history = NULL, *hp = NULL; if (st == NULL) { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: " "API initialization failed", nodeid, uname); return when; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc); } else { int entries = 0; int progress = 0; int completed = 0; int opts = 0; char *name = (uname == NULL)? 
pcmk__itoa(nodeid) : strdup(uname);

        stonith__set_call_options(opts, name, st_opt_sync_call);
        if ((uname == NULL) && (nodeid > 0)) {
            stonith__set_call_options(opts, name, st_opt_cs_nodeid);
        }
        rc = st->cmds->history(st, opts, name, &history, 120);
        free(name);

        for (hp = history; hp; hp = hp->next) {
            entries++;
            if (in_progress) {
                progress++;
                if (hp->state != st_done && hp->state != st_failed) {
                    when = time(NULL);
                }

            } else if (hp->state == st_done) {
                completed++;
                if (hp->completed > when) {
                    when = hp->completed;
                }
            }
        }
        stonith_history_free(history);

        if (rc == pcmk_ok) {
            api_log(LOG_INFO,
                    "Found %d entries for %u/%s: %d in progress, %d completed",
                    entries, nodeid, uname, progress, completed);
        } else {
            api_log(LOG_ERR,
                    "Could not retrieve fence history for %u/%s: %s (%d)",
                    nodeid, uname, pcmk_strerror(rc), rc);
        }
    }

    stonith_api_delete(st);

    if (when) {
        api_log(LOG_INFO, "Node %u/%s last kicked at: %ld",
                nodeid, uname, (long int) when);
    }
    return when;
}

bool
stonith_agent_exists(const char *agent, int timeout)
{
    stonith_t *st = NULL;
    stonith_key_value_t *devices = NULL;
    stonith_key_value_t *dIter = NULL;
    bool rc = FALSE;

    if (agent == NULL) {
        return rc;
    }

    st = stonith_api_new();
    if (st == NULL) {
        crm_err("Could not list fence agents: API memory allocation failed");
        return FALSE;
    }
    st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices,
                          timeout == 0 ? 120 : timeout);

    for (dIter = devices; dIter != NULL; dIter = dIter->next) {
        if (pcmk__str_eq(dIter->value, agent, pcmk__str_none)) {
            rc = TRUE;
            break;
        }
    }

    stonith_key_value_freeall(devices, 1, 1);
    stonith_api_delete(st);
    return rc;
}

const char *
stonith_action_str(const char *action)
{
    if (action == NULL) {
        return "fencing";
    } else if (strcmp(action, PCMK_ACTION_ON) == 0) {
        return "unfencing";
    } else if (strcmp(action, PCMK_ACTION_OFF) == 0) {
        return "turning off";
    } else {
        return action;
    }
}

/*!
 * \internal
 * \brief Parse a target name from one line of a target list string
 *
 * \param[in]     line    One line of a target list string
 * \param[in]     len     String length of line
 * \param[in,out] output  List to add newly allocated target name to
 */
static void
parse_list_line(const char *line, int len, GList **output)
{
    size_t i = 0;
    size_t entry_start = 0;

    /* Skip complaints about additional parameters the device doesn't
     * understand
     *
     * @TODO Document or eliminate the implied restriction of target names
     */
    if (strstr(line, "invalid") || strstr(line, "variable")) {
        crm_debug("Skipping list output line: %s", line);
        return;
    }

    // Process line content, character by character
    for (i = 0; i <= len; i++) {

        if (isspace((unsigned char) line[i])
            || (line[i] == ',') || (line[i] == ';') || (line[i] == '\0')) {
            // We've found a separator (i.e. the end of an entry)

            int rc = 0;
            char *entry = NULL;

            if (i == entry_start) {
                // Skip leading and sequential separators
                entry_start = i + 1;
                continue;
            }

            entry = pcmk__assert_alloc(i - entry_start + 1, sizeof(char));

            /* Read entry, stopping at first separator (the '-' is placed last
             * in the scanset so it is literal rather than a range)
             *
             * @TODO Document or eliminate these character restrictions
             */
            rc = sscanf(line + entry_start, "%[a-zA-Z0-9_.-]", entry);

            if (rc != 1) {
                crm_warn("Could not parse list output entry: %s "
                         QB_XS " entry_start=%zu position=%zu",
                         line + entry_start, entry_start, i);
                free(entry);

            } else if (pcmk__strcase_any_of(entry, PCMK_ACTION_ON,
                                            PCMK_ACTION_OFF, NULL)) {
                /* Some agents print the target status in the list output,
                 * though none are known now (the separate list-status command
                 * is used for this, but it can also print "UNKNOWN"). To
                 * handle this possibility, skip such entries.
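                 *
                 * For example (illustrative), a list-output line such as
                 * "node1,node2 ON" yields the entries "node1" and "node2",
                 * while "ON" is dropped here.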
 * * @TODO Document or eliminate the implied restriction of target * names. */ free(entry); } else { // We have a valid entry *output = g_list_append(*output, entry); } entry_start = i + 1; } } } /*! * \internal * \brief Parse a list of targets from a string * * \param[in] target_spec Target list as a string * * \return List of target names * \note The target list string format is flexible, to allow for user-specified * lists such as pcmk_host_list and the output of an agent's list action * (whether direct or via the API, which escapes newlines). There may be * multiple lines, separated by either a newline or an escaped newline * (backslash n). Each line may have one or more target names, separated * by any combination of whitespace, commas, and semi-colons. Lines * containing "invalid" or "variable" will be ignored entirely. Target * names "on" or "off" (case-insensitive) will be ignored. Target names * may contain only alphanumeric characters, underbars (_), dashes (-), * and dots (.) (if any other character occurs in the name, it and all * subsequent characters in the name will be ignored). * \note The caller is responsible for freeing the result with * g_list_free_full(result, free). */ GList * stonith__parse_targets(const char *target_spec) { GList *targets = NULL; if (target_spec != NULL) { size_t out_len = strlen(target_spec); size_t line_start = 0; // Starting index of line being processed for (size_t i = 0; i <= out_len; ++i) { if ((target_spec[i] == '\n') || (target_spec[i] == '\0') || ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) { // We've reached the end of one line of output int len = i - line_start; if (len > 0) { char *line = strndup(target_spec + line_start, len); line[len] = '\0'; // Because it might be a newline parse_list_line(line, len, &targets); free(line); } if (target_spec[i] == '\\') { ++i; // backslash-n takes up two positions } line_start = i + 1; } } } return targets; } /*! * \internal * \brief Check whether a fencing failure was followed by an equivalent success * * \param[in] event Fencing failure * \param[in] top_history Complete fencing history (must be sorted by * stonith__sort_history() beforehand) * * \return The name of the node that executed the fencing if a later successful * event exists, or NULL if no such event exists */ const char * stonith__later_succeeded(const stonith_history_t *event, const stonith_history_t *top_history) { const char *other = NULL; for (const stonith_history_t *prev_hp = top_history; prev_hp != NULL; prev_hp = prev_hp->next) { if (prev_hp == event) { break; } if ((prev_hp->state == st_done) && pcmk__str_eq(event->target, prev_hp->target, pcmk__str_casei) && pcmk__str_eq(event->action, prev_hp->action, pcmk__str_none) && ((event->completed < prev_hp->completed) || ((event->completed == prev_hp->completed) && (event->completed_nsec < prev_hp->completed_nsec)))) { if ((event->delegate == NULL) || pcmk__str_eq(event->delegate, prev_hp->delegate, pcmk__str_casei)) { // Prefer equivalent fencing by same executioner return prev_hp->delegate; } else if (other == NULL) { // Otherwise remember first successful executioner other = (prev_hp->delegate == NULL)? "some node" : prev_hp->delegate; } } } return other; } /*!
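 * \par Example (a minimal sketch of intended use)
 * Pending actions come first in the sorted result, followed by completed
 * and failed actions ordered newest-first:
 * \code
 *     history = stonith__sort_history(history);
 *     for (stonith_history_t *hp = history; hp != NULL; hp = hp->next) {
 *         crm_info("%s of %s: %s", hp->action, hp->target,
 *                  stonith_op_state_str(hp->state));
 *     }
 * \endcode
 *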
* \internal * \brief Sort fencing history, pending first then by most recently completed * * \param[in,out] history List of stonith actions * * \return New head of sorted \p history */ stonith_history_t * stonith__sort_history(stonith_history_t *history) { stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { tmp = hp->next; if ((hp->state == st_done) || (hp->state == st_failed)) { /* sort into new */ if ((!new) || (hp->completed > new->completed) || ((hp->completed == new->completed) && (hp->completed_nsec > new->completed_nsec))) { hp->next = new; new = hp; } else { np = new; do { if ((!np->next) || (hp->completed > np->next->completed) || ((hp->completed == np->next->completed) && (hp->completed_nsec > np->next->completed_nsec))) { hp->next = np->next; np->next = hp; break; } np = np->next; } while (1); } } else { /* put into pending */ hp->next = pending; pending = hp; } hp = tmp; } /* pending actions don't have a completed-stamp so make them go front */ if (pending) { stonith_history_t *last_pending = pending; while (last_pending->next) { last_pending = last_pending->next; } last_pending->next = new; new = pending; } return new; } /*! * \brief Return string equivalent of an operation state value * * \param[in] state Fencing operation state value * * \return Human-friendly string equivalent of state */ const char * stonith_op_state_str(enum op_state state) { switch (state) { case st_query: return "querying"; case st_exec: return "executing"; case st_done: return "completed"; case st_duplicate: return "duplicate"; case st_failed: return "failed"; } return "unknown"; } stonith_history_t * stonith__first_matching_event(stonith_history_t *history, bool (*matching_fn)(stonith_history_t *, void *), void *user_data) { for (stonith_history_t *hp = history; hp; hp = hp->next) { if (matching_fn(hp, user_data)) { return hp; } } return NULL; } bool stonith__event_state_pending(stonith_history_t *history, void *user_data) { return history->state != st_failed && history->state != st_done; } bool stonith__event_state_eq(stonith_history_t *history, void *user_data) { return history->state == GPOINTER_TO_INT(user_data); } bool stonith__event_state_neq(stonith_history_t *history, void *user_data) { return history->state != GPOINTER_TO_INT(user_data); } void stonith__device_parameter_flags(uint32_t *device_flags, const char *device_name, xmlNode *metadata) { xmlXPathObject *xpath = NULL; int max = 0; int lpc = 0; CRM_CHECK((device_flags != NULL) && (metadata != NULL), return); xpath = pcmk__xpath_search(metadata->doc, "//" PCMK_XE_PARAMETER); max = pcmk__xpath_num_results(xpath); if (max <= 0) { - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *parameter = NULL; xmlNode *match = pcmk__xpath_result(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if (match == NULL) { continue; } parameter = crm_element_value(match, PCMK_XA_NAME); if (pcmk__str_eq(parameter, "plug", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_plug); } else if (pcmk__str_eq(parameter, "port", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_port); } } - freeXpathObject(xpath); + xmlXPathFreeObject(xpath); } /*! 
* \internal * \brief Retrieve fence agent meta-data asynchronously * * \param[in] agent Agent to execute * \param[in] timeout_sec Error if not complete within this time * \param[in] callback Function to call with result (this will always be * called, whether by this function directly or * later via the main loop, and on success the * metadata will be in its result argument's * action_stdout) * \param[in,out] user_data User data to pass to callback * * \return Standard Pacemaker return code * \note The caller must use a main loop. This function is not a * stonith_api_operations_t method because it does not need a stonith_t * object and does not go through the fencer, but executes the agent * directly. */ int stonith__metadata_async(const char *agent, int timeout_sec, void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data), void *user_data) { switch (stonith_get_namespace(agent, NULL)) { case st_namespace_rhcs: { stonith_action_t *action = NULL; int rc = pcmk_ok; action = stonith__action_create(agent, PCMK_ACTION_METADATA, NULL, 0, timeout_sec, NULL, NULL, NULL); rc = stonith__execute_async(action, user_data, callback, NULL); if (rc != pcmk_ok) { callback(0, stonith__action_result(action), user_data); stonith__destroy_action(action); } return pcmk_legacy2rc(rc); } #if HAVE_STONITH_STONITH_H case st_namespace_lha: // LHA metadata is simply synthesized, so simulate async { pcmk__action_result_t result = { .exit_status = CRM_EX_OK, .execution_status = PCMK_EXEC_DONE, .exit_reason = NULL, .action_stdout = NULL, .action_stderr = NULL, }; stonith__lha_metadata(agent, timeout_sec, &result.action_stdout); callback(0, &result, user_data); pcmk__reset_result(&result); return pcmk_rc_ok; } #endif default: { pcmk__action_result_t result = { .exit_status = CRM_EX_NOSUCH, .execution_status = PCMK_EXEC_ERROR_HARD, .exit_reason = crm_strdup_printf("No such agent '%s'", agent), .action_stdout = NULL, .action_stderr = NULL, }; callback(0, &result, user_data); pcmk__reset_result(&result); return ENOENT; } } } /*! * \internal * \brief Return the exit status from an async action callback * * \param[in] data Callback data * * \return Exit status from callback data */ int stonith__exit_status(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return CRM_EX_ERROR; } return ((pcmk__action_result_t *) data->opaque)->exit_status; } /*! * \internal * \brief Return the execution status from an async action callback * * \param[in] data Callback data * * \return Execution status from callback data */ int stonith__execution_status(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return PCMK_EXEC_UNKNOWN; } return ((pcmk__action_result_t *) data->opaque)->execution_status; } /*! * \internal * \brief Return the exit reason from an async action callback * * \param[in] data Callback data * * \return Exit reason from callback data */ const char * stonith__exit_reason(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return NULL; } return ((pcmk__action_result_t *) data->opaque)->exit_reason; } /*! * \internal * \brief Return the exit status from an event notification * * \param[in] event Event * * \return Exit status from event */ int stonith__event_exit_status(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return CRM_EX_ERROR; } else { struct event_private *event_private = event->opaque; return event_private->result.exit_status; } } /*! 
* \internal * \brief Return the execution status from an event notification * * \param[in] event Event * * \return Execution status from event */ int stonith__event_execution_status(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return PCMK_EXEC_UNKNOWN; } else { struct event_private *event_private = event->opaque; return event_private->result.execution_status; } } /*! * \internal * \brief Return the exit reason from an event notification * * \param[in] event Event * * \return Exit reason from event */ const char * stonith__event_exit_reason(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return NULL; } else { struct event_private *event_private = event->opaque; return event_private->result.exit_reason; } } /*! * \internal * \brief Return a human-friendly description of a fencing event * * \param[in] event Event to describe * * \return Newly allocated string with description of \p event * \note The caller is responsible for freeing the return value. * This function asserts on memory errors and never returns NULL. */ char * stonith__event_description(const stonith_event_t *event) { // Use somewhat readable defaults const char *origin = pcmk__s(event->client_origin, "a client"); const char *origin_node = pcmk__s(event->origin, "a node"); const char *executioner = pcmk__s(event->executioner, "the cluster"); const char *device = pcmk__s(event->device, "unknown"); const char *action = pcmk__s(event->action, event->operation); const char *target = pcmk__s(event->target, "no node"); const char *reason = stonith__event_exit_reason(event); const char *status; if (action == NULL) { action = "(unknown)"; } if (stonith__event_execution_status(event) != PCMK_EXEC_DONE) { status = pcmk_exec_status_str(stonith__event_execution_status(event)); } else if (stonith__event_exit_status(event) != CRM_EX_OK) { status = pcmk_exec_status_str(PCMK_EXEC_ERROR); } else { status = crm_exit_str(CRM_EX_OK); } if (pcmk__str_eq(event->operation, PCMK__VALUE_ST_NOTIFY_HISTORY, pcmk__str_none)) { return crm_strdup_printf("Fencing history may have changed"); } else if (pcmk__str_eq(event->operation, STONITH_OP_DEVICE_ADD, pcmk__str_none)) { return crm_strdup_printf("A fencing device (%s) was added", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_DEVICE_DEL, pcmk__str_none)) { return crm_strdup_printf("A fencing device (%s) was removed", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_LEVEL_ADD, pcmk__str_none)) { return crm_strdup_printf("A fencing topology level (%s) was added", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_LEVEL_DEL, pcmk__str_none)) { return crm_strdup_printf("A fencing topology level (%s) was removed", device); } // event->operation should be PCMK__VALUE_ST_NOTIFY_FENCE at this point return crm_strdup_printf("Operation %s of %s by %s for %s@%s: %s%s%s%s (ref=%s)", action, target, executioner, origin, origin_node, status, ((reason == NULL)? "" : " ("), pcmk__s(reason, ""), ((reason == NULL)? "" : ")"), pcmk__s(event->id, "(none)")); } diff --git a/lib/fencing/st_rhcs.c b/lib/fencing/st_rhcs.c index 283b637289..4010398547 100644 --- a/lib/fencing/st_rhcs.c +++ b/lib/fencing/st_rhcs.c @@ -1,328 +1,328 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
 */ #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include #include "fencing_private.h" /*! * \internal * \brief \c scandir() filter for RHCS fence agents * * \param[in] entry Directory entry * * \retval 1 if \p entry is a regular file whose name begins with \c "fence_" * \retval 0 otherwise */ static int rhcs_agent_filter(const struct dirent *entry) { char *buf = NULL; struct stat sb; int rc = 0; if (!pcmk__starts_with(entry->d_name, "fence_")) { goto done; } // glibc doesn't enforce PATH_MAX, so don't limit buf size buf = crm_strdup_printf(PCMK__FENCE_BINDIR "/%s", entry->d_name); if ((stat(buf, &sb) != 0) || !S_ISREG(sb.st_mode)) { goto done; } rc = 1; done: free(buf); return rc; } /*! * \internal * \brief Add available RHCS-compatible agents to a list * * \param[in,out] devices List to add to * * \return Number of agents added */ int stonith__list_rhcs_agents(stonith_key_value_t **devices) { struct dirent **namelist = NULL; const int file_num = scandir(PCMK__FENCE_BINDIR, &namelist, rhcs_agent_filter, alphasort); if (file_num < 0) { int rc = errno; crm_err("Could not list " PCMK__FENCE_BINDIR ": %s", pcmk_rc_str(rc)); free(namelist); return 0; } for (int i = 0; i < file_num; i++) { *devices = stonith_key_value_add(*devices, NULL, namelist[i]->d_name); free(namelist[i]); } free(namelist); return file_num; } static void stonith_rhcs_parameter_not_required(xmlNode *metadata, const char *parameter) { char *xpath = NULL; xmlXPathObject *xpathObj = NULL; CRM_CHECK(metadata != NULL, return); CRM_CHECK(parameter != NULL, return); xpath = crm_strdup_printf("//" PCMK_XE_PARAMETER "[@" PCMK_XA_NAME "='%s']", parameter); /* Fudge metadata so that the parameter isn't required in config * Pacemaker handles and adds it */ xpathObj = pcmk__xpath_search(metadata->doc, xpath); if (pcmk__xpath_num_results(xpathObj) > 0) { xmlNode *tmp = pcmk__xpath_result(xpathObj, 0); crm_xml_add(tmp, "required", "0"); } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); free(xpath); } /*!
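 * \par Example (a minimal sketch; "fence_dummy" is a placeholder agent name)
 * \code
 *     xmlNode *metadata = NULL;
 *
 *     if (stonith__rhcs_get_metadata("fence_dummy", 30, &metadata) == pcmk_ok) {
 *         // ... inspect or modify the metadata XML ...
 *         pcmk__xml_free(metadata);
 *     }
 * \endcode
 *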
 * \brief Execute RHCS-compatible agent's metadata action * * \param[in] agent Agent to execute * \param[in] timeout_sec Action timeout * \param[out] metadata Where to store output xmlNode (or NULL to ignore) * * \return Legacy Pacemaker return code */ static int stonith__rhcs_get_metadata(const char *agent, int timeout_sec, xmlNode **metadata) { xmlNode *xml = NULL; xmlNode *actions = NULL; xmlXPathObject *xpathObj = NULL; stonith_action_t *action = stonith__action_create(agent, PCMK_ACTION_METADATA, NULL, 0, timeout_sec, NULL, NULL, NULL); int rc = stonith__execute(action); pcmk__action_result_t *result = stonith__action_result(action); if (result == NULL) { if (rc < 0) { crm_warn("Could not execute metadata action for %s: %s " QB_XS " rc=%d", agent, pcmk_strerror(rc), rc); } stonith__destroy_action(action); return rc; } if (result->execution_status != PCMK_EXEC_DONE) { crm_warn("Could not execute metadata action for %s: %s", agent, pcmk_exec_status_str(result->execution_status)); rc = pcmk_rc2legacy(stonith__result2rc(result)); stonith__destroy_action(action); return rc; } if (!pcmk__result_ok(result)) { crm_warn("Metadata action for %s returned error code %d", agent, result->exit_status); rc = pcmk_rc2legacy(stonith__result2rc(result)); stonith__destroy_action(action); return rc; } if (result->action_stdout == NULL) { crm_warn("Metadata action for %s returned no data", agent); stonith__destroy_action(action); return -ENODATA; } xml = pcmk__xml_parse(result->action_stdout); stonith__destroy_action(action); if (xml == NULL) { crm_warn("Metadata for %s is invalid", agent); return -pcmk_err_schema_validation; } xpathObj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_ACTIONS); if (pcmk__xpath_num_results(xpathObj) > 0) { actions = pcmk__xpath_result(xpathObj, 0); } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); // Add start and stop (implemented by pacemaker, not agent) to meta-data xpathObj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_ACTION "[@" PCMK_XA_NAME "='" PCMK_ACTION_STOP "']"); if (pcmk__xpath_num_results(xpathObj) <= 0) { xmlNode *tmp = NULL; const char *timeout_str = NULL; timeout_str = pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS); tmp = pcmk__xe_create(actions, PCMK_XE_ACTION); crm_xml_add(tmp, PCMK_XA_NAME, PCMK_ACTION_STOP); crm_xml_add(tmp, PCMK_META_TIMEOUT, timeout_str); tmp = pcmk__xe_create(actions, PCMK_XE_ACTION); crm_xml_add(tmp, PCMK_XA_NAME, PCMK_ACTION_START); crm_xml_add(tmp, PCMK_META_TIMEOUT, timeout_str); } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); // Fudge metadata so parameters are not required in config (pacemaker adds them) stonith_rhcs_parameter_not_required(xml, STONITH_ATTR_ACTION_OP); stonith_rhcs_parameter_not_required(xml, "plug"); stonith_rhcs_parameter_not_required(xml, "port"); if (metadata) { *metadata = xml; } else { pcmk__xml_free(xml); } return pcmk_ok; } /*!
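 * \par Example (a minimal sketch; "fence_dummy" is a placeholder agent name)
 * \code
 *     char *metadata_str = NULL;
 *
 *     if (stonith__rhcs_metadata("fence_dummy", 30, &metadata_str) == pcmk_ok) {
 *         // ... use the metadata string ...
 *         free(metadata_str);
 *     }
 * \endcode
 *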
 * \brief Retrieve metadata for RHCS-compatible fence agent * * \param[in] agent Agent to execute * \param[in] timeout_sec Action timeout * \param[out] output Where to store action output (or NULL to ignore) * * \return Legacy Pacemaker return code */ int stonith__rhcs_metadata(const char *agent, int timeout_sec, char **output) { GString *buffer = NULL; xmlNode *xml = NULL; int rc = stonith__rhcs_get_metadata(agent, timeout_sec, &xml); if (rc != pcmk_ok) { goto done; } buffer = g_string_sized_new(1024); pcmk__xml_string(xml, pcmk__xml_fmt_pretty|pcmk__xml_fmt_text, buffer, 0); if (pcmk__str_empty(buffer->str)) { rc = -pcmk_err_schema_validation; goto done; } if (output != NULL) { pcmk__str_update(output, buffer->str); } done: if (buffer != NULL) { g_string_free(buffer, TRUE); } pcmk__xml_free(xml); return rc; } bool stonith__agent_is_rhcs(const char *agent) { struct stat prop; char *buffer = crm_strdup_printf(PCMK__FENCE_BINDIR "/%s", agent); int rc = stat(buffer, &prop); free(buffer); return (rc >= 0) && S_ISREG(prop.st_mode); } int stonith__rhcs_validate(stonith_t *st, int call_options, const char *target, const char *agent, GHashTable *params, const char * host_arg, int timeout, char **output, char **error_output) { int rc = pcmk_ok; int remaining_timeout = timeout; xmlNode *metadata = NULL; stonith_action_t *action = NULL; pcmk__action_result_t *result = NULL; if (host_arg == NULL) { time_t start_time = time(NULL); rc = stonith__rhcs_get_metadata(agent, remaining_timeout, &metadata); if (rc == pcmk_ok) { uint32_t device_flags = 0; stonith__device_parameter_flags(&device_flags, agent, metadata); if (pcmk_is_set(device_flags, st_device_supports_parameter_port)) { host_arg = "port"; } else if (pcmk_is_set(device_flags, st_device_supports_parameter_plug)) { host_arg = "plug"; } } pcmk__xml_free(metadata); remaining_timeout -= time(NULL) - start_time; if (rc == -ETIME || remaining_timeout <= 0) { return -ETIME; } } else if (pcmk__str_eq(host_arg, PCMK_VALUE_NONE, pcmk__str_casei)) { host_arg = NULL; } action = stonith__action_create(agent, PCMK_ACTION_VALIDATE_ALL, target, 0, remaining_timeout, params, NULL, host_arg); rc = stonith__execute(action); result = stonith__action_result(action); if (result != NULL) { rc = pcmk_rc2legacy(stonith__result2rc(result)); // Take ownership of output so stonith__destroy_action() doesn't free it if (output != NULL) { *output = result->action_stdout; result->action_stdout = NULL; } if (error_output != NULL) { *error_output = result->action_stderr; result->action_stderr = NULL; } } stonith__destroy_action(action); return rc; } diff --git a/lib/pacemaker/pcmk_rule.c b/lib/pacemaker/pcmk_rule.c index 0107ab6da9..e5c14a1be3 100644 --- a/lib/pacemaker/pcmk_rule.c +++ b/lib/pacemaker/pcmk_rule.c @@ -1,212 +1,212 @@ /* * Copyright 2022-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include #include #include #include #include "libpacemaker_private.h" #define XPATH_NODE_RULE "//" PCMK_XE_RULE "[@" PCMK_XA_ID "='%s']" /*!
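 * \par Example (a minimal sketch; "my-rule" is a placeholder rule ID)
 * \code
 *     const char *error = NULL;
 *     int rc = eval_rule(scheduler, "my-rule", &error);
 *
 *     if ((rc != pcmk_rc_ok) && (error != NULL)) {
 *         crm_warn("Could not check rule: %s", error);
 *     }
 * \endcode
 *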
* \internal * \brief Check whether a given rule is in effect * * \param[in] scheduler Scheduler data * \param[in] rule_id The ID of the rule to check * \param[out] error Where to store a rule evaluation error message * * \return Standard Pacemaker return code */ static int eval_rule(pcmk_scheduler_t *scheduler, const char *rule_id, const char **error) { xmlNodePtr cib_constraints = NULL; xmlNodePtr match = NULL; xmlXPathObject *xpath_obj = NULL; char *xpath = NULL; int rc = pcmk_rc_ok; int num_results = 0; *error = NULL; /* Rules are under the constraints node in the XML, so first find that. */ cib_constraints = pcmk_find_cib_element(scheduler->input, PCMK_XE_CONSTRAINTS); /* Get all rules matching the given ID that are also simple enough for us * to check. For the moment, these rules must only have a single * date_expression child and: * - Do not have a date_spec operation, or * - Have a date_spec operation that contains years= * * We do this in steps to provide better error messages. First, check that * there's any rule with the given ID. */ xpath = crm_strdup_printf(XPATH_NODE_RULE, rule_id); xpath_obj = pcmk__xpath_search(cib_constraints->doc, xpath); num_results = pcmk__xpath_num_results(xpath_obj); free(xpath); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); if (num_results == 0) { *error = "Rule not found"; return ENXIO; } if (num_results > 1) { // Should not be possible; schema prevents this *error = "Found more than one rule with matching ID"; return pcmk_rc_duplicate_id; } /* Next, make sure it has exactly one date_expression. */ xpath = crm_strdup_printf(XPATH_NODE_RULE "//date_expression", rule_id); xpath_obj = pcmk__xpath_search(cib_constraints->doc, xpath); num_results = pcmk__xpath_num_results(xpath_obj); free(xpath); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); if (num_results != 1) { if (num_results == 0) { *error = "Rule does not have a date expression"; } else { *error = "Rule has more than one date expression"; } return EOPNOTSUPP; } /* Then, check that it's something we actually support. */ xpath = crm_strdup_printf(XPATH_NODE_RULE "//" PCMK_XE_DATE_EXPRESSION "[@" PCMK_XA_OPERATION "!='" PCMK_VALUE_DATE_SPEC "']", rule_id); xpath_obj = pcmk__xpath_search(cib_constraints->doc, xpath); num_results = pcmk__xpath_num_results(xpath_obj); free(xpath); if (num_results == 0) { - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); xpath = crm_strdup_printf(XPATH_NODE_RULE "//" PCMK_XE_DATE_EXPRESSION "[@" PCMK_XA_OPERATION "='" PCMK_VALUE_DATE_SPEC "' " "and " PCMK_XE_DATE_SPEC "/@" PCMK_XA_YEARS "]", rule_id); xpath_obj = pcmk__xpath_search(cib_constraints->doc, xpath); num_results = pcmk__xpath_num_results(xpath_obj); free(xpath); if (num_results == 0) { - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); *error = "Rule must either not use " PCMK_XE_DATE_SPEC ", or use " PCMK_XE_DATE_SPEC " with " PCMK_XA_YEARS "="; return EOPNOTSUPP; } } match = pcmk__xpath_result(xpath_obj, 0); /* We should have ensured this with the xpath query above, but double- * checking can't hurt. */ pcmk__assert((match != NULL) && (pcmk__condition_type(match) == pcmk__condition_datetime)); rc = pcmk__evaluate_date_expression(match, scheduler->priv->now, NULL); if ((rc != pcmk_rc_ok) && (rc != pcmk_rc_within_range)) { // Malformed or missing *error = "Error parsing rule"; } - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); return rc; } /*! 
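 * \par Example (a minimal sketch; the rule IDs are placeholders)
 * \code
 *     const char *rule_ids[] = { "rule1", "rule2", NULL };
 *     int rc = pcmk__check_rules(out, input, NULL, rule_ids);
 * \endcode
 *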
* \internal * \brief Check whether each rule in a list is in effect * * \param[in,out] out Output object * \param[in] input The CIB XML to check (if \c NULL, use current CIB) * \param[in] date Check whether the rule is in effect at this date and * time (if \c NULL, use current date and time) * \param[in] rule_ids The IDs of the rules to check, as a NULL- * terminated list. * * \return Standard Pacemaker return code */ int pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date, const char **rule_ids) { pcmk_scheduler_t *scheduler = NULL; int rc = pcmk_rc_ok; pcmk__assert(out != NULL); if (rule_ids == NULL) { // Trivial case; every rule specified is in effect return pcmk_rc_ok; } rc = pcmk__init_scheduler(out, input, date, &scheduler); if (rc != pcmk_rc_ok) { return rc; } for (const char **rule_id = rule_ids; *rule_id != NULL; rule_id++) { const char *error = NULL; int last_rc = eval_rule(scheduler, *rule_id, &error); out->message(out, "rule-check", *rule_id, last_rc, error); if (last_rc != pcmk_rc_ok) { rc = last_rc; } } pcmk_free_scheduler(scheduler); return rc; } // Documented in pacemaker.h int pcmk_check_rules(xmlNodePtr *xml, xmlNodePtr input, const crm_time_t *date, const char **rule_ids) { pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; rc = pcmk__xml_output_new(&out, xml); if (rc != pcmk_rc_ok) { return rc; } pcmk__register_lib_messages(out); rc = pcmk__check_rules(out, input, date, rule_ids); pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml); return rc; } diff --git a/lib/pacemaker/tests/pcmk_ticket/pcmk__get_ticket_state_test.c b/lib/pacemaker/tests/pcmk_ticket/pcmk__get_ticket_state_test.c index 676c3b0bef..cba881968c 100644 --- a/lib/pacemaker/tests/pcmk_ticket/pcmk__get_ticket_state_test.c +++ b/lib/pacemaker/tests/pcmk_ticket/pcmk__get_ticket_state_test.c @@ -1,184 +1,184 @@ /* * Copyright 2024-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include static char *cib_path = NULL; static void cib_not_connected(void **state) { xmlNode *xml = NULL; cib_t *cib = cib_new(); /* Without any special setup, cib_new() here will use the native CIB which * means IPC calls. But there's nothing listening for those calls, so * signon() will return ENOTCONN. Check that we handle that. 
 */ assert_int_equal(pcmk__get_ticket_state(cib, "ticketA", &xml), ENOTCONN); cib__clean_up_connection(&cib); } static int setup_test(void **state) { cib_path = pcmk__cib_test_copy_cib("tickets.xml"); if (cib_path == NULL) { return -1; } return 0; } static int teardown_test(void **state) { pcmk__cib_test_cleanup(cib_path); cib_path = NULL; return 0; } static void bad_arguments(void **state) { xmlNode *xml = NULL; cib_t *cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); pcmk__assert_asserts(pcmk__get_ticket_state(NULL, "ticketA", &xml)); pcmk__assert_asserts(pcmk__get_ticket_state(cib, "ticketA", NULL)); cib__clean_up_connection(&cib); } static void unknown_ticket(void **state) { xmlNode *xml = NULL; cib_t *cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); assert_int_equal(pcmk__get_ticket_state(cib, "XYZ", &xml), ENXIO); pcmk__xml_free(xml); cib__clean_up_connection(&cib); } static void ticket_exists(void **state) { xmlNode *xml = NULL; xmlXPathObject *xpath_obj = NULL; cib_t *cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); assert_int_equal(pcmk__get_ticket_state(cib, "ticketA", &xml), pcmk_rc_ok); /* Verify that the XML result has only one <ticket_state>, and that its ID is * what we asked for. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK__XE_TICKET_STATE "[@" PCMK_XA_ID "=\"ticketA\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); cib__clean_up_connection(&cib); } static void multiple_tickets(void **state) { xmlNode *xml = NULL; xmlNode *ticket_node = NULL; xmlXPathObject *xpath_obj = NULL; cib_t *cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); assert_int_equal(pcmk__get_ticket_state(cib, NULL, &xml), pcmk_rc_ok); /* Verify that the XML result has four <ticket_state> elements, and that their * IDs are as expected. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK__XE_TICKET_STATE); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 4); ticket_node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketA"); ticket_node = pcmk__xpath_result(xpath_obj, 1); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketB"); ticket_node = pcmk__xpath_result(xpath_obj, 2); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketC"); ticket_node = pcmk__xpath_result(xpath_obj, 3); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketC"); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); cib__clean_up_connection(&cib); } static void duplicate_tickets(void **state) { xmlNode *xml = NULL; xmlXPathObject *xpath_obj = NULL; cib_t *cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); assert_int_equal(pcmk__get_ticket_state(cib, "ticketC", &xml), pcmk_rc_duplicate_id); /* Verify that the XML result has two <ticket_state> elements, and that their * IDs are as expected. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK__XE_TICKET_STATE "[@" PCMK_XA_ID "=\"ticketC\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 2); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); cib__clean_up_connection(&cib); } /* There are two kinds of tests in this file: * * (1) Those that test what happens if the CIB is not set up correctly, and * (2) Those that test what happens when run against a CIB. * * Therefore, we need two kinds of setup/teardown functions.
We only do * minimal overall setup for the entire group, and then set up the CIB for * those tests that need it. */ PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test_setup_teardown(cib_not_connected, setup_test, teardown_test), cmocka_unit_test_setup_teardown(bad_arguments, setup_test, teardown_test), cmocka_unit_test_setup_teardown(unknown_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(ticket_exists, setup_test, teardown_test), cmocka_unit_test_setup_teardown(multiple_tickets, setup_test, teardown_test), cmocka_unit_test_setup_teardown(duplicate_tickets, setup_test, teardown_test)) diff --git a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_constraints_test.c b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_constraints_test.c index d816108565..4f4048061d 100644 --- a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_constraints_test.c +++ b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_constraints_test.c @@ -1,136 +1,136 @@ /* * Copyright 2024-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. static char *cib_path = NULL; static void cib_not_connected(void **state) { xmlNode *xml = NULL; /* Without any special setup, cib_new() in pcmk_ticket_constraints will use the * native CIB which means IPC calls. But there's nothing listening for those * calls, so signon() will return ENOTCONN. Check that we handle that. */ assert_int_equal(pcmk_ticket_constraints(&xml, NULL), ENOTCONN); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static int setup_test(void **state) { cib_path = pcmk__cib_test_copy_cib("tickets.xml"); if (cib_path == NULL) { return -1; } return 0; } static int teardown_test(void **state) { pcmk__cib_test_cleanup(cib_path); cib_path = NULL; return 0; } static void invalid_argument(void **state) { assert_int_equal(pcmk_ticket_constraints(NULL, "ticketA"), EINVAL); } static void unknown_ticket(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_constraints(&xml, "XYZ"), ENXIO); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static void ticket_exists(void **state) { xmlNode *xml = NULL; xmlXPathObject *xpath_obj = NULL; assert_int_equal(pcmk_ticket_constraints(&xml, "ticketA"), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has only one <ticket>, and that its ID is * what we asked for. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketA\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } static void multiple_tickets(void **state) { xmlNode *xml = NULL; xmlNode *ticket_node = NULL; xmlXPathObject *xpath_obj = NULL; assert_int_equal(pcmk_ticket_constraints(&xml, NULL), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has two <ticket> elements, and that their * IDs are as expected.
 */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 2); ticket_node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketA"); ticket_node = pcmk__xpath_result(xpath_obj, 1); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketB"); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } /* There are two kinds of tests in this file: * * (1) Those that test what happens if the CIB is not set up correctly, and * (2) Those that test what happens when run against a CIB. * * Therefore, we need two kinds of setup/teardown functions. We only do * minimal overall setup for the entire group, and then set up the CIB for * those tests that need it. */ PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test(cib_not_connected), cmocka_unit_test_setup_teardown(invalid_argument, setup_test, teardown_test), cmocka_unit_test_setup_teardown(unknown_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(ticket_exists, setup_test, teardown_test), cmocka_unit_test_setup_teardown(multiple_tickets, setup_test, teardown_test)) diff --git a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_get_attr_test.c b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_get_attr_test.c index 24c77d6259..0008fec821 100644 --- a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_get_attr_test.c +++ b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_get_attr_test.c @@ -1,157 +1,157 @@ /* * Copyright 2024-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include static char *cib_path = NULL; static int setup_test(void **state) { cib_path = pcmk__cib_test_copy_cib("tickets.xml"); if (cib_path == NULL) { return -1; } return 0; } static int teardown_test(void **state) { pcmk__cib_test_cleanup(cib_path); cib_path = NULL; return 0; } static void bad_arguments(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_get_attr(NULL, "ticketA", "XYZ", NULL), EINVAL); assert_int_equal(pcmk_ticket_get_attr(&xml, NULL, "attrA", NULL), EINVAL); pcmk__assert_validates(xml); pcmk__xml_free(xml); xml = NULL; assert_int_equal(pcmk_ticket_get_attr(&xml, "ticketA", NULL, NULL), EINVAL); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static void unknown_ticket(void **state) { xmlNode *xml = NULL; /* Both an unknown ticket and an unknown attribute on a known ticket * return ENXIO so we can't really differentiate between the two here. * Still, we'd better test both. */ assert_int_equal(pcmk_ticket_get_attr(&xml, "XYZ", "attrA", NULL), ENXIO); pcmk__assert_validates(xml); pcmk__xml_free(xml); xml = NULL; assert_int_equal(pcmk_ticket_get_attr(&xml, "ticketA", "XYZ", NULL), ENXIO); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static void verify_results(xmlNode *xml, const char *ticket_id, const char *attr_name, const char *attr_value) { xmlNode *node = NULL; xmlXPathObject *xpath_obj = NULL; /* Verify that the XML result has only one <ticket>, and that its ID is * what we asked for.
 */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_ID), ticket_id); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); /* Verify that it has an <attribute> child whose name and value are what * we expect. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "/" PCMK_XE_ATTRIBUTE); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_NAME), attr_name); assert_string_equal(crm_element_value(node, PCMK_XA_VALUE), attr_value); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); } static void attribute_exists(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_get_attr(&xml, "ticketA", "owner", NULL), pcmk_rc_ok); pcmk__assert_validates(xml); verify_results(xml, "ticketA", "owner", "1"); pcmk__xml_free(xml); } static void default_no_ticket(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_get_attr(&xml, "ticketX", "ABC", "DEFAULT"), pcmk_rc_ok); pcmk__assert_validates(xml); verify_results(xml, "ticketX", "ABC", "DEFAULT"); pcmk__xml_free(xml); } static void default_no_attribute(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_get_attr(&xml, "ticketA", "ABC", "DEFAULT"), pcmk_rc_ok); pcmk__assert_validates(xml); verify_results(xml, "ticketA", "ABC", "DEFAULT"); pcmk__xml_free(xml); } PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test_setup_teardown(bad_arguments, setup_test, teardown_test), cmocka_unit_test_setup_teardown(unknown_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(attribute_exists, setup_test, teardown_test), cmocka_unit_test_setup_teardown(default_no_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(default_no_attribute, setup_test, teardown_test)) diff --git a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_info_test.c b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_info_test.c index cc91267139..b45a6a04a5 100644 --- a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_info_test.c +++ b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_info_test.c @@ -1,150 +1,150 @@ /* * Copyright 2024-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc.
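/* The tests below follow a common libxml2 XPath pattern: search with
 * pcmk__xpath_search(), count matches with pcmk__xpath_num_results(), fetch
 * individual matches with pcmk__xpath_result(), and then release the object
 * with libxml2's xmlXPathFreeObject() (the free function this patch switches
 * to). A minimal sketch of the pattern:
 *
 *     xmlXPathObject *obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_TICKET);
 *
 *     if (pcmk__xpath_num_results(obj) > 0) {
 *         xmlNode *match = pcmk__xpath_result(obj, 0);
 *         // ... assert on match ...
 *     }
 *     xmlXPathFreeObject(obj);
 */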
static char *cib_path = NULL; static int setup_test(void **state) { cib_path = pcmk__cib_test_copy_cib("tickets.xml"); if (cib_path == NULL) { return -1; } return 0; } static int teardown_test(void **state) { pcmk__cib_test_cleanup(cib_path); cib_path = NULL; return 0; } static void bad_arguments(void **state) { assert_int_equal(pcmk_ticket_info(NULL, "ticketA"), EINVAL); } static void unknown_ticket(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_info(&xml, "XYZ"), ENXIO); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static void all_tickets(void **state) { xmlNode *node = NULL; xmlXPathObject *xpath_obj = NULL; xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_info(&xml, NULL), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has three <ticket> elements, with the attributes * we expect. The input has four tickets, but when they are loaded into the * scheduler's hash table, the duplicate IDs will collide leaving us with * three. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 3); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketA\"]"); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_STATUS), PCMK_VALUE_REVOKED); assert_string_equal(crm_element_value(node, PCMK__XA_GRANTED), "false"); assert_string_equal(crm_element_value(node, PCMK_XA_STANDBY), PCMK_VALUE_FALSE); assert_string_equal(crm_element_value(node, "owner"), "1"); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketB\"]"); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_STATUS), PCMK_VALUE_GRANTED); assert_string_equal(crm_element_value(node, PCMK__XA_GRANTED), "true"); assert_string_equal(crm_element_value(node, PCMK_XA_STANDBY), PCMK_VALUE_FALSE); assert_null(crm_element_value(node, "owner")); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketC\"]"); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_STATUS), PCMK_VALUE_GRANTED); assert_string_equal(crm_element_value(node, PCMK__XA_GRANTED), "true"); assert_string_equal(crm_element_value(node, PCMK_XA_STANDBY), PCMK_VALUE_FALSE); assert_null(crm_element_value(node, "owner")); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } static void single_ticket(void **state) { xmlNode *node = NULL; xmlXPathObject *xpath_obj = NULL; xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_info(&xml, "ticketA"), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has only one <ticket>, with the attributes * we expect.
 */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketA\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(node, PCMK_XA_STATUS), PCMK_VALUE_REVOKED); assert_string_equal(crm_element_value(node, PCMK__XA_GRANTED), "false"); assert_string_equal(crm_element_value(node, PCMK_XA_STANDBY), PCMK_VALUE_FALSE); assert_string_equal(crm_element_value(node, "owner"), "1"); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test_setup_teardown(bad_arguments, setup_test, teardown_test), cmocka_unit_test_setup_teardown(unknown_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(all_tickets, setup_test, teardown_test), cmocka_unit_test_setup_teardown(single_ticket, setup_test, teardown_test)) diff --git a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_state_test.c b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_state_test.c index d4af87c1a7..bc7945afdc 100644 --- a/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_state_test.c +++ b/lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_state_test.c @@ -1,164 +1,164 @@ /* * Copyright 2024-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. static char *cib_path = NULL; static void cib_not_connected(void **state) { xmlNode *xml = NULL; /* Without any special setup, cib_new() in pcmk_ticket_state will use the * native CIB which means IPC calls. But there's nothing listening for those * calls, so signon() will return ENOTCONN. Check that we handle that. */ assert_int_equal(pcmk_ticket_state(&xml, "ticketA"), ENOTCONN); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static int setup_test(void **state) { cib_path = pcmk__cib_test_copy_cib("tickets.xml"); if (cib_path == NULL) { return -1; } return 0; } static int teardown_test(void **state) { pcmk__cib_test_cleanup(cib_path); cib_path = NULL; return 0; } static void bad_arguments(void **state) { assert_int_equal(pcmk_ticket_state(NULL, "ticketA"), EINVAL); } static void unknown_ticket(void **state) { xmlNode *xml = NULL; assert_int_equal(pcmk_ticket_state(&xml, "XYZ"), ENXIO); pcmk__assert_validates(xml); pcmk__xml_free(xml); } static void ticket_exists(void **state) { xmlNode *xml = NULL; xmlXPathObject *xpath_obj = NULL; assert_int_equal(pcmk_ticket_state(&xml, "ticketA"), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has only one <ticket>, and that its ID is * what we asked for. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketA\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 1); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } static void multiple_tickets(void **state) { xmlNode *xml = NULL; xmlNode *ticket_node = NULL; xmlXPathObject *xpath_obj = NULL; assert_int_equal(pcmk_ticket_state(&xml, NULL), pcmk_rc_ok); pcmk__assert_validates(xml); /* Verify that the XML result has four <ticket> elements, and that their * IDs are as expected.
 */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 4); ticket_node = pcmk__xpath_result(xpath_obj, 0); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketA"); ticket_node = pcmk__xpath_result(xpath_obj, 1); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketB"); ticket_node = pcmk__xpath_result(xpath_obj, 2); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketC"); ticket_node = pcmk__xpath_result(xpath_obj, 3); assert_string_equal(crm_element_value(ticket_node, PCMK_XA_ID), "ticketC"); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } static void duplicate_tickets(void **state) { xmlNode *xml = NULL; xmlXPathObject *xpath_obj = NULL; assert_int_equal(pcmk_ticket_state(&xml, "ticketC"), pcmk_rc_duplicate_id); /* Verify that the XML result has two <ticket> elements, and that their * IDs are as expected. */ xpath_obj = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PACEMAKER_RESULT "/" PCMK_XE_TICKETS "/" PCMK_XE_TICKET "[@" PCMK_XA_ID "=\"ticketC\"]"); assert_int_equal(pcmk__xpath_num_results(xpath_obj), 2); - freeXpathObject(xpath_obj); + xmlXPathFreeObject(xpath_obj); pcmk__xml_free(xml); } /* There are two kinds of tests in this file: * * (1) Those that test what happens if the CIB is not set up correctly, and * (2) Those that test what happens when run against a CIB. * * Therefore, we need two kinds of setup/teardown functions. We only do * minimal overall setup for the entire group, and then set up the CIB for * those tests that need it. */ PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test(cib_not_connected), cmocka_unit_test_setup_teardown(bad_arguments, setup_test, teardown_test), cmocka_unit_test_setup_teardown(unknown_ticket, setup_test, teardown_test), cmocka_unit_test_setup_teardown(ticket_exists, setup_test, teardown_test), cmocka_unit_test_setup_teardown(multiple_tickets, setup_test, teardown_test), cmocka_unit_test_setup_teardown(duplicate_tickets, setup_test, teardown_test)) diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c index 1686df01c4..4f3f328191 100644 --- a/lib/pengine/failcounts.c +++ b/lib/pengine/failcounts.c @@ -1,482 +1,482 @@ /* * Copyright 2008-2025 the Pacemaker project contributors * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc.
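/* Fail counts are tracked as transient node attributes named like
 * PREFIX-RESOURCE#OP_INTERVAL (see generate_fail_regex() below). A minimal
 * sketch of what such attributes look like, assuming the usual prefixes and
 * a placeholder resource "myrsc" with a 10-second monitor:
 *
 *     fail-count-myrsc#monitor_10000    (value: a score such as "2" or "INFINITY")
 *     last-failure-myrsc#monitor_10000  (value: epoch time of the last failure)
 */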
#include #include #include #include static gboolean is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml, const xmlNode *lrm_op_xml) { gboolean matched = FALSE; const char *conf_op_name = NULL; const char *lrm_op_task = NULL; const char *conf_op_interval_spec = NULL; guint conf_op_interval_ms = 0; guint lrm_op_interval_ms = 0; const char *lrm_op_id = NULL; char *last_failure_key = NULL; if (rsc_id == NULL || conf_op_xml == NULL || lrm_op_xml == NULL) { return FALSE; } // Get name and interval from configured op conf_op_name = crm_element_value(conf_op_xml, PCMK_XA_NAME); conf_op_interval_spec = crm_element_value(conf_op_xml, PCMK_META_INTERVAL); pcmk_parse_interval_spec(conf_op_interval_spec, &conf_op_interval_ms); // Get name and interval from op history entry lrm_op_task = crm_element_value(lrm_op_xml, PCMK_XA_OPERATION); crm_element_value_ms(lrm_op_xml, PCMK_META_INTERVAL, &lrm_op_interval_ms); if ((conf_op_interval_ms != lrm_op_interval_ms) || !pcmk__str_eq(conf_op_name, lrm_op_task, pcmk__str_casei)) { return FALSE; } lrm_op_id = pcmk__xe_id(lrm_op_xml); last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0); if (pcmk__str_eq(last_failure_key, lrm_op_id, pcmk__str_casei)) { matched = TRUE; } else { char *expected_op_key = pcmk__op_key(rsc_id, conf_op_name, conf_op_interval_ms); if (pcmk__str_eq(expected_op_key, lrm_op_id, pcmk__str_casei)) { int rc = 0; int target_rc = pe__target_rc_from_xml(lrm_op_xml); crm_element_value_int(lrm_op_xml, PCMK__XA_RC_CODE, &rc); if (rc != target_rc) { matched = TRUE; } } free(expected_op_key); } free(last_failure_key); return matched; } static gboolean block_failure(const pcmk_node_t *node, pcmk_resource_t *rsc, const xmlNode *xml_op) { char *xml_name = clone_strip(rsc->id); /* @TODO This xpath search occurs after template expansion, but it is unable * to properly detect on-fail in id-ref, operation meta-attributes, or * op_defaults, or evaluate rules. * * Also, PCMK_META_ON_FAIL defaults to PCMK_VALUE_BLOCK (in * unpack_operation()) for stop actions when stonith is disabled. * * Ideally, we'd unpack the operation before this point, and pass in a * meta-attributes table that takes all that into consideration. 
*/ char *xpath = crm_strdup_printf("//" PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "='%s']" "//" PCMK_XE_OP "[@" PCMK_META_ON_FAIL "='" PCMK_VALUE_BLOCK "']", xml_name); xmlXPathObject *xpathObj = pcmk__xpath_search(rsc->priv->xml->doc, xpath); gboolean should_block = FALSE; free(xpath); if (xpathObj) { int max = pcmk__xpath_num_results(xpathObj); int lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *pref = pcmk__xpath_result(xpathObj, lpc); if (xml_op) { should_block = is_matched_failure(xml_name, pref, xml_op); if (should_block) { break; } } else { const char *conf_op_name = NULL; const char *conf_op_interval_spec = NULL; guint conf_op_interval_ms = 0; pcmk_scheduler_t *scheduler = rsc->priv->scheduler; char *lrm_op_xpath = NULL; xmlXPathObject *lrm_op_xpathObj = NULL; // Get name and interval from configured op conf_op_name = crm_element_value(pref, PCMK_XA_NAME); conf_op_interval_spec = crm_element_value(pref, PCMK_META_INTERVAL); pcmk_parse_interval_spec(conf_op_interval_spec, &conf_op_interval_ms); #define XPATH_FMT "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \ "//" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" \ "/" PCMK__XE_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='%s']" \ "[@" PCMK_META_INTERVAL "='%u']" lrm_op_xpath = crm_strdup_printf(XPATH_FMT, node->priv->name, xml_name, conf_op_name, conf_op_interval_ms); lrm_op_xpathObj = pcmk__xpath_search(scheduler->input->doc, lrm_op_xpath); free(lrm_op_xpath); if (lrm_op_xpathObj) { int max2 = pcmk__xpath_num_results(lrm_op_xpathObj); int lpc2 = 0; for (lpc2 = 0; lpc2 < max2; lpc2++) { xmlNode *lrm_op_xml = NULL; lrm_op_xml = pcmk__xpath_result(lrm_op_xpathObj, lpc2); should_block = is_matched_failure(xml_name, pref, lrm_op_xml); if (should_block) { break; } } } - freeXpathObject(lrm_op_xpathObj); + xmlXPathFreeObject(lrm_op_xpathObj); if (should_block) { break; } } } } free(xml_name); - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); return should_block; } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(const pcmk_resource_t *rsc) { const char *name = pcmk__s(rsc->priv->history_id, rsc->id); return pcmk_is_set(rsc->flags, pcmk__rsc_unique)? strdup(name) : clone_strip(name); } /*! * \internal * \brief Compile regular expression to match a failure-related node attribute * * \param[in] prefix Attribute prefix to match * \param[in] rsc_name Resource name to match as used in failure attributes * \param[in] is_unique Whether the resource is a globally unique clone * \param[out] re Where to store resulting regular expression * * \return Standard Pacemaker return code * \note Fail attributes are named like PREFIX-RESOURCE#OP_INTERVAL. * The caller is responsible for freeing re with regfree(). */ static int generate_fail_regex(const char *prefix, const char *rsc_name, bool is_unique, regex_t *re) { char *pattern = NULL; const char *op_pattern = "#.+_[0-9]+"; /* Ignore instance numbers for anything other than globally unique clones. * Anonymous clone fail counts could contain an instance number if the * clone was initially unique, failed, then was converted to anonymous. */ const char *instance_pattern = (is_unique? 
"" : "(:[0-9]+)?"); pattern = crm_strdup_printf("^%s-%s%s%s$", prefix, rsc_name, instance_pattern, op_pattern); if (regcomp(re, pattern, REG_EXTENDED|REG_NOSUB) != 0) { free(pattern); return EINVAL; } free(pattern); return pcmk_rc_ok; } /*! * \internal * \brief Compile regular expressions to match failure-related node attributes * * \param[in] rsc Resource being checked for failures * \param[out] failcount_re Storage for regular expression for fail count * \param[out] lastfailure_re Storage for regular expression for last failure * * \return Standard Pacemaker return code * \note On success, the caller is responsible for freeing the expressions with * regfree(). */ static int generate_fail_regexes(const pcmk_resource_t *rsc, regex_t *failcount_re, regex_t *lastfailure_re) { int rc = pcmk_rc_ok; char *rsc_name = rsc_fail_name(rsc); if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, pcmk_is_set(rsc->flags, pcmk__rsc_unique), failcount_re) != pcmk_rc_ok) { rc = EINVAL; } else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name, pcmk_is_set(rsc->flags, pcmk__rsc_unique), lastfailure_re) != pcmk_rc_ok) { rc = EINVAL; regfree(failcount_re); } free(rsc_name); return rc; } // Data for fail-count-related iterators struct failcount_data { const pcmk_node_t *node;// Node to check for fail count pcmk_resource_t *rsc; // Resource to check for fail count uint32_t flags; // Fail count flags const xmlNode *xml_op; // History entry for expiration purposes (or NULL) regex_t failcount_re; // Fail count regular expression to match regex_t lastfailure_re; // Last failure regular expression to match int failcount; // Fail count so far time_t last_failure; // Time of most recent failure so far }; /*! * \internal * \brief Update fail count and last failure appropriately for a node attribute * * \param[in] key Node attribute name * \param[in] value Node attribute value * \param[in] user_data Fail count data to update */ static void update_failcount_for_attr(gpointer key, gpointer value, gpointer user_data) { struct failcount_data *fc_data = user_data; // If this is a matching fail count attribute, update fail count if (regexec(&(fc_data->failcount_re), (const char *) key, 0, NULL, 0) == 0) { int score = 0; int rc = pcmk_parse_score(value, &score, 0); if (rc != pcmk_rc_ok) { crm_warn("Ignoring %s for %s " "because '%s' is not a valid fail count: %s", (const char *) key, pcmk__node_name(fc_data->node), value, pcmk_rc_str(rc)); return; } fc_data->failcount = pcmk__add_scores(fc_data->failcount, score); pcmk__rsc_trace(fc_data->rsc, "Added %s (%s) to %s fail count (now %s)", (const char *) key, (const char *) value, fc_data->rsc->id, pcmk_readable_score(fc_data->failcount)); return; } // If this is a matching last failure attribute, update last failure if (regexec(&(fc_data->lastfailure_re), (const char *) key, 0, NULL, 0) == 0) { long long last_ll; int rc = pcmk__scan_ll(value, &last_ll, 0LL); if (rc != pcmk_rc_ok) { crm_info("Ignoring invalid value '%s' for %s: %s", (const char *) value, (const char *) key, pcmk_rc_str(rc)); return; } fc_data->last_failure = (time_t) QB_MAX(fc_data->last_failure, last_ll); } } /*! 
* \internal * \brief Update fail count and last failure appropriately for a launched resource * * \param[in] data Launched resource * \param[in] user_data Fail count data to update */ static void update_launched_failcount(gpointer data, gpointer user_data) { pcmk_resource_t *launched = data; struct failcount_data *fc_data = user_data; time_t launched_last_failure = 0; fc_data->failcount += pe_get_failcount(fc_data->node, launched, &launched_last_failure, fc_data->flags, fc_data->xml_op); fc_data->last_failure = QB_MAX(fc_data->last_failure, launched_last_failure); } #define readable_expiration(rsc) \ pcmk__readable_interval((rsc)->priv->failure_expiration_ms) /*! * \internal * \brief Get a resource's fail count on a node * * \param[in] node Node to check * \param[in,out] rsc Resource to check * \param[out] last_failure If not NULL, where to set time of most recent * failure of \p rsc on \p node * \param[in] flags Group of enum pcmk__fc_flags * \param[in] xml_op If not NULL, consider only the action in this * history entry when determining whether on-fail * is configured as "blocked", otherwise consider * all actions configured for \p rsc * * \return Fail count for \p rsc on \p node according to \p flags */ int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc, time_t *last_failure, uint32_t flags, const xmlNode *xml_op) { struct failcount_data fc_data = { .node = node, .rsc = rsc, .flags = flags, .xml_op = xml_op, .failcount = 0, .last_failure = (time_t) 0, }; // Calculate resource failcount as sum of all matching operation failcounts CRM_CHECK(generate_fail_regexes(rsc, &fc_data.failcount_re, &fc_data.lastfailure_re) == pcmk_rc_ok, return 0); g_hash_table_foreach(node->priv->attrs, update_failcount_for_attr, &fc_data); regfree(&(fc_data.failcount_re)); regfree(&(fc_data.lastfailure_re)); // If failure blocks the resource, disregard any failure timeout if ((fc_data.failcount > 0) && (rsc->priv->failure_expiration_ms > 0) && block_failure(node, rsc, xml_op)) { pcmk__config_warn("Ignoring failure timeout (%s) for %s " "because it conflicts with " PCMK_META_ON_FAIL "=" PCMK_VALUE_BLOCK, readable_expiration(rsc), rsc->id); rsc->priv->failure_expiration_ms = 0; } // If all failures have expired, ignore fail count if (pcmk_is_set(flags, pcmk__fc_effective) && (fc_data.failcount > 0) && (fc_data.last_failure > 0) && (rsc->priv->failure_expiration_ms > 0)) { time_t now = pcmk__scheduler_epoch_time(rsc->priv->scheduler); const guint expiration = pcmk__timeout_ms2s(rsc->priv->failure_expiration_ms); if (now > (fc_data.last_failure + expiration)) { pcmk__rsc_debug(rsc, "Failcount for %s on %s expired after %s", rsc->id, pcmk__node_name(node), readable_expiration(rsc)); fc_data.failcount = 0; } } /* Add the fail count of any launched resources, except that we never want * the fail counts of a bundle container's launched resources to count * towards the container's fail count. * * Most importantly, a Pacemaker Remote connection to a bundle container * is launched by the container, but can reside on a different node than the * container itself. Counting its fail count on its node towards the * container's fail count on that node could lead to attempting to stop the * container on the wrong node. 
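* (Hypothetical illustration: if the connection for guest node httpd-bundle-0 is launched by container httpd-bundle-docker-0 but runs on a different cluster node, counting the connection's failures toward the container would suggest stopping the container on a node where it is not running.)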
*/ if (pcmk_is_set(flags, pcmk__fc_launched) && (rsc->priv->launched != NULL) && !pcmk__is_bundled(rsc)) { g_list_foreach(rsc->priv->launched, update_launched_failcount, &fc_data); if (fc_data.failcount > 0) { pcmk__rsc_info(rsc, "Container %s and the resources within it " "have failed %s time%s on %s", rsc->id, pcmk_readable_score(fc_data.failcount), pcmk__plural_s(fc_data.failcount), pcmk__node_name(node)); } } else if (fc_data.failcount > 0) { pcmk__rsc_info(rsc, "%s has failed %s time%s on %s", rsc->id, pcmk_readable_score(fc_data.failcount), pcmk__plural_s(fc_data.failcount), pcmk__node_name(node)); } if (last_failure != NULL) { if ((fc_data.failcount > 0) && (fc_data.last_failure > 0)) { *last_failure = fc_data.last_failure; } else { *last_failure = 0; } } return fc_data.failcount; } /*! * \brief Schedule a controller operation to clear a fail count * * \param[in,out] rsc Resource with failure * \param[in] node Node failure occurred on * \param[in] reason Readable description why needed (for logging) * \param[in,out] scheduler Scheduler data * * \return Scheduled action */ pcmk_action_t * pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *reason, pcmk_scheduler_t *scheduler) { char *key = NULL; pcmk_action_t *clear = NULL; CRM_CHECK(rsc && node && reason && scheduler, return NULL); key = pcmk__op_key(rsc->id, PCMK_ACTION_CLEAR_FAILCOUNT, 0); clear = custom_action(rsc, key, PCMK_ACTION_CLEAR_FAILCOUNT, node, FALSE, scheduler); pcmk__insert_meta(clear, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE); crm_notice("Clearing failure of %s on %s because %s " QB_XS " %s", rsc->id, pcmk__node_name(node), reason, clear->uuid); return clear; } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index aba85e2291..25d3a67a3e 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,5112 +1,5112 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include <libxml/xpath.h> // xmlXPathObject +#include <libxml/xpath.h> // xmlXPathObject, etc. #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); // A (parsed) resource action history entry struct action_history { pcmk_resource_t *rsc; // Resource that history is for pcmk_node_t *node; // Node that history is for xmlNode *xml; // History entry XML // Parsed from entry XML const char *id; // XML ID of history entry const char *key; // Operation key of action const char *task; // Action name const char *exit_reason; // Exit reason given for result guint interval_ms; // Action interval int call_id; // Call ID of action int expected_exit_status; // Expected exit status of action int exit_status; // Actual exit status of action int execution_status; // Execution status of action }; /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than * use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the * flag is stringified more readably in log messages. 
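* For example, unpack_config() below calls set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED, pcmk__sched_fencing_enabled); so the trace log names the flag ("pcmk__sched_fencing_enabled") instead of showing an anonymous bit value.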
*/ #define set_config_flag(scheduler, option, flag) do { \ GHashTable *config_hash = (scheduler)->priv->options; \ const char *scf_value = pcmk__cluster_option(config_hash, (option)); \ \ if (scf_value != NULL) { \ if (crm_is_true(scf_value)) { \ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } else { \ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } \ } \ } while(0) static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum pcmk__on_fail *failed); static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node); static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler); static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler); static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler); /*! * \internal * \brief Check whether a node is a dangling guest node * * \param[in] node Node to check * * \return true if \p node had a Pacemaker Remote connection resource with a * launcher that was removed from the CIB, otherwise false. */ static bool is_dangling_guest_node(pcmk_node_t *node) { return pcmk__is_pacemaker_remote_node(node) && (node->priv->remote != NULL) && (node->priv->remote->priv->launcher == NULL) && pcmk_is_set(node->priv->remote->flags, pcmk__rsc_removed_launched); } /*! * \brief Schedule a fence action for a node * * \param[in,out] scheduler Scheduler data * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider * \c PCMK_OPT_PRIORITY_FENCING_DELAY */ void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); if (pcmk__is_guest_or_bundle_node(node)) { // Fence a guest or bundle node by marking its launcher as failed pcmk_resource_t *rsc = node->priv->remote->priv->launcher; if (!pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", pcmk__node_name(node), reason, rsc->id); } else { pcmk__sched_warn(scheduler, "Guest node %s will be fenced " "(by recovering its guest resource %s): %s", pcmk__node_name(node), rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. 
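* Setting pcmk__node_remote_reset and marking the launcher as failed (immediately below) is what actually triggers that recovery.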
*/ pcmk__set_node_flags(node, pcmk__node_remote_reset); pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", pcmk__node_name(node), reason); pcmk__set_rsc_flags(node->priv->remote, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } else if (pcmk__is_remote_node(node)) { pcmk_resource_t *rsc = node->priv->remote; if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", pcmk__node_name(node), reason); } else if (!pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)) { pcmk__set_node_flags(node, pcmk__node_remote_reset); pcmk__sched_warn(scheduler, "Remote node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean", reason); } else { pcmk__sched_warn(scheduler, "Cluster node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \ "[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \ "or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \ "and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \ "//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \ "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \ "/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler) { xmlXPathObject *result = NULL; if (!pcmk_is_set(scheduler->flags, flag)) { result = pcmk__xpath_search(scheduler->input->doc, xpath); if (pcmk__xpath_num_results(result) > 0) { pcmk__set_scheduler_flags(scheduler, flag); } - freeXpathObject(result); + xmlXPathFreeObject(result); } } gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler) { const char *value = NULL; GHashTable *config_hash = pcmk__strkey_table(free, free); const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, }; scheduler->priv->options = config_hash; pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_input, config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, scheduler); pcmk__validate_cluster_options(config_hash); set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES, pcmk__sched_probe_resources); if (!pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) { crm_info("Startup probes: disabled (dangerous)"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_info("Watchdog-based self-fencing will be performed via SBD if " "fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " is nonzero"); pcmk__set_scheduler_flags(scheduler, 
pcmk__sched_have_fencing); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. */ set_if_xpath(pcmk__sched_enable_unfencing, XPATH_ENABLE_UNFENCING, scheduler); value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT); pcmk_parse_interval_spec(value, &(scheduler->priv->fence_timeout_ms)); crm_debug("Default fencing action timeout: %s", pcmk__readable_interval(scheduler->priv->fence_timeout_ms)); set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED, pcmk__sched_fencing_enabled); if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { crm_debug("STONITH of failed nodes is enabled"); } else { crm_debug("STONITH of failed nodes is disabled"); } scheduler->priv->fence_action = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_ACTION); crm_trace("STONITH will %s nodes", scheduler->priv->fence_action); set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING, pcmk__sched_concurrent_fencing); if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) { crm_debug("Concurrent fencing is enabled"); } else { crm_debug("Concurrent fencing is disabled"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY); if (value) { pcmk_parse_interval_spec(value, &(scheduler->priv->priority_fencing_ms)); crm_trace("Priority fencing delay is %s", pcmk__readable_interval(scheduler->priv->priority_fencing_ms)); } set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES, pcmk__sched_stop_all); crm_debug("Stop all active resources: %s", pcmk__flag_text(scheduler->flags, pcmk__sched_stop_all)); set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER, pcmk__sched_symmetric_cluster); if (pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY); if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_ignore; } else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_freeze; } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_demote; } else if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY, NULL)) { if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { int do_panic = 0; crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC, &do_panic); if (do_panic || pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) { scheduler->no_quorum_policy = pcmk_no_quorum_fence; } else { crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop': cluster has never had quorum"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop' because fencing is disabled"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { scheduler->no_quorum_policy = pcmk_no_quorum_stop; } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: crm_debug("On loss of quorum: " "Freeze resources that require quorum"); break; case pcmk_no_quorum_stop: crm_debug("On loss of quorum: " "Stop resources that require quorum"); break; case pcmk_no_quorum_demote: crm_debug("On loss of quorum: " "Demote promotable resources and stop other resources"); break; case pcmk_no_quorum_fence: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case pcmk_no_quorum_ignore: crm_notice("On loss of quorum: 
Ignore"); break; } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES, pcmk__sched_stop_removed_resources); if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) { crm_trace("Orphan resources are stopped"); } else { crm_trace("Orphan resources are ignored"); } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS, pcmk__sched_cancel_removed_actions); if (pcmk_is_set(scheduler->flags, pcmk__sched_cancel_removed_actions)) { crm_trace("Orphan resource actions are stopped"); } else { crm_trace("Orphan resource actions are ignored"); } set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE, pcmk__sched_in_maintenance); crm_trace("Maintenance mode: %s", pcmk__flag_text(scheduler->flags, pcmk__sched_in_maintenance)); set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL, pcmk__sched_start_failure_fatal); if (pcmk_is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) { crm_trace("Start failures are always fatal"); } else { crm_trace("Start failures are handled by failcount"); } if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING, pcmk__sched_startup_fencing); } if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pcmk__warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes"); } pe__unpack_node_health_scores(scheduler); scheduler->priv->placement_strategy = pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY); crm_trace("Placement strategy: %s", scheduler->priv->placement_strategy); set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK, pcmk__sched_shutdown_lock); if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) { value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); pcmk_parse_interval_spec(value, &(scheduler->priv->shutdown_lock_ms)); crm_trace("Resources will be locked to nodes that were cleanly " "shut down (locks expire after %s)", pcmk__readable_interval(scheduler->priv->shutdown_lock_ms)); } else { crm_trace("Resources will not be locked to nodes that were cleanly " "shut down"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); pcmk_parse_interval_spec(value, &(scheduler->priv->node_pending_ms)); if (scheduler->priv->node_pending_ms == 0U) { crm_trace("Do not fence pending nodes"); } else { crm_trace("Fence pending nodes after %s", pcmk__readable_interval(scheduler->priv->node_pending_ms)); } return TRUE; } /*! * \internal * \brief Create a new node object in scheduler data * * \param[in] id ID of new node * \param[in] uname Name of new node * \param[in] type Type of new node * \param[in] score Score of new node * \param[in,out] scheduler Scheduler data * * \return Newly created node object * \note The returned object is part of the scheduler data and should not be * freed separately. 
*/ pcmk_node_t * pe_create_node(const char *id, const char *uname, const char *type, int score, pcmk_scheduler_t *scheduler) { enum pcmk__node_variant variant = pcmk__node_variant_cluster; pcmk_node_t *new_node = NULL; if (pcmk_find_node(scheduler, uname) != NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } if (pcmk__str_eq(type, PCMK_VALUE_MEMBER, pcmk__str_null_matches|pcmk__str_casei)) { variant = pcmk__node_variant_cluster; } else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) { variant = pcmk__node_variant_remote; } else { pcmk__config_err("Ignoring node %s with unrecognized type '%s'", pcmk__s(uname, "without name"), type); return NULL; } new_node = calloc(1, sizeof(pcmk_node_t)); if (new_node == NULL) { pcmk__sched_err(scheduler, "Could not allocate memory for node %s", uname); return NULL; } new_node->assign = calloc(1, sizeof(struct pcmk__node_assignment)); new_node->details = calloc(1, sizeof(struct pcmk__node_details)); new_node->priv = calloc(1, sizeof(pcmk__node_private_t)); if ((new_node->assign == NULL) || (new_node->details == NULL) || (new_node->priv == NULL)) { free(new_node->assign); free(new_node->details); free(new_node->priv); free(new_node); pcmk__sched_err(scheduler, "Could not allocate memory for node %s", uname); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->assign->score = score; new_node->priv->id = id; new_node->priv->name = uname; new_node->priv->flags = pcmk__node_probes_allowed; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->running_rsc = NULL; new_node->priv->scheduler = scheduler; new_node->priv->variant = variant; new_node->priv->attrs = pcmk__strkey_table(free, free); new_node->priv->utilization = pcmk__strkey_table(free, free); new_node->priv->digest_cache = pcmk__strkey_table(free, pe__free_digests); if (pcmk__is_pacemaker_remote_node(new_node)) { pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "remote"); pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_remote_nodes); } else { pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "cluster"); } scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node, pe__cmp_node_name); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = pcmk__xe_id(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; // @TODO This doesn't handle rules or id-ref for (attr_set = pcmk__xe_first_child(xml_obj, PCMK_XE_META_ATTRIBUTES, NULL, NULL); attr_set != NULL; attr_set = pcmk__xe_next(attr_set, PCMK_XE_META_ATTRIBUTES)) { for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL); attr != NULL; attr = pcmk__xe_next(attr, NULL)) { const char *value = crm_element_value(attr, PCMK_XA_VALUE); const char *name = crm_element_value(attr, PCMK_XA_NAME); if (name == NULL) { // Sanity continue; } if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) { remote_name = value; } else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) { remote_server = value; } else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) { remote_port = value; } else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) { connect_timeout = value; } else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) { remote_allow_migrate = value; } 
else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (pe_find_resource(data->priv->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node) { if ((new_node->priv->variant == pcmk__node_variant_remote) && (new_node->priv->remote == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } } gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; pcmk_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; for (xml_obj = pcmk__xe_first_child(xml_nodes, PCMK_XE_NODE, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, PCMK_XE_NODE)) { int score = 0; int rc = pcmk__xe_get_score(xml_obj, PCMK_XA_SCORE, &score, 0); new_node = NULL; id = crm_element_value(xml_obj, PCMK_XA_ID); uname = crm_element_value(xml_obj, PCMK_XA_UNAME); type = crm_element_value(xml_obj, PCMK_XA_TYPE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" PCMK_XE_NODE "> entry in configuration without id"); continue; } if (rc != pcmk_rc_ok) { // Not possible with schema validation enabled pcmk__config_warn("Using 0 as score for node %s " "because '%s' is not a valid score: %s", pcmk__s(uname, "without name"), crm_element_value(xml_obj, PCMK_XA_SCORE), pcmk_rc_str(rc)); } new_node = pe_create_node(id, uname, type, score, scheduler); if (new_node == NULL) { return FALSE; } handle_startup_fencing(scheduler, new_node); add_node_attrs(xml_obj, new_node, FALSE, scheduler); crm_trace("Done with node %s", crm_element_value(xml_obj, PCMK_XA_UNAME)); } return TRUE; } static void unpack_launcher(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *launcher_id = NULL; if (rsc->priv->children != NULL) { g_list_foreach(rsc->priv->children, (GFunc) unpack_launcher, scheduler); return; } launcher_id = g_hash_table_lookup(rsc->priv->meta, PCMK__META_CONTAINER); if ((launcher_id != NULL) && !pcmk__str_eq(launcher_id, rsc->id, pcmk__str_none)) { pcmk_resource_t *launcher = pe_find_resource(scheduler->priv->resources, launcher_id); if (launcher != NULL) { rsc->priv->launcher = launcher; launcher->priv->launched = g_list_append(launcher->priv->launched, rsc); pcmk__rsc_trace(rsc, "Resource %s's launcher is %s", rsc->id, launcher_id); } else { pcmk__config_err("Resource %s: Unknown " PCMK__META_CONTAINER " %s", rsc->id, launcher_id); } } } gboolean unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. 
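* For example (hypothetical ID): <primitive id="rem1" class="ocf" provider="pacemaker" type="remote"/>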
*/ if (xml_contains_remote_node(xml_obj)) { new_node_id = pcmk__xe_id(xml_obj); /* The pcmk_find_node() check ensures we don't iterate over an * expanded node that has already been added to the node list */ if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found remote node %s defined by resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen). */ if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, scheduler); if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL); xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2, NULL)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, scheduler); if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, pcmk__xe_id(xml_obj2), pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc) { pcmk_node_t *remote_node = NULL; if (!pcmk_is_set(new_rsc->flags, pcmk__rsc_is_remote_connection)) { return; } if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pcmk_find_node(scheduler, new_rsc->id); CRM_CHECK(remote_node != NULL, return); pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s", new_rsc->id, pcmk__node_name(remote_node)); remote_node->priv->remote = new_rsc; if (new_rsc->priv->launcher == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(scheduler, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ pcmk__insert_dup(remote_node->priv->attrs, CRM_ATTR_KIND, "container"); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] scheduler Scheduler data * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when pe__unpack_resource() calls resource_location() */ gboolean unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; GList *gIter = NULL; scheduler->priv->templates = pcmk__strkey_table(free, pcmk__free_idref); for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { pcmk_resource_t *new_rsc = NULL; const char *id = pcmk__xe_id(xml_obj); if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring <%s> resource without ID", xml_obj->name); continue; } if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) { if (g_hash_table_lookup_extended(scheduler->priv->templates, id, NULL, NULL) == FALSE) { /* Record the template's ID so its existence is known, even if no resource references it. */ pcmk__insert_dup(scheduler->priv->templates, id, NULL); } continue; } crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id); if (pe__unpack_resource(xml_obj, &new_rsc, NULL, scheduler) == pcmk_rc_ok) { scheduler->priv->resources = g_list_append(scheduler->priv->resources, new_rsc); pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { pcmk__config_err("Ignoring <%s> resource '%s' " "because configuration is invalid", xml_obj->name, id); } } for (gIter = scheduler->priv->resources; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; unpack_launcher(rsc, scheduler); link_rsc2remotenode(scheduler, rsc); } scheduler->priv->resources = g_list_sort(scheduler->priv->resources, pe__cmp_rsc_priority); if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) { /* Ignore */ } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled) && !pcmk_is_set(scheduler->flags, pcmk__sched_have_fencing)) { pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined"); pcmk__config_err("Either configure some or disable STONITH with the " PCMK_OPT_STONITH_ENABLED " option"); pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } /*! 
* \internal * \brief Validate the levels in a fencing topology * * \param[in] xml \c PCMK_XE_FENCING_TOPOLOGY element */ void pcmk__validate_fencing_topology(const xmlNode *xml) { if (xml == NULL) { return; } CRM_CHECK(pcmk__xe_is(xml, PCMK_XE_FENCING_TOPOLOGY), return); for (const xmlNode *level = pcmk__xe_first_child(xml, PCMK_XE_FENCING_LEVEL, NULL, NULL); level != NULL; level = pcmk__xe_next(level, PCMK_XE_FENCING_LEVEL)) { const char *id = pcmk__xe_id(level); int index = 0; if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring fencing level without ID"); continue; } if (crm_element_value_int(level, PCMK_XA_INDEX, &index) != 0) { pcmk__config_err("Ignoring fencing level %s with invalid index", id); continue; } if ((index < ST__LEVEL_MIN) || (index > ST__LEVEL_MAX)) { pcmk__config_err("Ignoring fencing level %s with out-of-range " "index %d", id, index); } } } gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler) { xmlNode *xml_tag = NULL; scheduler->priv->tags = pcmk__strkey_table(free, pcmk__free_idref); for (xml_tag = pcmk__xe_first_child(xml_tags, PCMK_XE_TAG, NULL, NULL); xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag, PCMK_XE_TAG)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = pcmk__xe_id(xml_tag); if (tag_id == NULL) { pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID, (const char *) xml_tag->name); continue; } for (xml_obj_ref = pcmk__xe_first_child(xml_tag, PCMK_XE_OBJ_REF, NULL, NULL); xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref, PCMK_XE_OBJ_REF)) { const char *obj_ref = pcmk__xe_id(xml_obj_ref); if (obj_ref == NULL) { pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID, xml_obj_ref->name, tag_id); continue; } pcmk__add_idref(scheduler->priv->tags, tag_id, obj_ref); } } return TRUE; } /*! 
* \internal * \brief Unpack a ticket state entry * * \param[in] xml_ticket XML ticket state to unpack * \param[in,out] userdata Scheduler data * * \return pcmk_rc_ok (to always continue unpacking further entries) */ static int unpack_ticket_state(xmlNode *xml_ticket, void *userdata) { pcmk_scheduler_t *scheduler = userdata; const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; pcmk__ticket_t *ticket = NULL; ticket_id = pcmk__xe_id(xml_ticket); if (pcmk__str_empty(ticket_id)) { return pcmk_rc_ok; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(scheduler->priv->ticket_constraints, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, scheduler); if (ticket == NULL) { return pcmk_rc_ok; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = pcmk__xml_attr_value(xIter); if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) { continue; } pcmk__insert_dup(ticket->state, prop_name, prop_value); } granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED); if (granted && crm_is_true(granted)) { pcmk__set_ticket_flags(ticket, pcmk__ticket_granted); crm_info("We have ticket '%s'", ticket->id); } else { pcmk__clear_ticket_flags(ticket, pcmk__ticket_granted); crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED); if (last_granted) { long long last_granted_ll = 0LL; int rc = pcmk__scan_ll(last_granted, &last_granted_ll, 0LL); if (rc != pcmk_rc_ok) { crm_warn("Using %lld instead of invalid " PCMK_XA_LAST_GRANTED " value '%s' in state for ticket %s: %s", last_granted_ll, last_granted, ticket->id, pcmk_rc_str(rc)); } ticket->last_granted = (time_t) last_granted_ll; } standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY); if (standby && crm_is_true(standby)) { pcmk__set_ticket_flags(ticket, pcmk__ticket_standby); if (pcmk_is_set(ticket->flags, pcmk__ticket_granted)) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { pcmk__clear_ticket_flags(ticket, pcmk__ticket_standby); } crm_trace("Done with ticket state for %s", ticket_id); return pcmk_rc_ok; } static void unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = NULL; pcmk_resource_t *rsc = NULL; int maint = 0; if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { return; } if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) { return; } crm_trace("Processing Pacemaker Remote node %s", pcmk__node_name(this_node)); pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE), &maint, 0); if (maint) { pcmk__set_node_flags(this_node, pcmk__node_remote_maint); } else { pcmk__clear_node_flags(this_node, pcmk__node_remote_maint); } rsc = this_node->priv->remote; if (!pcmk_is_set(this_node->priv->flags, pcmk__node_remote_reset)) { this_node->details->unclean = FALSE; pcmk__set_node_flags(this_node, pcmk__node_seen); } attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); add_node_attrs(attrs, this_node, TRUE, scheduler); if (pe__shutdown_requested(this_node)) { crm_info("%s is shutting down", pcmk__node_name(this_node)); this_node->details->shutdown = TRUE; } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is 
in standby mode", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current)) || ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed))) { crm_info("%s is in maintenance mode", pcmk__node_name(this_node)); this_node->details->maintenance = TRUE; } discovery = pcmk__node_attr(this_node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__warn_once(pcmk__wo_rdisc_enabled, "Support for the " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " node attribute is deprecated and will be removed" " (and behave as 'true') in a future release."); if (pcmk__is_remote_node(this_node) && !pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute on Pacemaker Remote node %s" " because fencing is disabled", pcmk__node_name(this_node)); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("%s has resource discovery disabled", pcmk__node_name(this_node)); pcmk__clear_node_flags(this_node, pcmk__node_probes_allowed); } } } /*! * \internal * \brief Unpack a cluster node's transient attributes * * \param[in] state CIB node state XML * \param[in,out] node Cluster node whose attributes are being unpacked * \param[in,out] scheduler Scheduler data */ static void unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); add_node_attrs(attrs, node, TRUE, scheduler); if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is in standby mode", pcmk__node_name(node)); pcmk__set_node_flags(node, pcmk__node_standby); } if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current))) { crm_info("%s is in maintenance mode", pcmk__node_name(node)); node->details->maintenance = TRUE; } discovery = pcmk__node_attr(node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute for %s because disabling resource" " discovery is not allowed for cluster nodes", pcmk__node_name(node)); } } /*! * \internal * \brief Unpack a node state entry (first pass) * * Unpack one node state entry from status. This unpacks information from the * \C PCMK__XE_NODE_STATE element itself and node attributes inside it, but not * the resource history inside it. Multiple passes through the status are needed * to fully unpack everything. 
* * \param[in] state CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *id = NULL; const char *uname = NULL; pcmk_node_t *this_node = NULL; id = crm_element_value(state, PCMK_XA_ID); if (id == NULL) { pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without " PCMK_XA_ID); crm_log_xml_info(state, "missing-id"); return; } uname = crm_element_value(state, PCMK_XA_UNAME); if (uname == NULL) { /* If a joining peer makes the cluster acquire the quorum from Corosync * but has not joined the controller CPG membership yet, it's possible * that the created PCMK__XE_NODE_STATE entry doesn't have a * PCMK_XA_UNAME yet. Recognize the node as pending and wait for it to * join CPG. */ crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" " "without " PCMK_XA_UNAME, id); } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { crm_notice("Ignoring recorded state for removed node with name %s and " PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id); return; } if (pcmk__is_pacemaker_remote_node(this_node)) { int remote_fenced = 0; /* We can't determine the online status of Pacemaker Remote nodes until * after all resource history has been unpacked. In this first pass, we * do need to mark whether the node has been fenced, as this plays a * role during unpacking cluster node resource state. */ pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED), &remote_fenced, 0); if (remote_fenced) { pcmk__set_node_flags(this_node, pcmk__node_remote_fenced); } else { pcmk__clear_node_flags(this_node, pcmk__node_remote_fenced); } return; } unpack_transient_attributes(state, this_node, scheduler); /* Provisionally mark this cluster node as clean. We have at least seen it * in the current cluster's lifetime. */ this_node->details->unclean = FALSE; pcmk__set_node_flags(this_node, pcmk__node_seen); crm_trace("Determining online status of cluster node %s (id %s)", pcmk__node_name(this_node), id); determine_online_status(state, this_node, scheduler); if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate) && this_node->details->online && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(scheduler, this_node, "cluster does not have quorum", FALSE); } } /*! * \internal * \brief Unpack nodes' resource history as much as possible * * Unpack as many nodes' resource history as possible in one pass through the * status. We need to process Pacemaker Remote nodes' connections/containers * before unpacking their history; the connection/container history will be * in another node's history, so it might take multiple passes to unpack * everything. 
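* (unpack_status() below calls this repeatedly until it stops returning EAGAIN.)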
* * \param[in] status CIB XML status section * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done, * or EAGAIN if more unpacking remains to be done) */ static int unpack_node_history(const xmlNode *status, bool fence, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; // Loop through all PCMK__XE_NODE_STATE entries in CIB status for (const xmlNode *state = pcmk__xe_first_child(status, PCMK__XE_NODE_STATE, NULL, NULL); state != NULL; state = pcmk__xe_next(state, PCMK__XE_NODE_STATE)) { const char *id = pcmk__xe_id(state); const char *uname = crm_element_value(state, PCMK_XA_UNAME); pcmk_node_t *this_node = NULL; if ((id == NULL) || (uname == NULL)) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history from malformed " PCMK__XE_NODE_STATE " without id and/or uname"); continue; } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history for node %s because " "no longer in configuration", id); continue; } if (pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) { crm_trace("Not unpacking resource history for node %s because " "already unpacked", id); continue; } if (fence) { // We're processing all remaining nodes } else if (pcmk__is_guest_or_bundle_node(this_node)) { /* We can unpack a guest node's history only after we've unpacked * other resource history to the point that we know that the node's * connection and containing resource are both up. */ const pcmk_resource_t *remote = this_node->priv->remote; const pcmk_resource_t *launcher = remote->priv->launcher; if ((remote->priv->orig_role != pcmk_role_started) || (launcher->priv->orig_role != pcmk_role_started)) { crm_trace("Not unpacking resource history for guest node %s " "because launcher and connection are not known to " "be up", id); continue; } } else if (pcmk__is_remote_node(this_node)) { /* We can unpack a remote node's history only after we've unpacked * other resource history to the point that we know that the node's * connection is up, with the exception of when shutdown locks are * in use. */ pcmk_resource_t *rsc = this_node->priv->remote; if ((rsc == NULL) || (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock) && (rsc->priv->orig_role != pcmk_role_started))) { crm_trace("Not unpacking resource history for remote node %s " "because connection is not known to be up", id); continue; } /* If fencing and shutdown locks are disabled and we're not processing * unseen nodes, then we don't want to unpack offline nodes until online * nodes have been unpacked. This allows us to number active clone * instances first. */ } else if (!pcmk_any_flags_set(scheduler->flags, pcmk__sched_fencing_enabled |pcmk__sched_shutdown_lock) && !this_node->details->online) { crm_trace("Not unpacking resource history for offline " "cluster node %s", id); continue; } if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); unpack_handle_remote_attrs(this_node, state, scheduler); } crm_trace("Unpacking resource history for %snode %s", (fence? 
"unseen " : ""), id); pcmk__set_node_flags(this_node, pcmk__node_unpacked); unpack_node_lrm(this_node, state, scheduler); rc = EAGAIN; // Other node histories might depend on this one } return rc; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? */ gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler) { xmlNode *state = NULL; crm_trace("Beginning unpack"); if (scheduler->priv->ticket_constraints == NULL) { scheduler->priv->ticket_constraints = pcmk__strkey_table(free, destroy_ticket); } for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL; state = pcmk__xe_next(state, NULL)) { if (pcmk__xe_is(state, PCMK_XE_TICKETS)) { pcmk__xe_foreach_child(state, PCMK__XE_TICKET_STATE, unpack_ticket_state, scheduler); } else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { unpack_node_state(state, scheduler); } } while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) { crm_trace("Another pass through node resource histories is needed"); } // Now catch any nodes we didn't see unpack_node_history(status, pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled), scheduler); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (scheduler->priv->stop_needed != NULL) { for (GList *item = scheduler->priv->stop_needed; item != NULL; item = item->next) { pcmk_resource_t *container = item->data; pcmk_node_t *node = pcmk__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(scheduler->priv->stop_needed); scheduler->priv->stop_needed = NULL; } /* Now that we know status of all Pacemaker Remote connections and nodes, * we can stop connections for node shutdowns, and check the online status * of remote/guest nodes that didn't have any node history to unpack. */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *this_node = gIter->data; if (!pcmk__is_pacemaker_remote_node(this_node)) { continue; } if (this_node->details->shutdown && (this_node->priv->remote != NULL)) { pe__set_next_role(this_node->priv->remote, pcmk_role_stopped, "remote shutdown"); } if (!pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) { determine_remote_online_status(scheduler, this_node); } } return TRUE; } /*! * \internal * \brief Unpack node's time when it became a member at the cluster layer * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * \param[in,out] scheduler Scheduler data * * \return Epoch time when node became a cluster member * (or scheduler effective time for legacy entries) if a member, * 0 if not a member, or -1 if no valid information available */ static long long unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler) { const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM); int member = 0; if (member_time == NULL) { return -1LL; } else if (crm_str_to_boolean(member_time, &member) == 1) { /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was * recorded as a boolean for a DC < 2.1.7, or the node is pending * shutdown and has left the CPG, in which case it was set to 1 to avoid * fencing for PCMK_OPT_NODE_PENDING_TIMEOUT. * * We return the effective time for in_ccm=1 because what's important to * avoid fencing is that effective time minus this value is less than * the pending node timeout. */ return member? 
(long long) pcmk__scheduler_epoch_time(scheduler) : 0LL; } else { long long when_member = 0LL; if ((pcmk__scan_ll(member_time, &when_member, 0LL) != pcmk_rc_ok) || (when_member < 0LL)) { crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM " in " PCMK__XE_NODE_STATE " entry", member_time); return -1LL; } return when_member; } } /*! * \internal * \brief Unpack node's time when it became online in process group * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * * \return Epoch time when node became online in process group (or 0 if not * online, or 1 for legacy online entries) */ static long long unpack_node_online(const xmlNode *node_state) { const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD); // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline" if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE, pcmk__str_casei|pcmk__str_null_matches)) { return 0LL; } else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) { return 1LL; } else { long long when_online = 0LL; if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok) || (when_online < 0)) { crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in " PCMK__XE_NODE_STATE " entry, assuming offline", peer_time); return 0LL; } return when_online; } } /*! * \internal * \brief Unpack node attribute for user-requested fencing * * \param[in] node Node to check * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status * * \return \c true if fencing has been requested for \p node, otherwise \c false */ static bool unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state) { long long value = 0LL; int value_i = 0; int rc = pcmk_rc_ok; const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE, NULL, pcmk__rsc_node_current); // Value may be boolean or an epoch time if (crm_str_to_boolean(value_s, &value_i) == 1) { return (value_i != 0); } rc = pcmk__scan_ll(value_s, &value, 0LL); if (rc == pcmk_rc_ok) { return (value > 0); } crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE " node attribute for %s: %s", value_s, pcmk__node_name(node), pcmk_rc_str(rc)); return false; } static gboolean determine_online_status_no_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); if (when_member <= 0) { crm_trace("Node %s is %sdown", pcmk__node_name(this_node), ((when_member < 0)? "presumed " : "")); } else if (when_online > 0) { if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { online = TRUE; } else { crm_debug("Node %s is not ready to run resources: %s", pcmk__node_name(this_node), join); } } else if (!pcmk_is_set(this_node->priv->flags, pcmk__node_expected_up)) { crm_trace("Node %s controller is down: " "member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } else { /* mark it unclean */ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE); crm_info("Node %s member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } return online; } /*! 
* \internal * \brief Check whether a node has taken too long to join controller group * * \param[in,out] scheduler Scheduler data * \param[in] node Node to check * \param[in] when_member Epoch time when node became a cluster member * \param[in] when_online Epoch time when node joined controller group * * \return true if node has been pending (on the way up) longer than * \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false * \note This will also update the cluster's recheck time if appropriate. */ static inline bool pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, long long when_member, long long when_online) { if ((scheduler->priv->node_pending_ms > 0U) && (when_member > 0) && (when_online <= 0)) { // There is a timeout on pending nodes, and node is pending time_t timeout = when_member + pcmk__timeout_ms2s(scheduler->priv->node_pending_ms); if (pcmk__scheduler_epoch_time(node->priv->scheduler) >= timeout) { return true; // Node has timed out } // Node is pending, but still has time pcmk__update_recheck_time(timeout, scheduler, "pending node timeout"); } return false; } static bool determine_online_status_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { bool termination_requested = unpack_node_terminate(this_node, node_state); const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); /* - PCMK__XA_JOIN ::= member|down|pending|banned - PCMK_XA_EXPECTED ::= member|down @COMPAT with entries recorded for DCs < 2.1.7 - PCMK__XA_IN_CCM ::= true|false - PCMK_XA_CRMD ::= online|offline Since crm_feature_set 3.18.0 (pacemaker-2.1.7): - PCMK__XA_IN_CCM ::= <timestamp>|0 Since when the node has been a cluster member. A value of 0 means the node is not a cluster member. - PCMK_XA_CRMD ::= <timestamp>|0 Since when the peer has been online in CPG. A value of 0 means the peer is offline in CPG. */ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, ""), (termination_requested? 
" (termination requested)" : "")); if (this_node->details->shutdown) { crm_debug("%s is shutting down", pcmk__node_name(this_node)); /* Slightly different criteria since we can't shut down a dead peer */ return (when_online > 0); } if (when_member < 0) { pe_fence_node(scheduler, this_node, "peer has not been seen by the cluster", FALSE); return false; } if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) { pe_fence_node(scheduler, this_node, "peer failed Pacemaker membership criteria", FALSE); } else if (termination_requested) { if ((when_member <= 0) && (when_online <= 0) && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) { crm_info("%s was fenced as requested", pcmk__node_name(this_node)); return false; } pe_fence_node(scheduler, this_node, "fencing was requested", false); } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_null_matches)) { if (pending_too_long(scheduler, this_node, when_member, when_online)) { pe_fence_node(scheduler, this_node, "peer pending timed out on joining the process group", FALSE); } else if ((when_member > 0) || (when_online > 0)) { crm_info("- %s is not ready to run resources", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", pcmk__node_name(this_node)); } } else if (when_member <= 0) { // Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes pe_fence_node(scheduler, this_node, "peer is no longer part of the cluster", TRUE); } else if (when_online <= 0) { pe_fence_node(scheduler, this_node, "peer process is no longer available", FALSE); /* Everything is running at this point, now check join state */ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) { crm_info("%s is active", pcmk__node_name(this_node)); } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) { crm_info("%s is not ready to run resources", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); this_node->details->pending = TRUE; } else { pe_fence_node(scheduler, this_node, "peer was in an unknown state", FALSE); } return (when_member > 0); } static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node) { pcmk_resource_t *rsc = this_node->priv->remote; pcmk_resource_t *launcher = NULL; pcmk_node_t *host = NULL; const char *node_type = "Remote"; if (rsc == NULL) { /* This is a leftover node state entry for a former Pacemaker Remote * node whose connection resource was removed. Consider it offline. */ crm_trace("Pacemaker Remote node %s is considered OFFLINE because " "its connection resource has been removed from the CIB", this_node->priv->id); this_node->details->online = FALSE; return; } launcher = rsc->priv->launcher; if (launcher != NULL) { node_type = "Guest"; if (pcmk__list_of_1(rsc->priv->active_nodes)) { host = rsc->priv->active_nodes->data; } } /* If the resource is currently started, mark it online. */ if (rsc->priv->orig_role == pcmk_role_started) { this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if ((rsc->priv->orig_role == pcmk_role_started) && (rsc->priv->next_role == pcmk_role_stopped)) { crm_trace("%s node %s shutting down because connection resource is stopping", node_type, this_node->priv->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. 
*/ if ((launcher != NULL) && pcmk_is_set(launcher->flags, pcmk__rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->priv->id); this_node->details->online = FALSE; pcmk__set_node_flags(this_node, pcmk__node_remote_reset); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", node_type, this_node->priv->id); this_node->details->online = FALSE; } else if ((rsc->priv->orig_role == pcmk_role_stopped) || ((launcher != NULL) && (launcher->priv->orig_role == pcmk_role_stopped))) { crm_trace("%s node %s OFFLINE because its resource is stopped", node_type, this_node->priv->id); this_node->details->online = FALSE; pcmk__clear_node_flags(this_node, pcmk__node_remote_reset); } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->priv->id); this_node->details->online = FALSE; pcmk__set_node_flags(this_node, pcmk__node_remote_reset); } else { crm_trace("%s node %s is %s", node_type, this_node->priv->id, this_node->details->online? "ONLINE" : "OFFLINE"); } } static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); CRM_CHECK(this_node != NULL, return); this_node->details->shutdown = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { pcmk__set_node_flags(this_node, pcmk__node_expected_up); } if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { online = determine_online_status_no_fencing(scheduler, node_state, this_node); } else { online = determine_online_status_fencing(scheduler, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->assign->score = -PCMK_SCORE_INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->assign->score = -PCMK_SCORE_INFINITY; } if (this_node->details->unclean) { pcmk__sched_warn(scheduler, "%s is unclean", pcmk__node_name(this_node)); } else if (!this_node->details->online) { crm_trace("%s is offline", pcmk__node_name(this_node)); } else if (this_node->details->shutdown) { crm_info("%s is shutting down", pcmk__node_name(this_node)); } else if (this_node->details->pending) { crm_info("%s is pending", pcmk__node_name(this_node)); } else if (pcmk_is_set(this_node->priv->flags, pcmk__node_standby)) { crm_info("%s is in standby", pcmk__node_name(this_node)); } else if (this_node->details->maintenance) { crm_info("%s is in maintenance", pcmk__node_name(this_node)); } else { crm_info("%s is online", pcmk__node_name(this_node)); } } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!pcmk__str_empty(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! 
* \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; pcmk__assert(end != NULL); basename = strndup(last_rsc_id, end - last_rsc_id + 1); pcmk__assert(basename != NULL); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; pcmk__assert(end != NULL); zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char)); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static pcmk_resource_t * create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE); pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none); crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { pcmk_node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pcmk_find_node(scheduler, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, 0, scheduler); } link_rsc2remotenode(scheduler, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) { // This removed resource needs to be mapped to a launcher crm_trace("Launched resource %s was removed from the configuration", rsc_id); pcmk__set_rsc_flags(rsc, pcmk__rsc_removed_launched); } pcmk__set_rsc_flags(rsc, pcmk__rsc_removed); scheduler->priv->resources = g_list_append(scheduler->priv->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history * * \param[in,out] parent Clone resource that orphan will be added to * \param[in] rsc_id Orphan's resource ID * \param[in] node Where orphan is active (for logging only) * \param[in,out] scheduler Scheduler data * * \return Newly added orphaned instance of \p parent */ static pcmk_resource_t * create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *top = pe__create_clone_child(parent, scheduler); pcmk_resource_t *orphan = NULL; // find_rsc() because we might be a cloned group orphan = top->priv->fns->find_rsc(top, rsc_id, NULL, pcmk_rsc_match_clone_only); pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, pcmk__node_name(node)); return orphan; } /*! 
* \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX * instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX * instances are already active). * * \param[in,out] scheduler Scheduler data * \param[in] node Node on which to check for instance * \param[in,out] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (no instance) */ static pcmk_resource_t * find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, pcmk_resource_t *parent, const char *rsc_id) { GList *rIter = NULL; pcmk_resource_t *rsc = NULL; pcmk_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; pcmk__assert(pcmk__is_anonymous_clone(parent)); // Check for active (or partially active, for cloned groups) instance pcmk__rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, pcmk__node_name(node), parent->id); for (rIter = parent->priv->children; (rIter != NULL) && (rsc == NULL); rIter = rIter->next) { GList *locations = NULL; pcmk_resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if * PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and * (3) when we re-run calculations on the same scheduler data as part of * a simulation. */ child->priv->fns->location(child, &locations, pcmk__rsc_node_current |pcmk__rsc_node_pending); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (pcmk__same_node((pcmk_node_t *) locations->data, node)) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->priv->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if PCMK_META_GLOBALLY_UNIQUE is switched from true * to false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. 
*/ if (rsc->priv->active_nodes != NULL) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, pcmk__node_name(node)); skip_inactive = TRUE; rsc = NULL; } else { pcmk__rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && !pcmk_is_set(child->flags, pcmk__rsc_blocked)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->priv->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); /* ... but don't use it if it was already associated with a * pending action on another node */ if (inactive_instance != NULL) { const pcmk_node_t *pending_node = NULL; pending_node = inactive_instance->priv->pending_node; if ((pending_node != NULL) && !pcmk__same_node(pending_node, node)) { inactive_instance = NULL; } } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pcmk__rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or * PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we * don't want to consume a valid instance number for unclean nodes. Such * instances may appear to be active according to the history, but should be * considered inactive, so we can start an instance elsewhere. Treat such * instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pcmk__is_guest_or_bundle_node(node) && !pe__is_universal_clone(parent, scheduler)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler); pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static pcmk_resource_t * unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, const char *rsc_id) { pcmk_resource_t *rsc = NULL; pcmk_resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0, * we create a single :0 orphan to match against here. 
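     * (Editor's example, with a hypothetical name: history recorded as
     * "myclone" is looked up verbatim first, then as "myclone:0", which
     * exists even when clone-max is 0.)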
*/ char *clone0_id = clone_zero(rsc_id); pcmk_resource_t *clone0 = pe_find_resource(scheduler->priv->resources, clone0_id); if (clone0 && !pcmk_is_set(clone0->flags, pcmk__rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->priv->variant > pcmk__rsc_variant_primitive) { crm_trace("Resource history for %s is orphaned " "because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pcmk__is_anonymous_clone(parent)) { if (pcmk__is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->priv->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(scheduler, node, parent, base); free(base); pcmk__assert(rsc != NULL); } } if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none) && !pcmk__str_eq(rsc_id, rsc->priv->history_id, pcmk__str_none)) { pcmk__str_update(&(rsc->priv->history_id), rsc_id); pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, pcmk__node_name(node), rsc->id, pcmk_is_set(rsc->flags, pcmk__rsc_removed)? " (ORPHAN)" : ""); } return rsc; } static pcmk_resource_t * process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, pcmk__node_name(node)); rsc = create_fake_resource(rsc_id, rsc_entry, scheduler); if (rsc == NULL) { return NULL; } if (!pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -PCMK_SCORE_INFINITY, "__orphan_do_not_run__", scheduler); } return rsc; } static void process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node, enum pcmk__on_fail on_fail) { pcmk_node_t *tmpnode = NULL; char *reason = NULL; enum pcmk__on_fail save_on_fail = pcmk__on_fail_ignore; pcmk_scheduler_t *scheduler = NULL; bool known_active = false; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; known_active = (rsc->priv->orig_role > pcmk_role_stopped); pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(node), pcmk__on_fail_text(on_fail)); /* process current state */ if (rsc->priv->orig_role != pcmk_role_unknown) { pcmk_resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->priv->probed_nodes, node->priv->id) == NULL) { pcmk_node_t *n = pe__copy_node(node); pcmk__rsc_trace(rsc, "%s (%s in history) known on %s", rsc->id, pcmk__s(rsc->priv->history_id, "the same"), pcmk__node_name(n)); g_hash_table_insert(iter->priv->probed_nodes, (gpointer) n->priv->id, n); } if (pcmk_is_set(iter->flags, pcmk__rsc_unique)) { break; } iter = iter->priv->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if (known_active && !node->details->online && !node->details->maintenance && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. 
When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (pcmk__is_guest_or_bundle_node(node)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); should_fence = TRUE; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { if (pcmk__is_remote_node(node) && (node->priv->remote != NULL) && !pcmk_is_set(node->priv->remote->flags, pcmk__rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. */ pcmk__clear_node_flags(node, pcmk__node_seen); reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(scheduler, node, reason, FALSE); } free(reason); } /* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). */ save_on_fail = on_fail; if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = pcmk__on_fail_ignore; } switch (on_fail) { case pcmk__on_fail_ignore: /* nothing to do */ break; case pcmk__on_fail_demote: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed); demote_action(rsc, node, FALSE); break; case pcmk__on_fail_fence_node: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(scheduler, node, reason, FALSE); free(reason); break; case pcmk__on_fail_standby_node: pcmk__set_node_flags(node, pcmk__node_standby|pcmk__node_fail_standby); break; case pcmk__on_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(rsc, pcmk__rsc_blocked); break; case pcmk__on_fail_ban: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -PCMK_SCORE_INFINITY, "__action_migration_auto__", scheduler); break; case pcmk__on_fail_stop: pe__set_next_role(rsc, pcmk_role_stopped, PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP); break; case pcmk__on_fail_restart: if (known_active) { pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); stop_action(rsc, node, FALSE); } break; case pcmk__on_fail_restart_container: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); if ((rsc->priv->launcher != NULL) && pcmk__is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. 
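     * (Editor's sketch: for a bundle whose connection resource fails with
     * on-fail="restart-container", the container lands on stop_needed here
     * and is stopped once its location is known, rather than immediately.)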
*/
                scheduler->priv->stop_needed =
                    g_list_prepend(scheduler->priv->stop_needed,
                                   rsc->priv->launcher);
            } else if (rsc->priv->launcher != NULL) {
                stop_action(rsc->priv->launcher, node, FALSE);
            } else if (known_active) {
                stop_action(rsc, node, FALSE);
            }
            break;

        case pcmk__on_fail_reset_remote:
            pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
            if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
                tmpnode = NULL;
                if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
                    tmpnode = pcmk_find_node(scheduler, rsc->id);
                }
                if (pcmk__is_remote_node(tmpnode)
                    && !pcmk_is_set(tmpnode->priv->flags,
                                    pcmk__node_remote_fenced)) {
                    /* The remote connection resource failed in a way that
                     * should result in fencing the remote node.
                     */
                    pe_fence_node(scheduler, tmpnode,
                                  "remote connection is unrecoverable", FALSE);
                }
            }

            // Require the stop action regardless of whether fencing occurs
            if (known_active) {
                stop_action(rsc, node, FALSE);
            }

            /* If reconnect delay is in use, prevent the connection from
             * exiting the "STOPPED" role until the failure is cleared by the
             * delay timeout.
             */
            if (rsc->priv->remote_reconnect_ms > 0U) {
                pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
            }
            break;
    }

    /* Ensure a remote connection failure forces an unclean Pacemaker Remote
     * node to be fenced. By marking the node as seen, the failure will result
     * in a fencing operation regardless of whether we're going to attempt to
     * reconnect in this transition.
     */
    if (pcmk_all_flags_set(rsc->flags,
                           pcmk__rsc_failed|pcmk__rsc_is_remote_connection)) {
        tmpnode = pcmk_find_node(scheduler, rsc->id);
        if (tmpnode && tmpnode->details->unclean) {
            pcmk__set_node_flags(tmpnode, pcmk__node_seen);
        }
    }

    if (known_active) {
        if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
            if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
                crm_notice("Removed resource %s is active on %s and will be "
                           "stopped when possible",
                           rsc->id, pcmk__node_name(node));
            } else {
                crm_notice("Removed resource %s must be stopped manually on %s "
                           "because " PCMK_OPT_STOP_ORPHAN_RESOURCES
                           " is set to false", rsc->id, pcmk__node_name(node));
            }
        }

        native_add_running(rsc, node, scheduler,
                           (save_on_fail != pcmk__on_fail_ignore));
        switch (on_fail) {
            case pcmk__on_fail_ignore:
                break;
            case pcmk__on_fail_demote:
            case pcmk__on_fail_block:
                pcmk__set_rsc_flags(rsc, pcmk__rsc_failed);
                break;
            default:
                pcmk__set_rsc_flags(rsc,
                                    pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
                break;
        }

    } else if ((rsc->priv->history_id != NULL)
               && (strchr(rsc->priv->history_id, ':') != NULL)) {
        /* @COMPAT This is for older (<1.1.8) status sections that included
         * instance numbers, otherwise stopped instances are considered
         * orphans.
         *
         * @TODO We should be able to drop this, but some old regression tests
         * will need to be updated. Double-check that this is not still needed
         * for unique clones (which may have been later converted to
         * anonymous).
         */
        pcmk__rsc_trace(rsc, "Clearing history ID %s for %s (stopped)",
                        rsc->priv->history_id, rsc->id);
        free(rsc->priv->history_id);
        rsc->priv->history_id = NULL;

    } else {
        GList *possible_matches = pe__resource_actions(rsc, node,
                                                       PCMK_ACTION_STOP,
                                                       FALSE);
        GList *gIter = possible_matches;

        for (; gIter != NULL; gIter = gIter->next) {
            pcmk_action_t *stop = (pcmk_action_t *) gIter->data;

            pcmk__set_action_flags(stop, pcmk__action_optional);
        }

        g_list_free(possible_matches);
    }

    /* A successful stop after migrate_to on the migration source doesn't make
     * the partially migrated resource stopped on the migration target.
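     * (Editor's illustration with hypothetical nodes: a resource migrating
     * node1 -> node2 may record migrate_to and a later stop on node1 while
     * still being active on node2, so its role is restored to started below.)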
*/ if ((rsc->priv->orig_role == pcmk_role_stopped) && (rsc->priv->active_nodes != NULL) && (rsc->priv->partial_migration_target != NULL) && pcmk__same_node(rsc->priv->partial_migration_source, node)) { rsc->priv->orig_role = pcmk_role_started; } } /* create active recurring operations as optional */ static void process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc, int start_index, int stop_index, GList *sorted_op_list, pcmk_scheduler_t *scheduler) { int counter = -1; const char *task = NULL; const char *status = NULL; GList *gIter = sorted_op_list; pcmk__assert(rsc != NULL); pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = pcmk__xe_id(rsc_op); counter++; if (node->details->online == FALSE) { pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline", rsc->id, pcmk__node_name(node)); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active", id, pcmk__node_name(node)); continue; } else if (counter < start_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d", id, pcmk__node_name(node), counter); continue; } crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring", id, pcmk__node_name(node)); continue; } status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(status, "-1", pcmk__str_casei)) { pcmk__rsc_trace(rsc, "Skipping %s on %s: status", id, pcmk__node_name(node)); continue; } task = crm_element_value(rsc_op, PCMK_XA_OPERATION); /* create the action */ key = pcmk__op_key(rsc->id, task, interval_ms); pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node)); custom_action(rsc, key, task, node, TRUE, scheduler); } } void calculate_active_ops(const GList *sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; *stop_index = -1; *start_index = -1; for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { const xmlNode *rsc_op = (const xmlNode *) iter->data; counter++; task = crm_element_value(rsc_op, PCMK_XA_OPERATION); status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei) && pcmk__str_eq(status, "0", pcmk__str_casei)) { *stop_index = counter; } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE); if (pcmk__strcase_any_of(rc, "0", "8", NULL)) { implied_monitor_start = counter; } } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, NULL)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { time_t lock_time = 0; // 
// When lock started (i.e. node shutdown time)
    if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
                                 &lock_time) == pcmk_ok)
        && (lock_time != 0)) {

        if ((scheduler->priv->shutdown_lock_ms > 0U)
            && (pcmk__scheduler_epoch_time(scheduler)
                > (lock_time
                   + pcmk__timeout_ms2s(scheduler->priv->shutdown_lock_ms)))) {
            pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
                           rsc->id, pcmk__node_name(node));
            pe__clear_resource_history(rsc, node);
        } else {
            rsc->priv->lock_node = node;
            rsc->priv->lock_time = lock_time;
        }
    }
}

/*!
 * \internal
 * \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
 *
 * \param[in,out] node          Node whose status is being unpacked
 * \param[in]     lrm_resource  \c PCMK__XE_LRM_RESOURCE XML being unpacked
 * \param[in,out] scheduler     Scheduler data
 *
 * \return Resource corresponding to the entry, or NULL if no operation history
 */
static pcmk_resource_t *
unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
                    pcmk_scheduler_t *scheduler)
{
    GList *gIter = NULL;
    int stop_index = -1;
    int start_index = -1;
    enum rsc_role_e req_role = pcmk_role_unknown;

    const char *rsc_id = pcmk__xe_id(lrm_resource);

    pcmk_resource_t *rsc = NULL;
    GList *op_list = NULL;
    GList *sorted_op_list = NULL;

    xmlNode *rsc_op = NULL;
    xmlNode *last_failure = NULL;

    enum pcmk__on_fail on_fail = pcmk__on_fail_ignore;
    enum rsc_role_e saved_role = pcmk_role_unknown;

    if (rsc_id == NULL) {
        pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
                         " entry: No " PCMK_XA_ID);
        crm_log_xml_info(lrm_resource, "missing-id");
        return NULL;
    }
    crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
              rsc_id, pcmk__node_name(node));

    /* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
     * them
     */
    for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
                                       NULL, NULL);
         rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) {

        op_list = g_list_prepend(op_list, rsc_op);
    }

    if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
        if (op_list == NULL) {
            // If there are no operations, there is nothing to do
            return NULL;
        }
    }

    /* find the resource */
    rsc = unpack_find_resource(scheduler, node, rsc_id);
    if (rsc == NULL) {
        if (op_list == NULL) {
            // If there are no operations, there is nothing to do
            return NULL;
        } else {
            rsc = process_orphan_resource(lrm_resource, node, scheduler);
        }
    }
    pcmk__assert(rsc != NULL);

    // Check whether the resource is "shutdown-locked" to this node
    if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
        unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
    }

    /* process operations */
    saved_role = rsc->priv->orig_role;
    rsc->priv->orig_role = pcmk_role_unknown;
    sorted_op_list = g_list_sort(op_list, sort_op_by_callid);

    for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
        xmlNode *rsc_op = (xmlNode *) gIter->data;

        unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
    }

    /* create active recurring operations as optional */
    calculate_active_ops(sorted_op_list, &start_index, &stop_index);
    process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
                      scheduler);

    /* no need to free the contents */
    g_list_free(sorted_op_list);

    process_rsc_state(rsc, node, on_fail);

    if (get_target_role(rsc, &req_role)) {
        if ((rsc->priv->next_role == pcmk_role_unknown)
            || (req_role < rsc->priv->next_role)) {

            pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);

        } else if (req_role > rsc->priv->next_role) {
            pcmk__rsc_info(rsc,
                           "%s: Not overwriting calculated next role %s"
                           " with requested next role %s",
                           rsc->id,
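                           /* Editor's note: roles order as stopped < started
                            * < unpromoted < promoted, so e.g. a configured
                            * target-role="Unpromoted" can demote a calculated
                            * promoted role above, while target-role="Promoted"
                            * cannot raise a calculated started role; that
                            * second case is what this message reports. */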
pcmk_role_text(rsc->priv->next_role), pcmk_role_text(req_role)); } } if (saved_role > rsc->priv->orig_role) { rsc->priv->orig_role = saved_role; } return rsc; } static void handle_removed_launched_resources(const xmlNode *lrm_rsc_list, pcmk_scheduler_t *scheduler) { for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, PCMK__XE_LRM_RESOURCE, NULL, NULL); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) { pcmk_resource_t *rsc; pcmk_resource_t *launcher = NULL; const char *rsc_id; const char *launcher_id = NULL; launcher_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER); rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); if ((launcher_id == NULL) || (rsc_id == NULL)) { continue; } launcher = pe_find_resource(scheduler->priv->resources, launcher_id); if (launcher == NULL) { continue; } rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if ((rsc == NULL) || (rsc->priv->launcher != NULL) || !pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) { continue; } pcmk__rsc_trace(rsc, "Mapped launcher of removed resource %s to %s", rsc->id, launcher_id); rsc->priv->launcher = launcher; launcher->priv->launched = g_list_append(launcher->priv->launched, rsc); } } /*! * \internal * \brief Unpack one node's lrm status section * * \param[in,out] node Node whose status is being unpacked * \param[in] xml CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler) { bool found_removed_launched_resource = false; // Drill down to PCMK__XE_LRM_RESOURCES section xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL); if (xml == NULL) { return; } xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL); if (xml == NULL) { return; } // Unpack each PCMK__XE_LRM_RESOURCE entry for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCE, NULL, NULL); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) { pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler); if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) { found_removed_launched_resource = true; } } /* Now that all resource state has been unpacked for this node, map any * removed launched resources to their launchers. 
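     * (Editor's example: if a removed resource's history entry still carries
     * the container meta-attribute naming launcher C, the resource is
     * re-linked to C here.)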
*/ if (found_removed_launched_resource) { handle_removed_launched_resources(xml, scheduler); } } static void set_active(pcmk_resource_t *rsc) { const pcmk_resource_t *top = pe__const_top_resource(rsc, false); if (top && pcmk_is_set(top->flags, pcmk__rsc_promotable)) { rsc->priv->orig_role = pcmk_role_unpromoted; } else { rsc->priv->orig_role = pcmk_role_started; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pcmk_node_t *node = value; int *score = user_data; node->assign->score = *score; } #define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \ "/" PCMK__XE_NODE_STATE #define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \ "/" PCMK__XE_LRM_RESOURCES \ "/" PCMK__XE_LRM_RESOURCE #define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, int target_rc, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']" SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'", NULL); /* Need to check against transition_magic too? */ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_TARGET "='", source, "']", NULL); } else if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_SOURCE "='", source, "']", NULL); } else { g_string_append_c(xpath, ']'); } xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); if (xml && target_rc >= 0) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_EXEC_ERROR; crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc); crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status); if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) { return NULL; } } return xml; } static xmlNode * find_lrm_resource(const char *rsc_id, const char *node_name, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']", NULL); xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); return xml; } /*! * \internal * \brief Check whether a resource has no completed action history on a node * * \param[in,out] rsc Resource to check * \param[in] node_name Node to check * * \return true if \p rsc_id is unknown on \p node_name, otherwise false */ static bool unknown_on_node(pcmk_resource_t *rsc, const char *node_name) { bool result = false; xmlXPathObject *search; char *xpath = NULL; xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" SUB_XPATH_LRM_RSC_OP "[@" PCMK__XA_RC_CODE "!='%d']", node_name, rsc->id, PCMK_OCF_UNKNOWN); search = pcmk__xpath_search(rsc->priv->scheduler->input->doc, xpath); result = (pcmk__xpath_num_results(search) == 0); - freeXpathObject(search); + xmlXPathFreeObject(search); free(xpath); return result; } /*! 
 * \internal
 * \brief Check whether a probe/monitor indicating the resource was not running
 *        on a node happened after some event
 *
 * \param[in]     rsc_id     Resource being checked
 * \param[in]     node_name  Node being checked
 * \param[in]     xml_op     Event that monitor is being compared to
 * \param[in,out] scheduler  Scheduler data
 *
 * \return true if such a monitor happened after the event, false otherwise
 */
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
                          const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
{
    /* Any probe/monitor operation on the node indicating it was not running
     * there
     */
    xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
                                   NULL, PCMK_OCF_NOT_RUNNING, scheduler);

    return (monitor != NULL) && (pe__is_newer_op(monitor, xml_op) > 0);
}

/*!
 * \internal
 * \brief Check whether any non-monitor operation on a node happened after some
 *        event
 *
 * \param[in]     rsc_id     Resource being checked
 * \param[in]     node_name  Node being checked
 * \param[in]     xml_op     Event that non-monitor is being compared to
 * \param[in,out] scheduler  Scheduler data
 *
 * \return true if such an operation happened after the event, false otherwise
 */
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
                  const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
{
    xmlNode *lrm_resource = NULL;

    lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
    if (lrm_resource == NULL) {
        return false;
    }

    for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
                                            NULL, NULL);
         op != NULL; op = pcmk__xe_next(op, PCMK__XE_LRM_RSC_OP)) {

        const char *task = NULL;

        if (op == xml_op) {
            continue;
        }

        task = crm_element_value(op, PCMK_XA_OPERATION);

        if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
                             PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
                             NULL)
            && pe__is_newer_op(op, xml_op) > 0) {
            return true;
        }
    }

    return false;
}

/*!
 * \internal
 * \brief Check whether the resource has newer state on a node after a
 *        migration attempt
 *
 * \param[in]     rsc_id        Resource being checked
 * \param[in]     node_name     Node being checked
 * \param[in]     migrate_to    Any migrate_to event that is being compared to
 * \param[in]     migrate_from  Any migrate_from event that is being compared to
 * \param[in,out] scheduler     Scheduler data
 *
 * \return true if such an operation happened after the event, false otherwise
 */
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
                          const xmlNode *migrate_to,
                          const xmlNode *migrate_from,
                          pcmk_scheduler_t *scheduler)
{
    const xmlNode *xml_op = (migrate_from != NULL)? migrate_from : migrate_to;
    const char *source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);

    /* It's preferable to compare to the migrate event on the same node if one
     * exists, since call IDs are more reliable.
     */
    if ((xml_op != migrate_to) && (migrate_to != NULL)
        && pcmk__str_eq(node_name, source, pcmk__str_casei)) {

        xml_op = migrate_to;
    }

    /* If there's any newer non-monitor operation on the node, or any newer
     * probe/monitor operation on the node indicating it was not running there,
     * the migration events potentially no longer matter for the node.
     */
    return non_monitor_after(rsc_id, node_name, xml_op, scheduler)
           || monitor_not_running_after(rsc_id, node_name, xml_op, scheduler);
}

/*!
* \internal * \brief Parse migration source and target node names from history entry * * \param[in] entry Resource history entry for a migration action * \param[in] source_node If not NULL, source must match this node * \param[in] target_node If not NULL, target must match this node * \param[out] source_name Where to store migration source node name * \param[out] target_name Where to store migration target node name * * \return Standard Pacemaker return code */ static int get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node, const pcmk_node_t *target_node, const char **source_name, const char **target_name) { *source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE); *target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET); if ((*source_name == NULL) || (*target_name == NULL)) { pcmk__config_err("Ignoring resource history entry %s without " PCMK__META_MIGRATE_SOURCE " and " PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry)); return pcmk_rc_unpack_error; } if ((source_node != NULL) && !pcmk__str_eq(*source_name, source_node->priv->name, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_SOURCE "='%s' does not match %s", pcmk__xe_id(entry), *source_name, pcmk__node_name(source_node)); return pcmk_rc_unpack_error; } if ((target_node != NULL) && !pcmk__str_eq(*target_name, target_node->priv->name, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_TARGET "='%s' does not match %s", pcmk__xe_id(entry), *target_name, pcmk__node_name(target_node)); return pcmk_rc_unpack_error; } return pcmk_rc_ok; } /* * \internal * \brief Add a migration source to a resource's list of dangling migrations * * If the migrate_to and migrate_from actions in a live migration both * succeeded, but there is no stop on the source, the migration is considered * "dangling." Add the source to the resource's dangling migration list, which * will be used to schedule a stop on the source without affecting the target. * * \param[in,out] rsc Resource involved in migration * \param[in] node Migration source */ static void add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node) { pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s", rsc->id, pcmk__node_name(node)); rsc->priv->orig_role = pcmk_role_stopped; rsc->priv->dangling_migration_sources = g_list_prepend(rsc->priv->dangling_migration_sources, (gpointer) node); } /*! * \internal * \brief Update resource role etc. after a successful migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_success(struct action_history *history) { /* A complete migration sequence is: * 1. migrate_to on source node (which succeeded if we get to this function) * 2. migrate_from on target node * 3. stop on source node * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from succeeded but no stop has happened, the * migration is considered to be "dangling". * * If a successful migrate_to and stop have happened on the source node, we * still need to check for a partial migration, due to scenarios (easier to * produce with batch-limit=1) like: * * - A resource is migrating from node1 to node2, and a migrate_to is * initiated for it on node1. * * - node2 goes into standby mode while the migrate_to is pending, which * aborts the transition. 
* * - Upon completion of the migrate_to, a new transition schedules a stop * on both nodes and a start on node1. * * - If the new transition is aborted for any reason while the resource is * stopping on node1, the transition after that stop completes will see * the migrate_to and stop on the source, but it's still a partial * migration, and the resource must be stopped on node2 because it is * potentially active there due to the migrate_to. * * We also need to take into account that either node's history may be * cleared at any point in the migration process. */ int from_rc = PCMK_OCF_OK; int from_status = PCMK_EXEC_PENDING; pcmk_node_t *target_node = NULL; xmlNode *migrate_from = NULL; const char *source = NULL; const char *target = NULL; bool source_newer_op = false; bool target_newer_state = false; bool active_on_target = false; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } // Check for newer state on the source source_newer_op = non_monitor_after(history->rsc->id, source, history->xml, scheduler); // Check for a migrate_from action from this source on the target migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, -1, scheduler); if (migrate_from != NULL) { if (source_newer_op) { /* There's a newer non-monitor operation on the source and a * migrate_from on the target, so this migrate_to is irrelevant to * the resource's state. */ return; } crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc); crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status); } /* If the resource has newer state on both the source and target after the * migration events, this migrate_to is irrelevant to the resource's state. */ target_newer_state = newer_state_after_migrate(history->rsc->id, target, history->xml, migrate_from, scheduler); if (source_newer_op && target_newer_state) { return; } /* Check for dangling migration (migrate_from succeeded but stop not done). * We know there's no stop because we already returned if the target has a * migrate_from and the source has any newer non-monitor operation. */ if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) { add_dangling_migration(history->rsc, history->node); return; } /* Without newer state, this migrate_to implies the resource is active. * (Clones are not allowed to migrate, so role can't be promoted.) */ history->rsc->priv->orig_role = pcmk_role_started; target_node = pcmk_find_node(scheduler, target); active_on_target = !target_newer_state && (target_node != NULL) && target_node->details->online; if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target if (active_on_target) { native_add_running(history->rsc, target_node, scheduler, TRUE); } else { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable); } return; } // The migrate_from is pending, complete but erased, or to be scheduled /* If there is no history at all for the resource on an online target, then * it was likely cleaned. Just return, and we'll schedule a probe. Once we * have the probe result, it will be reflected in target_newer_state. 
*/ if ((target_node != NULL) && target_node->details->online && unknown_on_node(history->rsc, target)) { return; } if (active_on_target) { pcmk_node_t *source_node = pcmk_find_node(scheduler, source); native_add_running(history->rsc, target_node, scheduler, FALSE); if ((source_node != NULL) && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ history->rsc->priv->partial_migration_target = target_node; history->rsc->priv->partial_migration_source = source_node; } } else if (!source_newer_op) { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable); } } /*! * \internal * \brief Update resource role etc. after a failed migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_failure(struct action_history *history) { xmlNode *target_migrate_from = NULL; const char *source = NULL; const char *target = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->priv->orig_role = pcmk_role_started; // Check for migrate_from on the target target_migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, PCMK_OCF_OK, scheduler); if (/* If the resource state is unknown on the target, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, target) /* If the resource has newer state on the target after the migration * events, this migrate_to no longer matters for the target. */ && !newer_state_after_migrate(history->rsc->id, target, history->xml, target_migrate_from, scheduler)) { /* The resource has no newer state on the target, so assume it's still * active there. * (if it is up). */ pcmk_node_t *target_node = pcmk_find_node(scheduler, target); if (target_node && target_node->details->online) { native_add_running(history->rsc, target_node, scheduler, FALSE); } } else if (!non_monitor_after(history->rsc->id, source, history->xml, scheduler)) { /* We know the resource has newer state on the target, but this * migrate_to still matters for the source as long as there's no newer * non-monitor operation there. */ // Mark node as having dangling migration so we can force a stop later history->rsc->priv->dangling_migration_sources = g_list_prepend(history->rsc->priv->dangling_migration_sources, (gpointer) history->node); } } /*! * \internal * \brief Update resource role etc. 
after a failed migrate_from action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_from_failure(struct action_history *history) { xmlNode *source_migrate_to = NULL; const char *source = NULL; const char *target = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, NULL, history->node, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->priv->orig_role = pcmk_role_started; // Check for a migrate_to on the source source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO, source, target, PCMK_OCF_OK, scheduler); if (/* If the resource state is unknown on the source, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, source) /* If the resource has newer state on the source after the migration * events, this migrate_from no longer matters for the source. */ && !newer_state_after_migrate(history->rsc->id, source, source_migrate_to, history->xml, scheduler)) { /* The resource has no newer state on the source, so assume it's still * active there (if it is up). */ pcmk_node_t *source_node = pcmk_find_node(scheduler, source); if (source_node && source_node->details->online) { native_add_running(history->rsc, source_node, scheduler, TRUE); } } } /*! * \internal * \brief Add an action to cluster's list of failed actions * * \param[in,out] history Parsed action result history */ static void record_failed_op(struct action_history *history) { const pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; if (!(history->node->details->online)) { return; } for (const xmlNode *xIter = scheduler->priv->failed->children; xIter != NULL; xIter = xIter->next) { const char *key = pcmk__xe_history_key(xIter); const char *uname = crm_element_value(xIter, PCMK_XA_UNAME); if (pcmk__str_eq(history->key, key, pcmk__str_none) && pcmk__str_eq(uname, history->node->priv->name, pcmk__str_casei)) { crm_trace("Skipping duplicate entry %s on %s", history->key, pcmk__node_name(history->node)); return; } } crm_trace("Adding entry for %s on %s to failed action list", history->key, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name); crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id); pcmk__xml_copy(scheduler->priv->failed, history->xml); } static char * last_change_str(const xmlNode *xml_op) { time_t when; char *result = NULL; if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE, &when) == pcmk_ok) { char *when_s = pcmk__epoch2str(&when, 0); const char *p = strchr(when_s, ' '); // Skip day of week to make message shorter if ((p != NULL) && (*(++p) != '\0')) { result = pcmk__str_copy(p); } free(when_s); } if (result == NULL) { result = pcmk__str_copy("unknown_time"); } return result; } /*! 
* \internal * \brief Ban a resource (or its clone if an anonymous instance) from all nodes * * \param[in,out] rsc Resource to ban */ static void ban_from_all_nodes(pcmk_resource_t *rsc) { int score = -PCMK_SCORE_INFINITY; const pcmk_scheduler_t *scheduler = rsc->priv->scheduler; if (rsc->priv->parent != NULL) { pcmk_resource_t *parent = uber_parent(rsc); if (pcmk__is_anonymous_clone(parent)) { /* For anonymous clones, if an operation with * PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the * entire clone must stop. */ rsc = parent; } } // Ban the resource from all nodes crm_notice("%s will not be started under current conditions", rsc->id); if (rsc->priv->allowed_nodes != NULL) { g_hash_table_destroy(rsc->priv->allowed_nodes); } rsc->priv->allowed_nodes = pe__node_list2table(scheduler->nodes); g_hash_table_foreach(rsc->priv->allowed_nodes, set_node_score, &score); } /*! * \internal * \brief Get configured failure handling and role after failure for an action * * \param[in,out] history Unpacked action history entry * \param[out] on_fail Where to set configured failure handling * \param[out] fail_role Where to set to role after failure */ static void unpack_failure_handling(struct action_history *history, enum pcmk__on_fail *on_fail, enum rsc_role_e *fail_role) { xmlNode *config = pcmk__find_action_config(history->rsc, history->task, history->interval_ms, true); GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node, history->task, history->interval_ms, config); const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL); *on_fail = pcmk__parse_on_fail(history->rsc, history->task, history->interval_ms, on_fail_str); *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail, meta); g_hash_table_destroy(meta); } /*! * \internal * \brief Update resource role, failure handling, etc., after a failed action * * \param[in,out] history Parsed action result history * \param[in] config_on_fail Action failure handling from configuration * \param[in] fail_role Resource's role after failure of this action * \param[out] last_failure This will be set to the history XML * \param[in,out] on_fail Actual handling of action result */ static void unpack_rsc_op_failure(struct action_history *history, enum pcmk__on_fail config_on_fail, enum rsc_role_e fail_role, xmlNode **last_failure, enum pcmk__on_fail *on_fail) { bool is_probe = false; char *last_change_s = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; *last_failure = history->xml; is_probe = pcmk_xe_is_probe(history->xml); last_change_s = last_change_str(history->xml); if (!pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster) && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " QB_XS " exit-status=%d id=%s", crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); } else { pcmk__sched_warn(scheduler, "Unexpected result (%s%s%s) was recorded for %s of " "%s on %s at %s " QB_XS " exit-status=%d id=%s", crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? 
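                         /* Editor's sketch: unpack_failure_handling() above
                          * resolves the effective policy from configuration,
                          * e.g. <op name="monitor" interval="10s"
                          * on-fail="restart"/> maps to pcmk__on_fail_restart,
                          * while a failed stop escalates to fencing when
                          * fencing is enabled. */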
"probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); if (is_probe && (history->exit_status != PCMK_OCF_OK) && (history->exit_status != PCMK_OCF_NOT_RUNNING) && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. */ crm_notice("If it is not possible for %s to run on %s, see " "the " PCMK_XA_RESOURCE_DISCOVERY " option for location " "constraints", history->rsc->id, pcmk__node_name(history->node)); } record_failed_op(history); } free(last_change_s); if (*on_fail < config_on_fail) { pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s", pcmk__on_fail_text(*on_fail), pcmk__on_fail_text(config_on_fail), history->key); *on_fail = config_on_fail; } if (strcmp(history->task, PCMK_ACTION_STOP) == 0) { resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY, "__stop_fail__", scheduler); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) { unpack_migrate_to_failure(history); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) { unpack_migrate_from_failure(history); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->priv->orig_role = pcmk_role_promoted; } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) { if (config_on_fail == pcmk__on_fail_block) { history->rsc->priv->orig_role = pcmk_role_promoted; pe__set_next_role(history->rsc, pcmk_role_stopped, "demote with " PCMK_META_ON_FAIL "=block"); } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) { history->rsc->priv->orig_role = pcmk_role_stopped; } else { /* Staying in the promoted role would put the scheduler and * controller into a loop. Setting the role to unpromoted is not * dangerous because the resource will be stopped as part of * recovery, and any promotion will be ordered after that stop. */ history->rsc->priv->orig_role = pcmk_role_unpromoted; } } if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { /* leave stopped */ pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id); history->rsc->priv->orig_role = pcmk_role_stopped; } else if (history->rsc->priv->orig_role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id); set_active(history->rsc); } pcmk__rsc_trace(history->rsc, "Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s", history->rsc->id, pcmk_role_text(history->rsc->priv->orig_role), pcmk__btoa(history->node->details->unclean), pcmk__on_fail_text(config_on_fail), pcmk_role_text(fail_role)); if ((fail_role != pcmk_role_started) && (history->rsc->priv->next_role < fail_role)) { pe__set_next_role(history->rsc, fail_role, "failure"); } if (fail_role == pcmk_role_stopped) { ban_from_all_nodes(history->rsc); } } /*! * \internal * \brief Block a resource with a failed action if it cannot be recovered * * If resource action is a failed stop and fencing is not possible, mark the * resource as unmanaged and blocked, since recovery cannot be done. 
* * \param[in,out] history Parsed action history entry */ static void block_if_unrecoverable(struct action_history *history) { char *last_change_s = NULL; if (strcmp(history->task, PCMK_ACTION_STOP) != 0) { return; // All actions besides stop are always recoverable } if (pe_can_fence(history->node->priv->scheduler, history->node)) { return; // Failed stops are recoverable via fencing } last_change_s = last_change_str(history->xml); pcmk__sched_err(history->node->priv->scheduler, "No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " QB_XS " rc=%d id=%s", history->rsc->id, history->task, pcmk__node_name(history->node), crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), last_change_s, history->exit_status, history->id); free(last_change_s); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(history->rsc, pcmk__rsc_blocked); } /*! * \internal * \brief Update action history's execution status and why * * \param[in,out] history Parsed action history entry * \param[out] why Where to store reason for update * \param[in] value New value * \param[in] reason Description of why value was changed */ static inline void remap_because(struct action_history *history, const char **why, int value, const char *reason) { if (history->execution_status != value) { history->execution_status = value; *why = reason; } } /*! * \internal * \brief Remap informational monitor results and operation status * * For monitor results, certain OCF codes provide extended information to the * user about services that aren't failed yet but aren't entirely healthy either. * These must be treated as the "normal" result by Pacemaker. * * For operation status, the action result can be used to determine an appropriate * status for the purposes of responding to the action. The status provided by the * executor is not directly usable since the executor does not know what was expected. * * \param[in,out] history Parsed action history entry * \param[in,out] on_fail What should be done about the result * \param[in] expired Whether result is expired * * \note If the result is remapped and the node is not shutting down or failed, * the operation will be recorded in the scheduler data's list of failed * operations to highlight it for the user. * * \note This may update the resource's current and next role.
*/ static void remap_operation(struct action_history *history, enum pcmk__on_fail *on_fail, bool expired) { /* @TODO It would probably also be a good idea to map an exit status of * CRM_EX_PROMOTED or CRM_EX_DEGRADED_PROMOTED to CRM_EX_OK for promote * actions */ bool is_probe = false; int orig_exit_status = history->exit_status; int orig_exec_status = history->execution_status; const char *why = NULL; const char *task = history->task; // Remap degraded results to their successful counterparts history->exit_status = pcmk__effective_rc(history->exit_status); if (history->exit_status != orig_exit_status) { why = "degraded result"; if (!expired && (!history->node->details->shutdown || history->node->details->online)) { record_failed_op(history); } } if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && ((history->execution_status != PCMK_EXEC_DONE) || (history->exit_status != PCMK_OCF_NOT_RUNNING))) { history->execution_status = PCMK_EXEC_DONE; history->exit_status = PCMK_OCF_NOT_RUNNING; why = "equivalent probe result"; } /* If the executor reported an execution status of anything but done or * error, consider that final. But for done or error, we know better whether * it should be treated as a failure or not, because we know the expected * result. */ switch (history->execution_status) { case PCMK_EXEC_DONE: case PCMK_EXEC_ERROR: break; // These should be treated as node-fatal case PCMK_EXEC_NO_FENCE_DEVICE: case PCMK_EXEC_NO_SECRETS: remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "node-fatal error"); goto remap_done; default: goto remap_done; } is_probe = pcmk_xe_is_probe(history->xml); if (is_probe) { task = "probe"; } if (history->expected_exit_status < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * expected exit status in the transition key, which (along with the * similar case of a corrupted transition key in the CIB) will be * reported to this function as -1. Pacemaker 2.0+ does not support * rolling upgrades from those versions or processing of saved CIB files * from those versions, so we do not need to care much about this case. */ remap_because(history, &why, PCMK_EXEC_ERROR, "obsolete history format"); pcmk__config_warn("Expected result not found for %s on %s " "(corrupt or obsolete CIB?)", history->key, pcmk__node_name(history->node)); } else if (history->exit_status == history->expected_exit_status) { remap_because(history, &why, PCMK_EXEC_DONE, "expected result"); } else { remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result"); pcmk__rsc_debug(history->rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", history->key, pcmk__node_name(history->node), history->expected_exit_status, crm_exit_str(history->expected_exit_status), history->exit_status, crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? 
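/* Aside: the degraded-result remapping near the top of this function
 * (exact mapping assumed from the surrounding code and enum names, not
 * spelled out in this hunk) collapses informational codes onto their
 * healthy counterparts, roughly:
 *
 *   PCMK_OCF_DEGRADED          -> PCMK_OCF_OK
 *   PCMK_OCF_DEGRADED_PROMOTED -> PCMK_OCF_RUNNING_PROMOTED
 *
 * so a "degraded" monitor counts as a success for scheduling while still
 * being recorded via record_failed_op() for the user's attention. */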
"" : ": "), pcmk__s(history->exit_reason, "")); } switch (history->exit_status) { case PCMK_OCF_OK: if (is_probe && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || (history->expected_exit_status == history->exit_status) || !pcmk_is_set(history->rsc->flags, pcmk__rsc_managed)) { /* For probes, recurring monitors for the Stopped role, and * unmanaged resources, "not running" is not considered a * failure. */ remap_because(history, &why, PCMK_EXEC_DONE, "exit status"); history->rsc->priv->orig_role = pcmk_role_stopped; *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "not running"); } break; case PCMK_OCF_RUNNING_PROMOTED: if (is_probe && (history->exit_status != history->expected_exit_status)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active and promoted on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } if (!expired || (history->exit_status == history->expected_exit_status)) { history->rsc->priv->orig_role = pcmk_role_promoted; } break; case PCMK_OCF_FAILED_PROMOTED: if (!expired) { history->rsc->priv->orig_role = pcmk_role_promoted; } remap_because(history, &why, PCMK_EXEC_ERROR, "exit status"); break; case PCMK_OCF_NOT_CONFIGURED: remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status"); break; case PCMK_OCF_UNIMPLEMENT_FEATURE: { guint interval_ms = 0; crm_element_value_ms(history->xml, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); } else { remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED, "exit status"); } } break; case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); break; default: if (history->execution_status == PCMK_EXEC_DONE) { char *last_change_s = last_change_str(history->xml); crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", history->exit_status, task, history->rsc->id, pcmk__node_name(history->node), last_change_s); remap_because(history, &why, PCMK_EXEC_ERROR, "unknown exit status"); free(last_change_s); } break; } remap_done: if (why != NULL) { pcmk__rsc_trace(history->rsc, "Remapped %s result from [%s: %s] to [%s: %s] " "because of %s", history->key, pcmk_exec_status_str(orig_exec_status), crm_exit_str(orig_exit_status), pcmk_exec_status_str(history->execution_status), crm_exit_str(history->exit_status), why); } } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(const xmlNode *xml_op, const char *task, pcmk_resource_t *rsc, pcmk_node_t *node) { if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
*/ pcmk__add_param_check(xml_op, rsc, node, pcmk__check_last_failure); } else { pcmk__op_digest_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->priv->scheduler); switch (digest_data->rc) { case pcmk__digest_unknown: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, pcmk__xe_history_key(xml_op), node->priv->id); break; case pcmk__digest_match: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn, pcmk_scheduler_t *scheduler) { pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id); if (remote_node) { pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, scheduler); order_actions(fence, action, pcmk__ar_first_implies_then); } } static bool should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task, guint interval_ms, bool is_last_failure) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if ((rsc->priv->remote_reconnect_ms > 0U) && pcmk_is_set(rsc->priv->scheduler->flags, pcmk__sched_fencing_enabled) && (interval_ms != 0) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { pcmk_node_t *remote_node = pcmk_find_node(rsc->priv->scheduler, rsc->id); if (remote_node && !pcmk_is_set(remote_node->priv->flags, pcmk__node_remote_fenced)) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
* * \param[in,out] history Parsed action result history * * \return true if operation history entry is expired, otherwise false */ static bool check_operation_expiry(struct action_history *history) { bool expired = false; bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0"); time_t last_run = 0; int unexpired_fail_count = 0; const char *clear_reason = NULL; const guint expiration_sec = pcmk__timeout_ms2s(history->rsc->priv->failure_expiration_ms); pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) { pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not expired: " "Not Installed does not expire", history->id, pcmk__node_name(history->node)); return false; // "Not installed" must always be cleared manually } if ((expiration_sec > 0) && (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE, &last_run) == 0)) { /* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a * timestamp */ time_t now = pcmk__scheduler_epoch_time(scheduler); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + expiration_sec)) && !should_ignore_failure_timeout(history->rsc, history->task, history->interval_ms, is_last_failure)) { expired = true; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(history->node, history->rsc, &last_failure, pcmk__fc_effective, history->xml); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d " "expiration=%s last-failure@%lld", history->id, (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, pcmk__readable_interval(expiration_sec * 1000), (long long) last_failure); last_failure += expiration_sec + 1; if (unexpired_fail_count && (now < last_failure)) { pcmk__update_recheck_time(last_failure, scheduler, "fail count expiration"); } } if (expired) { if (pe_get_failcount(history->node, history->rsc, NULL, pcmk__fc_default, history->xml)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Unexpired fail count", history->id, pcmk__node_name(history->node)); expired = false; } } else if (is_last_failure && (history->rsc->priv->remote_reconnect_ms > 0U)) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(history->xml, history->task, history->rsc, history->node)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { pcmk_action_t *clear_op = NULL; // Schedule clearing of the fail count clear_op = pe__clear_failcount(history->rsc, history->node, clear_reason, scheduler); if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled) && (history->rsc->priv->remote_reconnect_ms > 0)) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. 
* * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until * after unpack_node_history() is done). */ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", history->task, history->rsc->id); order_after_remote_fencing(clear_op, history->rsc, scheduler); } } if (expired && (history->interval_ms == 0) && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { switch (history->exit_status) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_PROMOTED: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_PROMOTED: // Don't expire probes that return these values pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Probe result", history->id, pcmk__node_name(history->node)); expired = false; break; } } return expired; } int pe__target_rc_from_xml(const xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } /*! * \internal * \brief Update a resource's state for an action result * * \param[in,out] history Parsed action history entry * \param[in] exit_status Exit status to base new state on * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void update_resource_state(struct action_history *history, int exit_status, const xmlNode *last_failure, enum pcmk__on_fail *on_fail) { bool clear_past_failure = false; if ((exit_status == PCMK_OCF_NOT_INSTALLED) || (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml))) { history->rsc->priv->orig_role = pcmk_role_stopped; } else if (exit_status == PCMK_OCF_NOT_RUNNING) { clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { if ((last_failure != NULL) && pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure), pcmk__str_none)) { clear_past_failure = true; } if (history->rsc->priv->orig_role < pcmk_role_started) { set_active(history->rsc); } } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_stopped; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_promoted; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE, pcmk__str_none)) { if (*on_fail == pcmk__on_fail_demote) { /* Demote clears an error only if * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE */ clear_past_failure = true; } history->rsc->priv->orig_role = pcmk_role_unpromoted; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)) { unpack_migrate_to_success(history); } else if (history->rsc->priv->orig_role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "%s active on %s", history->rsc->id, pcmk__node_name(history->node)); set_active(history->rsc); } if (!clear_past_failure) { return; } switch (*on_fail) { case pcmk__on_fail_stop: case pcmk__on_fail_ban: case pcmk__on_fail_standby_node: case 
pcmk__on_fail_fence_node: pcmk__rsc_trace(history->rsc, "%s (%s) is not cleared by a completed %s", history->rsc->id, pcmk__on_fail_text(*on_fail), history->task); break; case pcmk__on_fail_block: case pcmk__on_fail_ignore: case pcmk__on_fail_demote: case pcmk__on_fail_restart: case pcmk__on_fail_restart_container: *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures"); break; case pcmk__on_fail_reset_remote: if (history->rsc->priv->remote_reconnect_ms == 0U) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures and reset remote"); } break; } } /*! * \internal * \brief Check whether a given history entry matters for resource state * * \param[in] history Parsed action history entry * * \return true if action can affect resource state, otherwise false */ static inline bool can_affect_state(struct action_history *history) { return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, "asyncmon", NULL); } /*! * \internal * \brief Unpack execution/exit status and exit reason from a history entry * * \param[in,out] history Action history entry to unpack * * \return Standard Pacemaker return code */ static int unpack_action_result(struct action_history *history) { if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS, &(history->execution_status)) < 0) || (history->execution_status < PCMK_EXEC_PENDING) || (history->execution_status > PCMK_EXEC_MAX) || (history->execution_status == PCMK_EXEC_CANCELLED)) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_OP_STATUS " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_OP_STATUS), "")); return pcmk_rc_unpack_error; } if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE, &(history->exit_status)) < 0) || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_RC_CODE " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_RC_CODE), "")); return pcmk_rc_unpack_error; } history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON); return pcmk_rc_ok; } /*! 
* \internal * \brief Process an action history entry whose result expired * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the * entry needs no further processing) */ static int process_expired_result(struct action_history *history, int orig_exit_status) { if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && (orig_exit_status != history->expected_exit_status)) { if (history->rsc->priv->orig_role <= pcmk_role_stopped) { history->rsc->priv->orig_role = pcmk_role_unknown; } crm_trace("Ignoring resource history entry %s for probe of %s on %s: " "Masked failure expired", history->id, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->exit_status == history->expected_exit_status) { return pcmk_rc_undetermined; // Only failures expire } if (history->interval_ms == 0) { crm_notice("Ignoring resource history entry %s for %s of %s on %s: " "Expired failure", history->id, history->task, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->node->details->online && !history->node->details->unclean) { /* Reschedule the recurring action. schedule_cancel() won't work at * this stage, so as a hacky workaround, forcibly change the restart * digest so pcmk__check_action_config() does what we want later. * * @TODO We should skip this if there is a newer successful monitor. * Also, this causes rescheduling only if the history entry * has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure * scheduler regression test doesn't, but that may not be a * realistic scenario in production). */ crm_notice("Rescheduling %s-interval %s of %s on %s " "after failure expired", pcmk__readable_interval(history->interval_ms), history->task, history->rsc->id, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST, "calculated-failure-timeout"); return pcmk_rc_ok; } return pcmk_rc_undetermined; } /*! * \internal * \brief Process a masked probe failure * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void mask_probe_failure(struct action_history *history, int orig_exit_status, const xmlNode *last_failure, enum pcmk__on_fail *on_fail) { pcmk_resource_t *ban_rsc = history->rsc; if (!pcmk_is_set(history->rsc->flags, pcmk__rsc_unique)) { ban_rsc = uber_parent(history->rsc); } crm_notice("Treating probe result '%s' for %s on %s as 'not running'", crm_exit_str(orig_exit_status), history->rsc->id, pcmk__node_name(history->node)); update_resource_state(history, history->expected_exit_status, last_failure, on_fail); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name); record_failed_op(history); resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY, "masked-probe-failure", ban_rsc->priv->scheduler); } /*! 
* \internal * \brief Check whether a given failure is for a given pending action * * \param[in] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known * * \return true if \p last_failure is failure of pending action in \p history, * otherwise false * \note Both \p history and \p last_failure must come from the same * \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be * the same. */ static bool failure_is_newer(const struct action_history *history, const xmlNode *last_failure) { guint failure_interval_ms = 0U; long long failure_change = 0LL; long long this_change = 0LL; if (last_failure == NULL) { return false; // Resource has no last_failure entry } if (!pcmk__str_eq(history->task, crm_element_value(last_failure, PCMK_XA_OPERATION), pcmk__str_none)) { return false; // last_failure is for different action } if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL, &failure_interval_ms) != pcmk_ok) || (history->interval_ms != failure_interval_ms)) { return false; // last_failure is for action with different interval } if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE), &this_change, 0LL) != pcmk_rc_ok) || (pcmk__scan_ll(crm_element_value(last_failure, PCMK_XA_LAST_RC_CHANGE), &failure_change, 0LL) != pcmk_rc_ok) || (failure_change < this_change)) { return false; // Failure is not known to be newer } return true; } /*! * \internal * \brief Update a resource's role etc. for a pending action * * \param[in,out] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known */ static void process_pending_action(struct action_history *history, const xmlNode *last_failure) { /* For recurring monitors, a failure is recorded only in RSC_last_failure_0, * and there might be a RSC_monitor_INTERVAL entry with the last successful * or pending result. * * If last_failure contains the failure of the pending recurring monitor * we're processing here, and is newer, the action is no longer pending. * (Pending results have call ID -1, which sorts last, so the last failure * if any should be known.) */ if (failure_is_newer(history, last_failure)) { return; } if (strcmp(history->task, PCMK_ACTION_START) == 0) { pcmk__set_rsc_flags(history->rsc, pcmk__rsc_start_pending); set_active(history->rsc); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->priv->orig_role = pcmk_role_promoted; } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) && history->node->details->unclean) { /* A migrate_to action is pending on an unclean source, so force a stop * on the target. */ const char *migrate_target = NULL; pcmk_node_t *target = NULL; migrate_target = crm_element_value(history->xml, PCMK__META_MIGRATE_TARGET); target = pcmk_find_node(history->rsc->priv->scheduler, migrate_target); if (target != NULL) { stop_action(history->rsc, target, FALSE); } } if (history->rsc->priv->pending_action != NULL) { /* There should never be multiple pending actions, but as a failsafe, * just remember the first one processed for display purposes. */ return; } if (pcmk_is_probe(history->task, history->interval_ms)) { /* Pending probes are currently never displayed, even if pending * operations are requested. If we ever want to change that, * enable the below and the corresponding part of * native.c:native_pending_action().
*/ #if 0 history->rsc->priv->pending_action = strdup("probe"); history->rsc->priv->pending_node = history->node; #endif } else { history->rsc->priv->pending_action = strdup(history->task); history->rsc->priv->pending_node = history->node; } } static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum pcmk__on_fail *on_fail) { int old_rc = 0; bool expired = false; pcmk_resource_t *parent = rsc; enum rsc_role_e fail_role = pcmk_role_unknown; enum pcmk__on_fail failure_strategy = pcmk__on_fail_restart; struct action_history history = { .rsc = rsc, .node = node, .xml = xml_op, .execution_status = PCMK_EXEC_UNKNOWN, }; CRM_CHECK(rsc && node && xml_op, return); history.id = pcmk__xe_id(xml_op); if (history.id == NULL) { pcmk__config_err("Ignoring resource history entry for %s on %s " "without ID", rsc->id, pcmk__node_name(node)); return; } // Task and interval history.task = crm_element_value(xml_op, PCMK_XA_OPERATION); if (history.task == NULL) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "without " PCMK_XA_OPERATION, history.id, rsc->id, pcmk__node_name(node)); return; } crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms)); if (!can_affect_state(&history)) { pcmk__rsc_trace(rsc, "Ignoring resource history entry %s for %s on %s " "with irrelevant action '%s'", history.id, rsc->id, pcmk__node_name(node), history.task); return; } if (unpack_action_result(&history) != pcmk_rc_ok) { return; // Error already logged } history.expected_exit_status = pe__target_rc_from_xml(xml_op); history.key = pcmk__xe_history_key(xml_op); crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id)); pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)", history.id, history.task, history.call_id, pcmk__node_name(node), pcmk_exec_status_str(history.execution_status), crm_exit_str(history.exit_status)); if (node->details->unclean) { pcmk__rsc_trace(rsc, "%s is running on %s, which is unclean (further action " "depends on value of stop's on-fail attribute)", rsc->id, pcmk__node_name(node)); } expired = check_operation_expiry(&history); old_rc = history.exit_status; remap_operation(&history, on_fail, expired); if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) { goto done; } if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) { mask_probe_failure(&history, old_rc, *last_failure, on_fail); goto done; } if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { parent = uber_parent(rsc); } switch (history.execution_status) { case PCMK_EXEC_PENDING: process_pending_action(&history, *last_failure); goto done; case PCMK_EXEC_DONE: update_resource_state(&history, history.exit_status, *last_failure, on_fail); goto done; case PCMK_EXEC_NOT_INSTALLED: unpack_failure_handling(&history, &failure_strategy, &fail_role); if (failure_strategy == pcmk__on_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " QB_XS " status=%d rc=%d id=%s", history.task, rsc->id, pcmk__node_name(node), history.execution_status, history.exit_status, history.id); /* Also mark the resource as pcmk__rsc_failed later, so that it * is displayed as "FAILED" */ *on_fail = pcmk__on_fail_ban; } resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->priv->scheduler); unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); goto done; case PCMK_EXEC_NOT_CONNECTED: if (pcmk__is_pacemaker_remote_node(node) && pcmk_is_set(node->priv->remote->flags,
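/* Aside on the idiom (semantics assumed from the helper's name and its use
 * throughout this file): pcmk_is_set(flags, flag) is roughly
 * ((flags & flag) == flag), so this condition checks whether the remote
 * connection resource is managed. */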
pcmk__rsc_managed)) { /* We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ pcmk__set_rsc_flags(node->priv->remote, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } break; // Not done, do error handling case PCMK_EXEC_ERROR: case PCMK_EXEC_ERROR_HARD: case PCMK_EXEC_ERROR_FATAL: case PCMK_EXEC_TIMEOUT: case PCMK_EXEC_NOT_SUPPORTED: case PCMK_EXEC_INVALID: break; // Not done, do error handling default: // No other value should be possible at this point break; } unpack_failure_handling(&history, &failure_strategy, &fail_role); if ((failure_strategy == pcmk__on_fail_ignore) || ((failure_strategy == pcmk__on_fail_restart_container) && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) { char *last_change_s = last_change_str(xml_op); crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded " QB_XS " %s", history.task, crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), rsc->id, pcmk__node_name(node), last_change_s, history.id); free(last_change_s); update_resource_state(&history, history.expected_exit_status, *last_failure, on_fail); crm_xml_add(xml_op, PCMK_XA_UNAME, node->priv->name); pcmk__set_rsc_flags(rsc, pcmk__rsc_ignore_failure); record_failed_op(&history); if ((failure_strategy == pcmk__on_fail_restart_container) && (*on_fail <= pcmk__on_fail_restart)) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); if (history.execution_status == PCMK_EXEC_ERROR_HARD) { uint8_t log_level = LOG_ERR; if (history.exit_status == PCMK_OCF_NOT_INSTALLED) { log_level = LOG_NOTICE; } do_crm_log(log_level, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s) " QB_XS " %s", parent->id, pcmk__node_name(node), crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->priv->scheduler); } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) { pcmk__sched_err(rsc->priv->scheduler, "Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " QB_XS " %s", parent->id, crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, NULL, -PCMK_SCORE_INFINITY, "fatal-error", rsc->priv->scheduler); } } done: pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)", rsc->id, pcmk__node_name(node), history.id, pcmk_role_text(rsc->priv->orig_role), pcmk_role_text(rsc->priv->next_role)); } /*! 
* \internal * \brief Insert a node attribute with value into a \c GHashTable * * \param[in,out] key Key to insert (either freed or owned by * \p user_data upon return) * \param[in] value Value to insert (owned by \p user_data upon return) * \param[in] user_data \c GHashTable to insert into */ static gboolean insert_attr(gpointer key, gpointer value, gpointer user_data) { GHashTable *table = user_data; g_hash_table_insert(table, key, value); return TRUE; } static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler) { const char *cluster_name = NULL; const char *dc_id = crm_element_value(scheduler->input, PCMK_XA_DC_UUID); const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, }; pcmk__insert_dup(node->priv->attrs, CRM_ATTR_UNAME, node->priv->name); pcmk__insert_dup(node->priv->attrs, CRM_ATTR_ID, node->priv->id); if ((scheduler->dc_node == NULL) && pcmk__str_eq(node->priv->id, dc_id, pcmk__str_casei)) { scheduler->dc_node = node; pcmk__insert_dup(node->priv->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_TRUE); } else if (!pcmk__same_node(node, scheduler->dc_node)) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_FALSE); } cluster_name = g_hash_table_lookup(scheduler->priv->options, PCMK_OPT_CLUSTER_NAME); if (cluster_name) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_CLUSTER_NAME, cluster_name); } if (overwrite) { /* @TODO Try to reorder some unpacking so that we don't need the * overwrite argument or to unpack into a temporary table */ GHashTable *unpacked = pcmk__strkey_table(free, free); pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_input, unpacked, NULL, scheduler); g_hash_table_foreach_steal(unpacked, insert_attr, node->priv->attrs); g_hash_table_destroy(unpacked); } else { pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_input, node->priv->attrs, NULL, scheduler); } pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_input, node->priv->utilization, NULL, scheduler); if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL, pcmk__rsc_node_current) == NULL) { const char *site_name = pcmk__node_attr(node, "site-name", NULL, pcmk__rsc_node_current); if (site_name) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_SITE_NAME, site_name); } else if (cluster_name) { /* Default to cluster-name if unset */ pcmk__insert_dup(node->priv->attrs, CRM_ATTR_SITE_NAME, cluster_name); } } } static GList * extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GList *gIter = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP, NULL, NULL); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) { crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc); crm_xml_add(rsc_op, PCMK_XA_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) 
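/* Aside on the GLib idiom used by insert_attr() and
 * g_hash_table_foreach_steal() above (standard GLib semantics, not specific
 * to this patch): because the callback returns TRUE, each entry is removed
 * from the source table without running its key/value destroy functions, so
 * ownership transfers intact. A minimal sketch ("dest" is hypothetical):
 *
 *   GHashTable *src = pcmk__strkey_table(free, free);
 *   GHashTable *dest = pcmk__strkey_table(free, free);
 *   pcmk__insert_dup(src, "name", "value");
 *   g_hash_table_foreach_steal(src, insert_attr, dest);
 *   g_hash_table_destroy(src);  // src is empty; stolen entries belong to dest
 *   g_hash_table_destroy(dest);
 */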
{ crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GList * find_operations(const char *rsc, const char *node, gboolean active_filter, pcmk_scheduler_t *scheduler) { GList *output = NULL; GList *intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS, NULL, NULL); pcmk_node_t *this_node = NULL; xmlNode *node_state = NULL; CRM_CHECK(status != NULL, return NULL); for (node_state = pcmk__xe_first_child(status, PCMK__XE_NODE_STATE, NULL, NULL); node_state != NULL; node_state = pcmk__xe_next(node_state, PCMK__XE_NODE_STATE)) { const char *uname = crm_element_value(node_state, PCMK_XA_UNAME); if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) { continue; } this_node = pcmk_find_node(scheduler, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); } else { determine_online_status(node_state, this_node, scheduler); } if (this_node->details->online || pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL, NULL); tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL, NULL); for (lrm_rsc = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCE, NULL, NULL); lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc, PCMK__XE_LRM_RESOURCE)) { const char *rsc_id = crm_element_value(lrm_rsc, PCMK_XA_ID); if ((rsc != NULL) && !pcmk__str_eq(rsc_id, rsc, pcmk__str_none)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } return output; } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 7d6051aae9..926322a5dc 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,2402 +1,2402 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include // bool, true, false #include // uint32_t #include #include #include #include #include #include #include #include -#include // xmlXPathObject +#include // xmlXPathObject, etc. #include #include #include // PCMK_RESOURCE_CLASS_* #include #include #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources" enum rsc_command { cmd_ban, cmd_cleanup, cmd_clear, cmd_colocations, cmd_cts, cmd_delete, cmd_delete_param, cmd_digests, cmd_execute_agent, cmd_fail, cmd_get_param, cmd_list_active_ops, cmd_list_agents, cmd_list_all_ops, cmd_list_alternatives, cmd_list_instances, cmd_list_options, cmd_list_providers, cmd_list_resources, cmd_list_standards, cmd_locate, cmd_metadata, cmd_move, cmd_query_xml, cmd_query_xml_raw, cmd_refresh, cmd_restart, cmd_set_param, cmd_wait, cmd_why, // Update this when adding new commands cmd_max = cmd_why, }; /*! 
* \internal * \brief Handler function for a crm_resource command */ typedef crm_exit_t (*crm_resource_fn_t)(pcmk_resource_t *, pcmk_node_t *, cib_t *, pcmk_scheduler_t *, pcmk_ipc_api_t *, xmlNode *); /*! * \internal * \brief Flags to define attributes of a given command * * These attributes may include required command-line options, how to look up a * resource in the scheduler data, whether the command supports clone instances, * etc. */ enum crm_rsc_flags { //! Use \c pcmk_rsc_match_anon_basename when looking up a resource crm_rsc_find_match_anon_basename = (UINT32_C(1) << 0), //! Use \c pcmk_rsc_match_basename when looking up a resource crm_rsc_find_match_basename = (UINT32_C(1) << 1), //! Use \c pcmk_rsc_match_history when looking up a resource crm_rsc_find_match_history = (UINT32_C(1) << 2), //! Fail if \c --resource refers to a particular clone instance crm_rsc_rejects_clone_instance = (UINT32_C(1) << 3), //! Require CIB connection unless resource is specified by agent crm_rsc_requires_cib = (UINT32_C(1) << 4), //! Require controller connection crm_rsc_requires_controller = (UINT32_C(1) << 5), //! Require \c --node argument crm_rsc_requires_node = (UINT32_C(1) << 6), //! Require \c --resource argument crm_rsc_requires_resource = (UINT32_C(1) << 7), //! Require scheduler data unless resource is specified by agent crm_rsc_requires_scheduler = (UINT32_C(1) << 8), }; /*! * \internal * \brief Handler function and flags for a given command */ typedef struct { crm_resource_fn_t fn; //!< Command handler function uint32_t flags; //!< Group of enum crm_rsc_flags } crm_resource_cmd_info_t; struct { enum rsc_command rsc_cmd; // crm_resource command to perform // Command-line option values gchar *rsc_id; // Value of --resource gchar *rsc_type; // Value of --resource-type gboolean all; // --all was given gboolean force; // --force was given gboolean clear_expired; // --expired was given gboolean recursive; // --recursive was given gboolean promoted_role_only; // --promoted was given gchar *host_uname; // Value of --node gchar *interval_spec; // Value of --interval gchar *move_lifetime; // Value of --lifetime gchar *operation; // Value of --operation enum pcmk__opt_flags opt_list; // Parsed from --list-options const char *attr_set_type; // Instance, meta, utilization, or element attribute gchar *prop_id; // --nvpair (attribute XML ID) char *prop_name; // Attribute name gchar *prop_set; // --set-name (attribute block XML ID) gchar *prop_value; // --parameter-value (attribute value) guint timeout_ms; // Parsed from --timeout value char *agent_spec; // Standard and/or provider and/or agent int check_level; // Optional value of --validate or --force-check // Resource configuration specified via command-line arguments gchar *agent; // Value of --agent gchar *class; // Value of --class gchar *provider; // Value of --provider GHashTable *cmdline_params; // Resource parameters specified // Positional command-line arguments gchar **remainder; // Positional arguments as given GHashTable *override_params; // Resource parameter values that override config } options = { .attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES, .check_level = -1, .rsc_cmd = cmd_list_resources, // List all resources if no command given }; static crm_exit_t exit_code = CRM_EX_OK; static pcmk__output_t *out = NULL; static pcmk__common_args_t *args = NULL; // Things that should be cleaned up on exit static GError *error = NULL; static GMainLoop *mainloop = NULL; #define MESSAGE_TIMEOUT_S 60 #define INDENT " " static pcmk__supported_format_t 
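/* Sketch of how the flags above are meant to be consumed (the table name
 * "command_info" and the exact checks are assumptions, not shown in this
 * hunk):
 *
 *   const crm_resource_cmd_info_t *info = &command_info[options.rsc_cmd];
 *
 *   if (pcmk_is_set(info->flags, crm_rsc_requires_resource)
 *       && (options.rsc_id == NULL)) {
 *       // reject the command: it needs --resource
 *   }
 */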
formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static void quit_main_loop(crm_exit_t ec) { exit_code = ec; if (mainloop != NULL) { GMainLoop *mloop = mainloop; mainloop = NULL; // Don't re-enter this block pcmk_quit_main_loop(mloop, 10); g_main_loop_unref(mloop); } } static gboolean resource_ipc_timeout(gpointer data) { // Start with newline because "Waiting for ..." message doesn't have one if (error != NULL) { g_clear_error(&error); } g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, _("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S); quit_main_loop(CRM_EX_TIMEOUT); return FALSE; } static void controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { crm_exit_t *ec = user_data; pcmk__assert(ec != NULL); switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected crm_info("Connection to controller was terminated"); } *ec = exit_code; quit_main_loop(*ec); break; case pcmk_ipc_event_reply: if (status != CRM_EX_OK) { out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status)); pcmk_disconnect_ipc(api); *ec = status; quit_main_loop(*ec); } else { if ((pcmk_controld_api_replies_expected(api) == 0) && (mainloop != NULL) && g_main_loop_is_running(mainloop)) { out->info(out, "... got reply (done)"); crm_debug("Got all the replies we expected"); pcmk_disconnect_ipc(api); *ec = CRM_EX_OK; quit_main_loop(*ec); } else { out->info(out, "... got reply"); } } break; default: break; } } static void start_mainloop(pcmk_ipc_api_t *capi) { // @TODO See if we can avoid setting exit_code as a global variable unsigned int count = pcmk_controld_api_replies_expected(capi); if (count > 0) { out->info(out, "Waiting for %u %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); pcmk__create_timer(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static GList * build_constraint_list(xmlNode *root) { GList *retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObject *xpathObj = NULL; int ndx = 0; int num_results = 0; cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS); xpathObj = pcmk__xpath_search(cib_constraints->doc, "//" PCMK_XE_RSC_LOCATION); num_results = pcmk__xpath_num_results(xpathObj); for (ndx = 0; ndx < num_results; ndx++) { xmlNode *match = pcmk__xpath_result(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) pcmk__xe_id(match), (GCompareFunc) g_strcmp0); } - freeXpathObject(xpathObj); + xmlXPathFreeObject(xpathObj); return retval; } static gboolean validate_opt_list(const gchar *optarg) { if (pcmk__str_eq(optarg, PCMK_VALUE_FENCING, pcmk__str_none)) { options.opt_list = pcmk__opt_fencing; } else if (pcmk__str_eq(optarg, PCMK__VALUE_PRIMITIVE, pcmk__str_none)) { options.opt_list = pcmk__opt_primitive; } else { return FALSE; } return TRUE; } // GOptionArgFunc callback functions static gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { options.attr_set_type = PCMK_XE_META_ATTRIBUTES; } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { options.attr_set_type = PCMK_XE_UTILIZATION; } else if (pcmk__str_eq(option_name, "--element", 
pcmk__str_none)) { options.attr_set_type = ATTR_SET_ELEMENT; } return TRUE; } /*! * \internal * \brief Process options that set the command * * Nothing else should set \c options.rsc_cmd. * * \param[in] option_name Name of the option being parsed * \param[in] optarg Value to be parsed * \param[in] data Ignored * \param[out] error Where to store recoverable error, if any * * \return \c TRUE if the option was successfully parsed, or \c FALSE if an * error occurred, in which case \p *error is set */ static gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { // Sorted by enum rsc_command name if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { options.rsc_cmd = cmd_ban; } else if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) { options.rsc_cmd = cmd_cleanup; } else if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { options.rsc_cmd = cmd_clear; } else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) { options.rsc_cmd = cmd_colocations; } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { options.rsc_cmd = cmd_colocations; options.recursive = TRUE; } else if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) { options.rsc_cmd = cmd_cts; } else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) { options.rsc_cmd = cmd_delete; } else if (pcmk__str_any_of(option_name, "-d", "--delete-parameter", NULL)) { options.rsc_cmd = cmd_delete_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_eq(option_name, "--digests", pcmk__str_none)) { options.rsc_cmd = cmd_digests; if (options.override_params == NULL) { options.override_params = pcmk__strkey_table(g_free, g_free); } } else if (pcmk__str_any_of(option_name, "--force-demote", "--force-promote", "--force-start", "--force-stop", "--force-check", "--validate", NULL)) { options.rsc_cmd = cmd_execute_agent; g_free(options.operation); options.operation = g_strdup(option_name + 2); // skip "--" if (options.override_params == NULL) { options.override_params = pcmk__strkey_table(g_free, g_free); } if (optarg != NULL) { if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) { g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, _("Invalid check level setting: %s"), optarg); return FALSE; } } } else if (pcmk__str_any_of(option_name, "-F", "--fail", NULL)) { options.rsc_cmd = cmd_fail; } else if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) { options.rsc_cmd = cmd_get_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) { options.rsc_cmd = cmd_list_active_ops; } else if (pcmk__str_eq(option_name, "--list-agents", pcmk__str_none)) { options.rsc_cmd = cmd_list_agents; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_any_of(option_name, "-o", "--list-all-operations", NULL)) { options.rsc_cmd = cmd_list_all_ops; } else if (pcmk__str_eq(option_name, "--list-ocf-alternatives", pcmk__str_none)) { options.rsc_cmd = cmd_list_alternatives; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_eq(option_name, "--list-options", pcmk__str_none)) { options.rsc_cmd = cmd_list_options; return validate_opt_list(optarg); } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) { options.rsc_cmd = cmd_list_instances; } else if (pcmk__str_eq(option_name, "--list-ocf-providers", pcmk__str_none)) { options.rsc_cmd = cmd_list_providers; pcmk__str_update(&options.agent_spec, optarg); } else if 
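/* Example of the optional-argument handling above (values illustrative):
 * "--validate=2" reaches the cmd_execute_agent branch with optarg == "2",
 * so options.check_level becomes 2, while a bare "--validate" arrives with
 * optarg == NULL (G_OPTION_FLAG_OPTIONAL_ARG), leaving check_level at its
 * initialized -1. */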
(pcmk__str_any_of(option_name, "-L", "--list", NULL)) { options.rsc_cmd = cmd_list_resources; } else if (pcmk__str_eq(option_name, "--list-standards", pcmk__str_none)) { options.rsc_cmd = cmd_list_standards; } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { options.rsc_cmd = cmd_locate; } else if (pcmk__str_eq(option_name, "--show-metadata", pcmk__str_none)) { options.rsc_cmd = cmd_metadata; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { options.rsc_cmd = cmd_move; } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { options.rsc_cmd = cmd_query_xml; } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { options.rsc_cmd = cmd_query_xml_raw; } else if (pcmk__str_any_of(option_name, "-R", "--refresh", NULL)) { options.rsc_cmd = cmd_refresh; } else if (pcmk__str_eq(option_name, "--restart", pcmk__str_none)) { options.rsc_cmd = cmd_restart; } else if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) { options.rsc_cmd = cmd_set_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_eq(option_name, "--wait", pcmk__str_none)) { options.rsc_cmd = cmd_wait; } else if (pcmk__str_any_of(option_name, "-Y", "--why", NULL)) { options.rsc_cmd = cmd_why; } return TRUE; } static gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { gchar *name = NULL; gchar *value = NULL; if (pcmk__scan_nvpair(optarg, &name, &value) != pcmk_rc_ok) { return FALSE; } /* services__create_resource_action() ultimately takes ownership of * options.cmdline_params. It's not worth trying to ensure that the entire * call path uses (gchar *) strings and g_free(). So create the table for * (char *) strings, and duplicate the (gchar *) strings when inserting. 
*/ if (options.cmdline_params == NULL) { options.cmdline_params = pcmk__strkey_table(free, free); } pcmk__insert_dup(options.cmdline_params, name, value); g_free(name); g_free(value); return TRUE; } static gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { long long timeout_ms = crm_get_msec(optarg); if (timeout_ms < 0) { return FALSE; } options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX); return TRUE; } // Command line option specification /* short option letters still available: eEJkKXyYZ */ static GOptionEntry query_entries[] = { { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all cluster resources with status", NULL }, { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List IDs of all instantiated resources (individual members\n" INDENT "rather than groups etc.)", NULL }, { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, NULL, NULL }, { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List active resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-options", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all available options of the given type.\n" INDENT "Allowed values:\n" INDENT PCMK__VALUE_PRIMITIVE " (primitive resource meta-attributes),\n" INDENT PCMK_VALUE_FENCING " (parameters common to all fencing resources)", "TYPE" }, { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List supported standards", NULL }, { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all available OCF providers", NULL }, { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all agents available for the named standard and/or provider", "STD:PROV" }, { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all available providers for the named OCF agent", "AGENT" }, { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Show the metadata for the named class:provider:agent", "SPEC" }, { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show XML configuration of resource (after any template expansion)", NULL }, { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show XML configuration of resource (before any template expansion)", NULL }, { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Display named parameter for resource (use instance attribute\n" INDENT "unless --element, --meta, or --utilization is specified)", "PARAM" }, { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show node(s) currently running resource", NULL }, { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the location and colocation constraints that apply to a\n" INDENT "resource, and if --recursive is specified, to the resources\n" INDENT "directly or indirectly involved in those colocations.\n" INDENT "If the named resource is part of a group, or a clone or\n" INDENT "bundle instance, constraints for the collective resource\n" INDENT "will be shown unless --force is given.", NULL }, { "stack", 'A', 
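/* Reminder of the standard GOptionEntry field order (GLib's layout, noted
 * as an aid rather than being part of this patch): { long_name, short_name,
 * flags, arg, arg_data, description, arg_description }. So this "stack"
 * entry binds -A/--stack to command_cb and takes no argument value
 * (G_OPTION_FLAG_NO_ARG). */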
G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Equivalent to --constraints --recursive", NULL }, { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show why resources are not running, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Validate resource configuration by calling agent's validate-all\n" INDENT "action. The configuration may be specified either by giving an\n" INDENT "existing resource name with -r, or by specifying --class,\n" INDENT "--agent, and --provider arguments, along with any number of\n" INDENT "--option arguments. An optional LEVEL argument can be given\n" INDENT "to control the level of checking performed.", "LEVEL" }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "If resource has any past failures, clear its history and fail\n" INDENT "count. Optionally filtered by --resource, --node, --operation\n" INDENT "and --interval (otherwise all). --operation and --interval\n" INDENT "apply to fail counts, but entire history is always clear, to\n" INDENT "allow current state to be rechecked. If the named resource is\n" INDENT "part of a group, or one numbered instance of a clone or bundled\n" INDENT "resource, the clean-up applies to the whole collective resource\n" INDENT "unless --force is given.", NULL }, { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Delete resource's history (including failures) so its current state\n" INDENT "is rechecked. Optionally filtered by --resource and --node\n" INDENT "(otherwise all). If the named resource is part of a group, or one\n" INDENT "numbered instance of a clone or bundled resource, the refresh\n" INDENT "applies to the whole collective resource unless --force is given.", NULL }, { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Set named parameter for resource (requires -v). Use instance\n" INDENT "attribute unless --element, --meta, or --utilization is " "specified.", "PARAM" }, { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Delete named parameter for resource. Use instance attribute\n" INDENT "unless --element, --meta or, --utilization is specified.", "PARAM" }, { NULL } }; static GOptionEntry location_entries[] = { { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Create a constraint to move resource. If --node is specified,\n" INDENT "the constraint will be to move to that node, otherwise it\n" INDENT "will be to ban the current node. Unless --force is specified\n" INDENT "this will return an error if the resource is already running\n" INDENT "on the specified node. If --force is specified, this will\n" INDENT "always ban the current node.\n" INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n" INDENT "resource from running on its previous location until the\n" INDENT "implicit constraint expires or is removed with --clear.", NULL }, { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Create a constraint to keep resource off a node.\n" INDENT "Optional: --node, --lifetime, --promoted.\n" INDENT "NOTE: This will prevent the resource from running on the\n" INDENT "affected node until the implicit constraint expires or is\n" INDENT "removed with --clear. 
If --node is not specified, it defaults\n" INDENT "to the node currently running the resource for primitives\n" INDENT "and groups, or the promoted instance of promotable clones with\n" INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n" INDENT "error as there is no sane default).", NULL }, { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Remove all constraints created by the --ban and/or --move\n" INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n" INDENT "--expired. If --node is not specified, all constraints created\n" INDENT "by --ban and --move will be removed for the named resource. If\n" INDENT "--node and --force are specified, any constraint created by\n" INDENT "--move will be cleared, even if it is not for the specified\n" INDENT "node. If --expired is specified, only those constraints whose\n" INDENT "lifetimes have expired will be removed.", NULL }, { "expired", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.clear_expired, "Modifies the --clear argument to remove constraints with\n" INDENT "expired lifetimes.", NULL }, { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime, "Lifespan (as ISO 8601 duration) of created constraints (with\n" INDENT "-B, -M; see https://en.wikipedia.org/wiki/ISO_8601#Durations)", "TIMESPEC" }, { "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Limit scope of command to promoted role (with -B, -M, -U). For\n" INDENT "-B and -M, previously promoted instances may remain\n" INDENT "active in the unpromoted role.", NULL }, // Deprecated since 2.1.0 { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Deprecated: Use --promoted instead", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Delete a resource from the CIB. Required: -t", NULL }, { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Tell the cluster this resource has failed", NULL }, { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Tell the cluster to restart this resource and\n" INDENT "anything that depends on it. This temporarily modifies\n" INDENT "the CIB, and other CIB modifications should be avoided\n" INDENT "while this is in progress. If a node is fenced because\n" INDENT "the stop portion of the restart fails, CIB modifications\n" INDENT "such as target-role may remain.", NULL }, { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Wait until the cluster settles into a stable state", NULL }, { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Show parameter hashes that Pacemaker uses to detect\n" INDENT "configuration changes (only accurate if there is resource\n" INDENT "history on the specified node). Required: --resource, --node.\n" INDENT "Optional: any NAME=VALUE parameters will be used to override\n" INDENT "the configuration (to see what the hash would be with those\n" INDENT "changes).", NULL }, { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and demote a resource on the local\n" INDENT "node.
Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and stop a resource on the local node", NULL }, { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and start a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and promote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and check the state of a resource on\n" INDENT "the local node. An optional LEVEL argument can be given\n" INDENT "to control the level of checking performed.", "LEVEL" }, { NULL } }; static GOptionEntry addl_entries[] = { { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, "Node name", "NAME" }, { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive, "Follow colocation chains when using --set-parameter or --constraints", NULL }, { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type, "Resource XML element (primitive, group, etc.) (with -D)", "ELEMENT" }, { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value, "Value to use with -p", "PARAM" }, { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource meta-attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource utilization attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource element attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation, "Operation to clear instead of all (with -C -r)", "OPERATION" }, { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, "Interval of operation to clear (default 0s) (with -C -r -n)", "N" }, { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.class, "The standard the resource agent conforms to (for example, ocf).\n" INDENT "Use with --agent, --provider, --option, and --validate.", "CLASS" }, { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.agent, "The agent to use (for example, IPaddr). Use with --class,\n" INDENT "--provider, --option, and --validate.", "AGENT" }, { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.provider, "The vendor that supplies the resource agent (for example,\n" INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", "PROVIDER" }, { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, "Specify a device configuration parameter as NAME=VALUE (may be\n" INDENT "specified multiple times). 
Use with --validate and without the\n" INDENT "-r option.", "PARAM" }, { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, "(Advanced) XML ID of attributes element to use (with -p, -d)", "ID" }, { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id, "(Advanced) XML ID of nvpair element to use (with -p, -d)", "ID" }, { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb, "(Advanced) Abort if command does not finish in this time (with\n" INDENT "--restart, --wait, --force-*)", "N" }, { "all", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.all, "List all options, including advanced and deprecated (with\n" INDENT "--list-options)", NULL }, { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "Force the action to be performed. See help for individual commands for\n" INDENT "additional behavior.", NULL }, // @COMPAT Used in resource-agents prior to v4.2.0 { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname, NULL, "HOST" }, { NULL } }; static int ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc, cib_t *cib_conn, const char *move_lifetime) { int rc = pcmk_rc_ok; pcmk_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(out, options.rsc_id, current->priv->name, move_lifetime, cib_conn, options.promoted_role_only, PCMK_ROLE_PROMOTED); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { int count = 0; GList *iter = NULL; current = NULL; for (iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = (pcmk_resource_t *)iter->data; enum rsc_role_e child_role = child->priv->fns->state(child, true); if (child_role == pcmk_role_promoted) { count++; current = pcmk__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(out, options.rsc_id, current->priv->name, move_lifetime, cib_conn, options.promoted_role_only, PCMK_ROLE_PROMOTED); } else { rc = EINVAL; g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("Resource '%s' not moved: active in %d locations (promoted in %d).\n" "To prevent '%s' from running on a specific location, " "specify a node.\n" "To prevent '%s' from being promoted at a specific " "location, specify a node and the --promoted option."), options.rsc_id, nactive, count, options.rsc_id, options.rsc_id); } } else { rc = EINVAL; g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("Resource '%s' not moved: active in %d locations.\n" "To prevent '%s' from running on a specific location, " "specify a node."), options.rsc_id, nactive, options.rsc_id); } return rc; } static void cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_ipc_api_t *controld_api) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, options.rsc_id, ((node != NULL)? pcmk__node_name(node) : "all nodes")); rc = cli_resource_delete(controld_api, rsc, node, options.operation, options.interval_spec, true, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, rsc, node); } /* @FIXME The mainloop functions in this file set exit_code. What happens to * exit_code if rc != pcmk_rc_ok here? */ if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } /*!
* \internal * \brief Allocate a scheduler data object and initialize it from the CIB * * We transform the queried CIB XML to the latest schema version before using it * to populate the scheduler data. * * \param[out] scheduler Where to store scheduler data * \param[in] cib_conn CIB connection * \param[in] out Output object for new scheduler data object * \param[out] cib_xml_orig Where to store queried CIB XML from before any * schema upgrades * * \return Standard Pacemaker return code * * \note \p *scheduler and \p *cib_xml_orig must be \c NULL when this function * is called. * \note The caller is responsible for freeing \p *scheduler using * \c pcmk_free_scheduler. */ static int initialize_scheduler_data(pcmk_scheduler_t **scheduler, cib_t *cib_conn, pcmk__output_t *out, xmlNode **cib_xml_orig) { int rc = pcmk_rc_ok; pcmk__assert((scheduler != NULL) && (*scheduler == NULL) && (cib_conn != NULL) && (out != NULL) && (cib_xml_orig != NULL) && (*cib_xml_orig == NULL)); *scheduler = pcmk_new_scheduler(); if (*scheduler == NULL) { return ENOMEM; } pcmk__set_scheduler_flags(*scheduler, pcmk__sched_no_counts); (*scheduler)->priv->out = out; rc = update_scheduler_input(out, *scheduler, cib_conn, cib_xml_orig); if (rc != pcmk_rc_ok) { pcmk_free_scheduler(*scheduler); *scheduler = NULL; return rc; } cluster_status(*scheduler); return pcmk_rc_ok; } static crm_exit_t refresh(pcmk__output_t *out, const pcmk_node_t *node, pcmk_ipc_api_t *controld_api) { const char *node_name = NULL; const char *log_node_name = "all nodes"; const char *router_node = NULL; int attr_options = pcmk__node_attr_none; int rc = pcmk_rc_ok; if (node != NULL) { node_name = node->priv->name; log_node_name = pcmk__node_name(node); router_node = node->priv->name; } if (pcmk__is_pacemaker_remote_node(node)) { const pcmk_node_t *conn_host = pcmk__current_node(node->priv->remote); if (conn_host == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, _("No cluster connection to Pacemaker Remote node %s " "detected"), log_node_name); return pcmk_rc2exitc(rc); } router_node = conn_host->priv->name; pcmk__set_node_attr_flags(attr_options, pcmk__node_attr_remote); } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", log_node_name); return CRM_EX_OK; } crm_debug("Re-checking the state of all resources on %s", log_node_name); // @FIXME We shouldn't discard rc here rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, NULL, NULL, NULL, attr_options); /* @FIXME The mainloop functions in this file set exit_code. What happens to * exit_code if pcmk_controld_api_reprobe() doesn't return pcmk_rc_ok? */ if (pcmk_controld_api_reprobe(controld_api, node_name, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); return exit_code; } return pcmk_rc2exitc(rc); } static void refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_ipc_api_t *controld_api) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, options.rsc_id, ((node != NULL)? pcmk__node_name(node) : "all nodes")); rc = cli_resource_delete(controld_api, rsc, node, NULL, 0, false, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, rsc, node); } /* @FIXME The mainloop functions in this file set exit_code. What happens to * exit_code if rc != pcmk_rc_ok here? */ if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } /*! 
* \internal * \brief Check whether a command-line resource configuration was given * * \return \c true if \c --class, \c --provider, or \c --agent was specified, or * \c false otherwise */ static inline bool has_cmdline_config(void) { return ((options.class != NULL) || (options.provider != NULL) || (options.agent != NULL)); } static void validate_cmdline_config(void) { bool is_ocf = pcmk__str_eq(options.class, PCMK_RESOURCE_CLASS_OCF, pcmk__str_none); // Sanity check before throwing any errors if (!has_cmdline_config()) { return; } // Cannot use both --resource and command-line resource configuration if (options.rsc_id != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--class, --agent, and --provider cannot be used with " "-r/--resource")); return; } /* Check whether command supports command-line resource configuration * * @FIXME According to the help text, these options can only be used with * --validate. The --force-* commands are documented for resources that are * configured in Pacemaker. So this is a bug. We have two choices: * * Throw an error if --force-* commands are used with these options. * * Document that --force-* commands can be used with these options. * * An error seems safer. If a user really wants to run a non-trivial * resource action based on CLI parameters, they can do so by executing the * resource agent directly. It's unsafe to do so if Pacemaker is managing * the resource that's specified via --class, --option, etc. * * On the other hand, besides safety concerns, running other actions is * exactly the same as running a validate action, and the implementation is * already in place. */ if (options.rsc_cmd != cmd_execute_agent) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--class, --agent, and --provider can only be used with " "--validate and --force-*")); return; } // Check for a valid combination of --class, --agent, and --provider if (is_ocf) { if ((options.provider == NULL) || (options.agent == NULL)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--provider and --agent are required with " "--class=ocf")); return; } } else { if (options.provider != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--provider is supported only with --class=ocf")); return; } // Either --class or --agent was given if (options.agent == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--agent is required with --class")); return; } if (options.class == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("--class is required with --agent")); return; } } // Check whether agent exists if (pcmk__str_eq(options.class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) { if (!stonith_agent_exists(options.agent, 0)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("%s is not a known stonith agent"), options.agent); return; } } else if (!resources_agent_exists(options.class, options.provider, options.agent)) { if (is_ocf) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("%s:%s:%s is not a known resource agent"), options.class, options.provider, options.agent); } else { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("%s:%s is not a known resource agent"), options.class, options.agent); } return; } if (options.cmdline_params == NULL) { options.cmdline_params = pcmk__strkey_table(free, free); } } static crm_exit_t handle_ban(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk_rc_ok; if (node == 
NULL) { rc = ban_or_move(out, rsc, cib_conn, options.move_lifetime); } else { rc = cli_resource_ban(out, options.rsc_id, node->priv->name, options.move_lifetime, cib_conn, options.promoted_role_only, PCMK_ROLE_PROMOTED); } if (rc == EINVAL) { return CRM_EX_USAGE; } return pcmk_rc2exitc(rc); } static crm_exit_t handle_cleanup(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { if (rsc == NULL) { int rc = cli_cleanup_all(controld_api, node, options.operation, options.interval_spec, scheduler); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } else { cleanup(out, rsc, node, controld_api); } /* @FIXME Both of the blocks above are supposed to set exit_code via * start_mainloop(). But if cli_cleanup_all() or cli_resource_delete() * fails, we never start the mainloop. It looks as if we exit with CRM_EX_OK * in those cases. */ return exit_code; } static crm_exit_t handle_clear(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { const char *node_name = (node != NULL)? node->priv->name : NULL; GList *before = NULL; GList *after = NULL; GList *remaining = NULL; int rc = pcmk_rc_ok; if (!out->is_quiet(out)) { before = build_constraint_list(scheduler->input); } if (options.clear_expired) { rc = cli_resource_clear_all_expired(scheduler->input, cib_conn, options.rsc_id, node_name, options.promoted_role_only); } else if (node != NULL) { rc = cli_resource_clear(options.rsc_id, node_name, NULL, cib_conn, true, options.force); } else { rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes, cib_conn, true, options.force); } if (!out->is_quiet(out)) { xmlNode *cib_xml = NULL; rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, _("Could not get modified CIB: %s"), pcmk_rc_str(rc)); g_list_free(before); pcmk__xml_free(cib_xml); return pcmk_rc2exitc(rc); } scheduler->input = cib_xml; cluster_status(scheduler); after = build_constraint_list(scheduler->input); remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); for (const GList *iter = remaining; iter != NULL; iter = iter->next) { const char *constraint = iter->data; out->info(out, "Removing constraint: %s", constraint); } g_list_free(before); g_list_free(after); g_list_free(remaining); } return pcmk_rc2exitc(rc); } static crm_exit_t handle_colocations(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = out->message(out, "locations-and-colocations", rsc, options.recursive, options.force); return pcmk_rc2exitc(rc); } static crm_exit_t handle_cts(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { g_list_foreach(scheduler->priv->resources, (GFunc) cli_resource_print_cts, out); cli_resource_print_cts_constraints(scheduler); return CRM_EX_OK; } static crm_exit_t handle_delete(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { /* rsc_id was already checked for NULL much earlier when validating command * line arguments */ int rc = pcmk_rc_ok; if (options.rsc_type == NULL) { crm_exit_t ec = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, ec, _("You need to specify a resource 
type with -t")); return ec; } rc = pcmk__resource_delete(cib_conn, cib_sync_call, options.rsc_id, options.rsc_type); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, _("Could not delete resource %s: %s"), options.rsc_id, pcmk_rc_str(rc)); } return pcmk_rc2exitc(rc); } static crm_exit_t handle_delete_param(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = cli_resource_delete_attribute(rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, cib_conn, cib_xml_orig, options.force); return pcmk_rc2exitc(rc); } static crm_exit_t handle_digests(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk__resource_digests(out, rsc, node, options.override_params); return pcmk_rc2exitc(rc); } static crm_exit_t handle_execute_agent(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { if (has_cmdline_config()) { return cli_resource_execute_from_params(out, NULL, options.class, options.provider, options.agent, options.operation, options.cmdline_params, options.override_params, options.timeout_ms, args->verbosity, options.force, options.check_level); } return cli_resource_execute(rsc, options.rsc_id, options.operation, options.override_params, options.timeout_ms, cib_conn, args->verbosity, options.force, options.check_level); } static crm_exit_t handle_fail(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = cli_resource_fail(controld_api, rsc, options.rsc_id, node); if (rc == pcmk_rc_ok) { // start_mainloop() sets exit_code start_mainloop(controld_api); return exit_code; } return pcmk_rc2exitc(rc);; } static crm_exit_t handle_get_param(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { unsigned int count = 0; GHashTable *params = NULL; pcmk_node_t *current = rsc->priv->fns->active_node(rsc, &count, NULL); bool free_params = true; const char *value = NULL; int rc = pcmk_rc_ok; if (count > 1) { out->err(out, "%s is active on more than one node, returning the default " "value for %s", rsc->id, pcmk__s(options.prop_name, "unspecified property")); current = NULL; } crm_debug("Looking up %s in %s", options.prop_name, rsc->id); if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, pcmk__str_none)) { params = pe_rsc_params(rsc, current, scheduler); free_params = false; value = g_hash_table_lookup(params, options.prop_name); } else if (pcmk__str_eq(options.attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) { params = pcmk__strkey_table(free, free); get_meta_attributes(params, rsc, NULL, scheduler); value = g_hash_table_lookup(params, options.prop_name); } else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { value = crm_element_value(rsc->priv->xml, options.prop_name); free_params = false; } else { const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, }; params = pcmk__strkey_table(free, free); pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_UTILIZATION, &rule_input, params, NULL, scheduler); value = g_hash_table_lookup(params, options.prop_name); } rc = out->message(out, "attribute-list", rsc, options.prop_name, value); if 
(free_params) { g_hash_table_destroy(params); } return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_active_ops(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { const char *node_name = (node != NULL)? node->priv->name : NULL; int rc = cli_resource_print_operations(options.rsc_id, node_name, true, scheduler); return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_agents(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk__list_agents(out, options.agent_spec); return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_all_ops(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { const char *node_name = (node != NULL)? node->priv->name : NULL; int rc = cli_resource_print_operations(options.rsc_id, node_name, false, scheduler); return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_alternatives(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk__list_alternatives(out, options.agent_spec); return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_instances(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = out->message(out, "resource-names-list", scheduler->priv->resources); if (rc == pcmk_rc_no_output) { // @COMPAT It seems wrong to return an error because there are no resources return CRM_EX_NOSUCH; } return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_options(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { crm_exit_t ec = CRM_EX_OK; int rc = pcmk_rc_ok; switch (options.opt_list) { case pcmk__opt_fencing: rc = pcmk__list_fencing_params(out, options.all); return pcmk_rc2exitc(rc); case pcmk__opt_primitive: rc = pcmk__list_primitive_meta(out, options.all); return pcmk_rc2exitc(rc); default: ec = CRM_EX_SOFTWARE; g_set_error(&error, PCMK__EXITC_ERROR, ec, "Bug: Invalid option list type"); return ec; } } static crm_exit_t handle_list_providers(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk__list_providers(out, options.agent_spec); return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_resources(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { GList *all = g_list_prepend(NULL, (gpointer) "*"); int rc = out->message(out, "resource-list", scheduler, pcmk_show_inactive_rscs |pcmk_show_rsc_only |pcmk_show_pending, true, all, all, false); g_list_free(all); if (rc == pcmk_rc_no_output) { // @COMPAT It seems wrong to return an error because there are no resources return CRM_EX_NOSUCH; } return pcmk_rc2exitc(rc); } static crm_exit_t handle_list_standards(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk__list_standards(out); return pcmk_rc2exitc(rc); } static crm_exit_t handle_locate(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode
*cib_xml_orig) { GList *nodes = cli_resource_search(rsc, options.rsc_id); int rc = out->message(out, "resource-search-list", nodes, options.rsc_id); g_list_free_full(nodes, free); return pcmk_rc2exitc(rc); } static crm_exit_t handle_metadata(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk_rc_ok; char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = NULL; rc = lrmd__new(&lrmd_conn, NULL, NULL, 0); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, _("Could not create executor connection")); lrmd_api_delete(lrmd_conn); return pcmk_rc2exitc(rc); } rc = crm_parse_agent_spec(options.agent_spec, &standard, &provider, &type); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); rc = pcmk_legacy2rc(rc); if (metadata != NULL) { out->output_xml(out, PCMK_XE_METADATA, metadata); free(metadata); } else { /* We were given a validly formatted spec, but it doesn't necessarily * match up with anything that exists. Use ENXIO as the return code * here because that maps to an exit code of CRM_EX_NOSUCH, which * probably is the most common reason to get here. */ rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, _("Metadata query for %s failed: %s"), options.agent_spec, pcmk_rc_str(rc)); } } else { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, _("'%s' is not a valid agent specification"), options.agent_spec); } lrmd_api_delete(lrmd_conn); return pcmk_rc2exitc(rc); } static crm_exit_t handle_move(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk_rc_ok; if (node == NULL) { rc = ban_or_move(out, rsc, cib_conn, options.move_lifetime); } else { rc = cli_resource_move(rsc, options.rsc_id, node, options.move_lifetime, cib_conn, options.promoted_role_only, options.force); } if (rc == EINVAL) { return CRM_EX_USAGE; } return pcmk_rc2exitc(rc); } static crm_exit_t handle_query_xml(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = cli_resource_print(rsc, true); return pcmk_rc2exitc(rc); } static crm_exit_t handle_query_xml_raw(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = cli_resource_print(rsc, false); return pcmk_rc2exitc(rc); } static crm_exit_t handle_refresh(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { if (rsc == NULL) { return refresh(out, node, controld_api); } refresh_resource(out, rsc, node, controld_api); /* @FIXME Both of the calls above are supposed to set exit_code via * start_mainloop(). But there appear to be cases in which we can return * from refresh() or refresh_resource() without starting the mainloop or * returning an error code. It looks as if we exit with CRM_EX_OK in those * cases. 
*/ return exit_code; } static crm_exit_t handle_restart(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { /* We don't pass scheduler because rsc needs to stay valid for the entire * lifetime of cli_resource_restart(), but it will reset and update the * scheduler data multiple times, so it needs to use its own copy. */ int rc = cli_resource_restart(out, rsc, node, options.move_lifetime, options.timeout_ms, cib_conn, options.promoted_role_only, options.force); return pcmk_rc2exitc(rc); } static crm_exit_t handle_set_param(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = pcmk_rc_ok; if (pcmk__str_empty(options.prop_value)) { crm_exit_t ec = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, ec, _("You need to supply a value with the -v option")); return ec; } rc = cli_resource_update_attribute(rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, options.prop_value, options.recursive, cib_conn, cib_xml_orig, options.force); return pcmk_rc2exitc(rc); } static crm_exit_t handle_wait(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = wait_till_stable(out, options.timeout_ms, cib_conn); return pcmk_rc2exitc(rc); } static crm_exit_t handle_why(pcmk_resource_t *rsc, pcmk_node_t *node, cib_t *cib_conn, pcmk_scheduler_t *scheduler, pcmk_ipc_api_t *controld_api, xmlNode *cib_xml_orig) { int rc = out->message(out, "resource-reasons-list", scheduler->priv->resources, rsc, node); return pcmk_rc2exitc(rc); } static const crm_resource_cmd_info_t crm_resource_command_info[] = { [cmd_ban] = { handle_ban, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_rejects_clone_instance |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_cleanup] = { handle_cleanup, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_controller |crm_rsc_requires_scheduler, }, [cmd_clear] = { handle_clear, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_rejects_clone_instance |crm_rsc_requires_cib |crm_rsc_requires_resource // Unless options.clear_expired |crm_rsc_requires_scheduler, }, [cmd_colocations] = { handle_colocations, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_cts] = { handle_cts, crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, [cmd_delete] = { handle_delete, crm_rsc_rejects_clone_instance |crm_rsc_requires_cib |crm_rsc_requires_resource, }, [cmd_delete_param] = { handle_delete_param, crm_rsc_find_match_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_digests] = { handle_digests, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_node |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_execute_agent] = { handle_execute_agent, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_fail] = { handle_fail, crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_controller |crm_rsc_requires_node |crm_rsc_requires_resource 
|crm_rsc_requires_scheduler, }, [cmd_get_param] = { handle_get_param, crm_rsc_find_match_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_list_active_ops] = { handle_list_active_ops, crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, [cmd_list_agents] = { handle_list_agents, 0, }, [cmd_list_all_ops] = { handle_list_all_ops, crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, [cmd_list_alternatives] = { handle_list_alternatives, 0, }, [cmd_list_instances] = { handle_list_instances, crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, [cmd_list_options] = { handle_list_options, 0, }, [cmd_list_providers] = { handle_list_providers, 0, }, [cmd_list_resources] = { handle_list_resources, crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, [cmd_list_standards] = { handle_list_standards, 0, }, [cmd_locate] = { handle_locate, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_metadata] = { handle_metadata, 0, }, [cmd_move] = { handle_move, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_rejects_clone_instance |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_query_xml] = { handle_query_xml, crm_rsc_find_match_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_query_xml_raw] = { handle_query_xml_raw, crm_rsc_find_match_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_refresh] = { handle_refresh, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_controller |crm_rsc_requires_scheduler, }, [cmd_restart] = { handle_restart, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_rejects_clone_instance |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_set_param] = { handle_set_param, crm_rsc_find_match_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_resource |crm_rsc_requires_scheduler, }, [cmd_wait] = { handle_wait, crm_rsc_requires_cib, }, [cmd_why] = { handle_why, crm_rsc_find_match_anon_basename |crm_rsc_find_match_history |crm_rsc_requires_cib |crm_rsc_requires_scheduler, }, }; static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet), "Be less descriptive in output.", NULL }, { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id, "Resource ID", "ID" }, { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder, NULL, NULL }, { NULL } }; const char *description = "Examples:\n\n" "List the available OCF agents:\n\n" "\t# crm_resource --list-agents ocf\n\n" "List the available OCF agents from the linux-ha project:\n\n" "\t# crm_resource --list-agents ocf:heartbeat\n\n" "Move 'myResource' to a specific node:\n\n" "\t# crm_resource --resource myResource --move --node altNode\n\n" "Allow (but not force) 'myResource' to move back to its original " "location:\n\n" "\t# crm_resource --resource myResource --clear\n\n" "Stop 'myResource' (and anything that depends on it):\n\n" "\t# crm_resource --resource myResource --set-parameter " PCMK_META_TARGET_ROLE " --meta --parameter-value Stopped\n\n" "Tell the cluster not to manage 'myResource' (the cluster will not " "attempt to start or stop the\n" "resource under any circumstances; useful when performing maintenance " "tasks on a resource):\n\n" "\t# crm_resource --resource myResource --set-parameter " PCMK_META_IS_MANAGED " --meta --parameter-value false\n\n" "Erase the operation history of 'myResource' on 'aNode' (the cluster " "will 'forget' the existing\n" "resource state, including any errors, and attempt to recover the\n" "resource; useful when a resource\n" "had failed permanently and has been repaired by an administrator):\n\n" "\t# crm_resource --resource myResource --cleanup --node aNode\n\n"; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); g_option_context_set_description(context, description); /* Add the -Q option, which cannot be part of the globally supported options * because some tools use that flag for something else. */ pcmk__add_main_args(context, extra_prog_entries); pcmk__add_arg_group(context, "queries", "Queries:", "Show query help", query_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command help", command_entries); pcmk__add_arg_group(context, "locations", "Locations:", "Show location help", location_entries); pcmk__add_arg_group(context, "advanced", "Advanced:", "Show advanced option help", advanced_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { const crm_resource_cmd_info_t *command_info = NULL; pcmk_resource_t *rsc = NULL; pcmk_node_t *node = NULL; cib_t *cib_conn = NULL; pcmk_scheduler_t *scheduler = NULL; pcmk_ipc_api_t *controld_api = NULL; xmlNode *cib_xml_orig = NULL; uint32_t find_flags = 0; int rc = pcmk_rc_ok; GOptionGroup *output_group = NULL; gchar **processed_args = NULL; GOptionContext *context = NULL; /* * Parse command line arguments */ args = pcmk__new_common_args(SUMMARY); processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx"); context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } pcmk__cli_init_logging("crm_resource", args->verbosity); rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"), args->output_ty, pcmk_rc_str(rc)); goto done; } pe__register_messages(out); crm_resource_register_messages(out); lrmd__register_messages(out); pcmk__register_lib_messages(out); out->quiet = args->quiet; crm_log_args(argc, argv); /* * Validate option combinations */ // --expired without --clear/-U doesn't make sense if (options.clear_expired && (options.rsc_cmd != cmd_clear)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U")); goto done; } if (options.remainder != NULL) { // Commands that use positional arguments will create override_params if (options.override_params == NULL) { GString *msg = g_string_sized_new(128); guint len = g_strv_length(options.remainder); g_string_append(msg, "non-option ARGV-elements:"); for (int i = 0; i < len; i++) { g_string_append_printf(msg, "\n[%d of %u] %s", i + 1, len, options.remainder[i]); } exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg->str); g_string_free(msg, TRUE);
goto done; } for (gchar **arg = options.remainder; *arg != NULL; arg++) { gchar *name = NULL; gchar *value = NULL; int rc = pcmk__scan_nvpair(*arg, &name, &value); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error parsing '%s' as a name=value pair"), *arg); goto done; } g_hash_table_insert(options.override_params, name, value); } } if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) { switch (options.rsc_cmd) { /* These are the only commands that have historically used the * elements in their XML schema. For all others, use the simple list * argument. */ case cmd_get_param: case cmd_list_instances: case cmd_list_standards: pcmk__output_enable_list_element(out); break; default: break; } } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { switch (options.rsc_cmd) { case cmd_colocations: case cmd_list_resources: pcmk__output_text_set_fancy(out, true); break; default: break; } } if (args->version) { out->version(out, false); goto done; } // Ensure command is in valid range and has a handler function if ((options.rsc_cmd >= 0) && (options.rsc_cmd <= cmd_max)) { command_info = &crm_resource_command_info[options.rsc_cmd]; } if ((command_info == NULL) || (command_info->fn == NULL)) { exit_code = CRM_EX_SOFTWARE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Bug: Unimplemented command: %d"), (int) options.rsc_cmd); goto done; } /* If a command-line resource agent specification was given, validate it. * Otherwise, ensure --option was not given. */ if (has_cmdline_config()) { validate_cmdline_config(); if (error != NULL) { exit_code = CRM_EX_USAGE; goto done; } } else if (options.cmdline_params != NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--option must be used with --validate and without -r")); g_hash_table_destroy(options.cmdline_params); goto done; } // Ensure --resource is set if it's required if (pcmk_is_set(command_info->flags, crm_rsc_requires_resource) && !has_cmdline_config() && !options.clear_expired && (options.rsc_id == NULL)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Must supply a resource ID with -r/--resource")); goto done; } // Ensure --node is set if it's required if (pcmk_is_set(command_info->flags, crm_rsc_requires_node) && (options.host_uname == NULL)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Must supply a node name with -N/--node")); goto done; } // Establish a connection to the CIB if needed if (pcmk_is_set(command_info->flags, crm_rsc_requires_cib) && !has_cmdline_config()) { cib_conn = cib_new(); if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) { exit_code = CRM_EX_DISCONNECT; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Could not create CIB connection")); goto done; } rc = cib__signon_attempts(cib_conn, cib_command, 5); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Could not connect to the CIB: %s"), pcmk_rc_str(rc)); goto done; } } // Populate scheduler data from CIB query if needed if (pcmk_is_set(command_info->flags, crm_rsc_requires_scheduler) && !has_cmdline_config()) { rc = initialize_scheduler_data(&scheduler, cib_conn, out, &cib_xml_orig); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); goto done; } } // Establish a connection to the controller if needed if (pcmk_is_set(command_info->flags, crm_rsc_requires_controller) && (getenv("CIB_file") == 
NULL)) { rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error connecting to the controller: %s"), pcmk_rc_str(rc)); goto done; } pcmk_register_ipc_callback(controld_api, controller_event_callback, &exit_code); rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error connecting to %s: %s"), pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc)); goto done; } } /* Find node if --node was given. * * @TODO Consider stricter validation. Currently we ignore the --node * argument for commands that don't require scheduler data, since we have no * way to find the node in that case. This is really a usage error, but we * don't validate strictly. We allow multiple commands (and in some cases * their options like --node) to be specified, and we use the last one in * case of conflicts. * * This isn't universally true. --expired results in a usage error unless * the final command is --clear. */ if (options.host_uname != NULL) { node = pcmk_find_node(scheduler, options.host_uname); if (node == NULL) { exit_code = CRM_EX_NOSUCH; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Node '%s' not found"), options.host_uname); goto done; } } /* Find resource if --resource was given and any find flags are set. * * @TODO Consider stricter validation. See comment above for --node. * @TODO Setter macro for tracing? */ if (pcmk_is_set(command_info->flags, crm_rsc_find_match_anon_basename)) { find_flags |= pcmk_rsc_match_anon_basename; } if (pcmk_is_set(command_info->flags, crm_rsc_find_match_basename)) { find_flags |= pcmk_rsc_match_basename; } if (pcmk_is_set(command_info->flags, crm_rsc_find_match_history)) { find_flags |= pcmk_rsc_match_history; } if ((find_flags != 0) && (options.rsc_id != NULL)) { pcmk__assert(scheduler != NULL); rsc = pe_find_resource_with_flags(scheduler->priv->resources, options.rsc_id, find_flags); if (rsc == NULL) { exit_code = CRM_EX_NOSUCH; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Resource '%s' not found"), options.rsc_id); goto done; } if (pcmk_is_set(command_info->flags, crm_rsc_rejects_clone_instance) && pcmk__is_clone(rsc->priv->parent) && (strchr(options.rsc_id, ':') != NULL)) { exit_code = CRM_EX_INVALID_PARAM; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Cannot operate on clone resource instance '%s'"), options.rsc_id); goto done; } } exit_code = command_info->fn(rsc, node, cib_conn, scheduler, controld_api, cib_xml_orig); done: // For CRM_EX_USAGE, error is already set satisfactorily if ((exit_code != CRM_EX_OK) && (exit_code != CRM_EX_USAGE)) { if (error != NULL) { char *msg = crm_strdup_printf("%s\nError performing operation: %s", error->message, crm_exit_str(exit_code)); g_clear_error(&error); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg); free(msg); } else { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error performing operation: %s"), crm_exit_str(exit_code)); } } g_free(options.host_uname); g_free(options.interval_spec); g_free(options.move_lifetime); g_free(options.operation); g_free(options.prop_id); free(options.prop_name); g_free(options.prop_set); g_free(options.prop_value); g_free(options.rsc_id); g_free(options.rsc_type); free(options.agent_spec); g_free(options.agent); g_free(options.class); g_free(options.provider); if (options.override_params != NULL) { 
g_hash_table_destroy(options.override_params); } g_strfreev(options.remainder); // Don't destroy options.cmdline_params here. See comment in option_cb(). g_strfreev(processed_args); g_option_context_free(context); pcmk__xml_free(cib_xml_orig); cib__clean_up_connection(&cib_conn); pcmk_free_ipc_api(controld_api); pcmk_free_scheduler(scheduler); if (mainloop != NULL) { g_main_loop_unref(mainloop); } pcmk__output_and_clear_error(&error, out); if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); return crm_exit(exit_code); } diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c index 4ba953d85a..cec7d9de58 100644 --- a/tools/crm_resource_ban.c +++ b/tools/crm_resource_ban.c @@ -1,518 +1,518 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> -#include <libxml/xpath.h> // xmlXPathObject +#include <libxml/xpath.h> // xmlXPathObject, etc. #include "crm_resource.h" static char * parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime) { char *later_s = NULL; crm_time_t *now = NULL; crm_time_t *later = NULL; crm_time_t *duration = NULL; if (move_lifetime == NULL) { return NULL; } duration = crm_time_parse_duration(move_lifetime); if (duration == NULL) { out->err(out, "Invalid duration specified: %s\n" "Please refer to https://en.wikipedia.org/wiki/ISO_8601#Durations " "for examples of valid durations", move_lifetime); return NULL; } now = crm_time_new(NULL); later = crm_time_add(now, duration); if (later == NULL) { out->err(out, "Unable to add %s to current time\n" "Please report to " PACKAGE_BUGREPORT " as a possible bug", move_lifetime); crm_time_free(now); crm_time_free(duration); return NULL; } crm_time_log(LOG_INFO, "now ", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "later ", later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday); later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); out->info(out, "Migration will take effect until: %s", later_s); crm_time_free(duration); crm_time_free(later); crm_time_free(now); return later_s; } // \return Standard Pacemaker return code int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role) { char *later_s = NULL; int rc = pcmk_rc_ok; xmlNode *fragment = NULL; xmlNode *location = NULL; later_s = parse_cli_lifetime(out, move_lifetime); if(move_lifetime && later_s == NULL) { return EINVAL; } fragment = pcmk__xe_create(NULL, PCMK_XE_CONSTRAINTS); location = pcmk__xe_create(fragment, PCMK_XE_RSC_LOCATION); pcmk__xe_set_id(location, "cli-ban-%s-on-%s", rsc_id, host); out->info(out, "WARNING: Creating " PCMK_XE_RSC_LOCATION " constraint '%s' with " "a score of " PCMK_VALUE_MINUS_INFINITY " for resource %s on %s." "\n\tThis will prevent %s from %s on %s until the constraint is " "removed using the clear option or by editing the CIB with an " "appropriate tool.\n" "\tThis will be the case even if %s is the last node in the " "cluster", pcmk__xe_id(location), rsc_id, host, rsc_id, (promoted_role_only?
"being promoted" : "running"), host, host); crm_xml_add(location, PCMK_XA_RSC, rsc_id); if(promoted_role_only) { crm_xml_add(location, PCMK_XA_ROLE, promoted_role); } else { crm_xml_add(location, PCMK_XA_ROLE, PCMK_ROLE_STARTED); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, PCMK_XE_NODE, host); crm_xml_add(location, PCMK_XA_SCORE, PCMK_VALUE_MINUS_INFINITY); } else { xmlNode *rule = pcmk__xe_create(location, PCMK_XE_RULE); xmlNode *expr = pcmk__xe_create(rule, PCMK_XE_EXPRESSION); pcmk__xe_set_id(rule, "cli-ban-%s-on-%s-rule", rsc_id, host); crm_xml_add(rule, PCMK_XA_SCORE, PCMK_VALUE_MINUS_INFINITY); crm_xml_add(rule, PCMK_XA_BOOLEAN_OP, PCMK_VALUE_AND); pcmk__xe_set_id(expr, "cli-ban-%s-on-%s-expr", rsc_id, host); crm_xml_add(expr, PCMK_XA_ATTRIBUTE, CRM_ATTR_UNAME); crm_xml_add(expr, PCMK_XA_OPERATION, PCMK_VALUE_EQ); crm_xml_add(expr, PCMK_XA_VALUE, host); crm_xml_add(expr, PCMK_XA_TYPE, PCMK_VALUE_STRING); expr = pcmk__xe_create(rule, PCMK_XE_DATE_EXPRESSION); pcmk__xe_set_id(expr, "cli-ban-%s-on-%s-lifetime", rsc_id, host); crm_xml_add(expr, PCMK_XA_OPERATION, PCMK_VALUE_LT); crm_xml_add(expr, PCMK_XA_END, later_s); } crm_log_xml_notice(fragment, "Modify"); rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_CONSTRAINTS, fragment, cib_sync_call); rc = pcmk_legacy2rc(rc); pcmk__xml_free(fragment); free(later_s); if ((rc != pcmk_rc_ok) && promoted_role_only && (strcmp(promoted_role, PCMK_ROLE_PROMOTED) == 0)) { int banrc = cli_resource_ban(out, rsc_id, host, move_lifetime, cib_conn, promoted_role_only, PCMK__ROLE_PROMOTED_LEGACY); if (banrc == pcmk_rc_ok) { rc = banrc; } } return rc; } // \return Standard Pacemaker return code int cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role) { char *later_s = parse_cli_lifetime(out, move_lifetime); int rc = pcmk_rc_ok; xmlNode *location = NULL; xmlNode *fragment = NULL; if(move_lifetime && later_s == NULL) { return EINVAL; } if(cib_conn == NULL) { free(later_s); return ENOTCONN; } fragment = pcmk__xe_create(NULL, PCMK_XE_CONSTRAINTS); location = pcmk__xe_create(fragment, PCMK_XE_RSC_LOCATION); pcmk__xe_set_id(location, "cli-prefer-%s", rsc_id); crm_xml_add(location, PCMK_XA_RSC, rsc_id); if(promoted_role_only) { crm_xml_add(location, PCMK_XA_ROLE, promoted_role); } else { crm_xml_add(location, PCMK_XA_ROLE, PCMK_ROLE_STARTED); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, PCMK_XE_NODE, host); crm_xml_add(location, PCMK_XA_SCORE, PCMK_VALUE_INFINITY); } else { xmlNode *rule = pcmk__xe_create(location, PCMK_XE_RULE); xmlNode *expr = pcmk__xe_create(rule, PCMK_XE_EXPRESSION); pcmk__xe_set_id(rule, "cli-prefer-rule-%s", rsc_id); crm_xml_add(rule, PCMK_XA_SCORE, PCMK_VALUE_INFINITY); crm_xml_add(rule, PCMK_XA_BOOLEAN_OP, PCMK_VALUE_AND); pcmk__xe_set_id(expr, "cli-prefer-expr-%s", rsc_id); crm_xml_add(expr, PCMK_XA_ATTRIBUTE, CRM_ATTR_UNAME); crm_xml_add(expr, PCMK_XA_OPERATION, PCMK_VALUE_EQ); crm_xml_add(expr, PCMK_XA_VALUE, host); crm_xml_add(expr, PCMK_XA_TYPE, PCMK_VALUE_STRING); expr = pcmk__xe_create(rule, PCMK_XE_DATE_EXPRESSION); pcmk__xe_set_id(expr, "cli-prefer-lifetime-end-%s", rsc_id); crm_xml_add(expr, PCMK_XA_OPERATION, PCMK_VALUE_LT); crm_xml_add(expr, PCMK_XA_END, later_s); } crm_log_xml_info(fragment, "Modify"); rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_CONSTRAINTS, fragment, cib_sync_call); rc = pcmk_legacy2rc(rc); pcmk__xml_free(fragment); free(later_s); if ((rc != 
pcmk_rc_ok) && promoted_role_only
        && (strcmp(promoted_role, PCMK_ROLE_PROMOTED) == 0)) {
        int preferrc = cli_resource_prefer(out, rsc_id, host, move_lifetime,
                                           cib_conn, promoted_role_only,
                                           PCMK__ROLE_PROMOTED_LEGACY);

        if (preferrc == pcmk_rc_ok) {
            rc = preferrc;
        }
    }

    return rc;
}

/* Nodes can be specified two different ways in the CIB, so we have two different
 * functions to try clearing out any constraints on them:
 *
 * (1) The node could be given by attribute=/value= in an expression XML node.
 * That's what resource_clear_node_in_expr handles. That XML looks like this:
 *
 * <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
 *   <rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
 *     <expression id="cli-prefer-expr-dummy" attribute="#uname"
 *                 operation="eq" value="node1" type="string"/>
 *     <date_expression id="cli-prefer-lifetime-end-dummy" operation="lt"
 *                      end="2025-01-01 00:00:00 Z"/>
 *   </rule>
 * </rsc_location>
 *
 * (2) The node could be given by node= in a PCMK_XE_RSC_LOCATION XML node.
 * That's what resource_clear_node_in_location handles. That XML looks like
 * this:
 *
 * <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
 *
 * \return Standard Pacemaker return code
 */
static int
resource_clear_node_in_expr(const char *rsc_id, const char *host,
                            cib_t *cib_conn)
{
    int rc = pcmk_rc_ok;
    char *xpath_string = NULL;

#define XPATH_FMT                                                   \
    "//" PCMK_XE_RSC_LOCATION "[@" PCMK_XA_ID "='cli-prefer-%s']"   \
    "[" PCMK_XE_RULE                                                \
        "[@" PCMK_XA_ID "='cli-prefer-rule-%s']"                    \
        "/" PCMK_XE_EXPRESSION                                      \
        "[@" PCMK_XA_ATTRIBUTE "='" CRM_ATTR_UNAME "' "             \
        "and @" PCMK_XA_VALUE "='%s']"                              \
    "]"

    xpath_string = crm_strdup_printf(XPATH_FMT, rsc_id, rsc_id, host);

    rc = cib_conn->cmds->remove(cib_conn, xpath_string, NULL,
                                cib_xpath|cib_sync_call);
    if (rc == -ENXIO) {
        rc = pcmk_rc_ok;
    } else {
        rc = pcmk_legacy2rc(rc);
    }

    free(xpath_string);
    return rc;
}

// \return Standard Pacemaker return code
static int
resource_clear_node_in_location(const char *rsc_id, const char *host,
                                cib_t * cib_conn, bool clear_ban_constraints,
                                gboolean force)
{
    int rc = pcmk_rc_ok;
    xmlNode *fragment = NULL;
    xmlNode *location = NULL;

    fragment = pcmk__xe_create(NULL, PCMK_XE_CONSTRAINTS);

    if (clear_ban_constraints == TRUE) {
        location = pcmk__xe_create(fragment, PCMK_XE_RSC_LOCATION);
        pcmk__xe_set_id(location, "cli-ban-%s-on-%s", rsc_id, host);
    }

    location = pcmk__xe_create(fragment, PCMK_XE_RSC_LOCATION);
    pcmk__xe_set_id(location, "cli-prefer-%s", rsc_id);
    if (force == FALSE) {
        crm_xml_add(location, PCMK_XE_NODE, host);
    }

    crm_log_xml_info(fragment, "Delete");
    rc = cib_conn->cmds->remove(cib_conn, PCMK_XE_CONSTRAINTS, fragment,
                                cib_sync_call);
    if (rc == -ENXIO) {
        rc = pcmk_rc_ok;
    } else {
        rc = pcmk_legacy2rc(rc);
    }

    pcmk__xml_free(fragment);
    return rc;
}

// \return Standard Pacemaker return code
int
cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes,
                   cib_t * cib_conn, bool clear_ban_constraints,
                   gboolean force)
{
    int rc = pcmk_rc_ok;

    if(cib_conn == NULL) {
        return ENOTCONN;
    }

    if (host) {
        rc = resource_clear_node_in_expr(rsc_id, host, cib_conn);

        /* rc does not tell us whether the previous operation did anything, only
         * whether it failed or not. Thus, as long as it did not fail, we need
         * to try the second clear method.
         */
        if (rc == pcmk_rc_ok) {
            rc = resource_clear_node_in_location(rsc_id, host, cib_conn,
                                                 clear_ban_constraints,
                                                 force);
        }

    } else {
        GList *n = allnodes;

        /* Iterate over all nodes, attempting to clear the constraint from each.
         * On the first error, abort.
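 *
 * As a hedged illustration (node names hypothetical), clearing "dummy"
 * with no host behaves as if the caller had run, once per node:
 *
 *   cli_resource_clear("dummy", "node1", NULL, cib_conn, true, false);
 *   cli_resource_clear("dummy", "node2", NULL, cib_conn, true, false);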
*/ for(; n; n = n->next) { pcmk_node_t *target = n->data; rc = cli_resource_clear(rsc_id, target->priv->name, NULL, cib_conn, clear_ban_constraints, force); if (rc != pcmk_rc_ok) { break; } } } return rc; } static void build_clear_xpath_string(GString *buf, const xmlNode *constraint_node, const char *rsc, const char *node, bool promoted_role_only) { const char *cons_id = pcmk__xe_id(constraint_node); const char *cons_rsc = crm_element_value(constraint_node, PCMK_XA_RSC); GString *rsc_role_substr = NULL; const char *promoted_role_rule = "@" PCMK_XA_ROLE "='" PCMK_ROLE_PROMOTED "' or @" PCMK_XA_ROLE "='" PCMK__ROLE_PROMOTED_LEGACY "'"; pcmk__assert(buf != NULL); g_string_truncate(buf, 0); if (!pcmk__starts_with(cons_id, "cli-ban-") && !pcmk__starts_with(cons_id, "cli-prefer-")) { return; } g_string_append(buf, "//" PCMK_XE_RSC_LOCATION); if ((node != NULL) || (rsc != NULL) || promoted_role_only) { g_string_append_c(buf, '['); if (node != NULL) { pcmk__g_strcat(buf, "@" PCMK_XE_NODE "='", node, "'", NULL); if (promoted_role_only || (rsc != NULL)) { g_string_append(buf, " and "); } } if ((rsc != NULL) && promoted_role_only) { rsc_role_substr = g_string_sized_new(64); pcmk__g_strcat(rsc_role_substr, "@" PCMK_XA_RSC "='", rsc, "' " "and (" , promoted_role_rule, ")", NULL); } else if (rsc != NULL) { rsc_role_substr = g_string_sized_new(64); pcmk__g_strcat(rsc_role_substr, "@" PCMK_XA_RSC "='", rsc, "'", NULL); } else if (promoted_role_only) { rsc_role_substr = g_string_sized_new(64); g_string_append(rsc_role_substr, promoted_role_rule); } if (rsc_role_substr != NULL) { g_string_append(buf, rsc_role_substr->str); } g_string_append_c(buf, ']'); } if (node != NULL) { g_string_append(buf, "|//" PCMK_XE_RSC_LOCATION); if (rsc_role_substr != NULL) { pcmk__g_strcat(buf, "[", rsc_role_substr, "]", NULL); } pcmk__g_strcat(buf, "/" PCMK_XE_RULE "[" PCMK_XE_EXPRESSION "[@" PCMK_XA_ATTRIBUTE "='" CRM_ATTR_UNAME "' " "and @" PCMK_XA_VALUE "='", node, "']]", NULL); } g_string_append(buf, "//" PCMK_XE_DATE_EXPRESSION "[@" PCMK_XA_ID "='"); if (pcmk__starts_with(cons_id, "cli-ban-")) { pcmk__g_strcat(buf, cons_id, "-lifetime']", NULL); } else { // starts with "cli-prefer-" pcmk__g_strcat(buf, "cli-prefer-lifetime-end-", cons_rsc, "']", NULL); } if (rsc_role_substr != NULL) { g_string_free(rsc_role_substr, TRUE); } } // \return Standard Pacemaker return code int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, gboolean promoted_role_only) { GString *buf = NULL; xmlXPathObject *xpathObj = NULL; xmlNode *cib_constraints = NULL; crm_time_t *now = crm_time_new(NULL); int num_results = 0; int rc = pcmk_rc_ok; cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS); xpathObj = pcmk__xpath_search(cib_constraints->doc, "//" PCMK_XE_RSC_LOCATION); num_results = pcmk__xpath_num_results(xpathObj); for (int i = 0; i < num_results; i++) { xmlNode *constraint_node = pcmk__xpath_result(xpathObj, i); xmlNode *date_expr_node = NULL; crm_time_t *end = NULL; int rc = pcmk_rc_ok; if (buf == NULL) { buf = g_string_sized_new(1024); } build_clear_xpath_string(buf, constraint_node, rsc, node, promoted_role_only); if (buf->len == 0) { continue; } date_expr_node = get_xpath_object((const char *) buf->str, constraint_node, LOG_DEBUG); if (date_expr_node == NULL) { continue; } /* And then finally, see if the date expression is expired. If so, * clear the constraint. 
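 *
 * As a sketch of the check below (end time hypothetical):
 * crm_time_compare(now, end) returns 1 only when "now" is strictly
 * later than the rule's end, so a constraint with
 * end="2024-01-01 00:00:00 Z" is removed only once that moment has
 * passed, and an unparsable end is treated as unexpired.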
 */
        rc = pcmk__xe_get_datetime(date_expr_node, PCMK_XA_END, &end);
        if (rc != pcmk_rc_ok) {
            crm_trace("Date expression %s has invalid " PCMK_XA_END ": %s",
                      pcmk__s(pcmk__xe_id(date_expr_node), "without ID"),
                      pcmk_rc_str(rc));
            continue; // Treat as unexpired
        }

        if (crm_time_compare(now, end) == 1) {
            xmlNode *fragment = NULL;
            xmlNode *location = NULL;

            fragment = pcmk__xe_create(NULL, PCMK_XE_CONSTRAINTS);
            location = pcmk__xe_create(fragment, PCMK_XE_RSC_LOCATION);
            pcmk__xe_set_id(location, "%s", pcmk__xe_id(constraint_node));
            crm_log_xml_info(fragment, "Delete");
            rc = cib_conn->cmds->remove(cib_conn, PCMK_XE_CONSTRAINTS,
                                        fragment, cib_sync_call);
            rc = pcmk_legacy2rc(rc);

            if (rc != pcmk_rc_ok) {
                goto done;
            }

            pcmk__xml_free(fragment);
        }

        crm_time_free(end);
    }

done:
    if (buf != NULL) {
        g_string_free(buf, TRUE);
    }
-    freeXpathObject(xpathObj);
+    xmlXPathFreeObject(xpathObj);
    crm_time_free(now);
    return rc;
}
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 296e396859..ffb4bced98 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2581 +1,2581 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include <stdbool.h>                // bool, true, false
#include
#include
#include <stdbool.h>                // bool, true, false
#include
#include
-#include <libxml/xpath.h>           // xmlXPathObject
+#include <libxml/xpath.h>           // xmlXPathObject, etc.
#include
#include
#include
#include
#include

/*!
 * \internal
 * \brief Resource with list of node info objects for its active nodes
 */
struct rsc_node_info {
    const pcmk_resource_t *rsc; //!< Resource

    //! Nodes where \c rsc is active (list of node_info_t *)
    GList *list;
};

/*!
 * \internal
 * \brief Prepend a given node to a resource node info object's list
 *
 * \param[in]     data       Node to prepend (const pcmk_node_t *)
 * \param[in,out] user_data  Resource node info object whose list to prepend to
 *                           (struct rsc_node_info *)
 *
 * \note This is suitable for use with \c g_list_foreach().
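 *
 * A minimal usage sketch, mirroring cli_resource_search() below:
 *
 *   struct rsc_node_info rni = { .rsc = rsc, .list = NULL, };
 *
 *   g_list_foreach(rsc->priv->active_nodes, prepend_node_info, &rni);
 *   // rni.list now holds one newly allocated node_info_t * per node
 *   // on which rsc is active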
*/ static void prepend_node_info(gpointer data, gpointer user_data) { const pcmk_node_t *node = data; struct rsc_node_info *rni = user_data; node_info_t *ni = NULL; pcmk__assert(rni->rsc != NULL); ni = pcmk__assert_alloc(1, sizeof(node_info_t)); ni->node_name = node->priv->name; ni->promoted = pcmk_is_set(rni->rsc->flags, pcmk__rsc_promotable) && (rni->rsc->priv->fns->state(rni->rsc, true) == pcmk_role_promoted); rni->list = g_list_prepend(rni->list, ni); } GList * cli_resource_search(const pcmk_resource_t *rsc, const char *requested_name) { const pcmk_resource_t *clone = NULL; struct rsc_node_info rni = { .rsc = rsc, .list = NULL, }; pcmk__assert(rsc != NULL); if (pcmk__is_clone(rsc)) { clone = rsc; } else { const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); if (pcmk__is_clone(parent) && !pcmk_is_set(rsc->flags, pcmk__rsc_unique) && (rsc->priv->history_id != NULL) && pcmk__str_eq(requested_name, rsc->priv->history_id, pcmk__str_none) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_none)) { // The anonymous clone children's common ID is supplied clone = parent; } } if (clone == NULL) { g_list_foreach(rsc->priv->active_nodes, prepend_node_info, &rni); return rni.list; } for (const GList *iter = clone->priv->children; iter != NULL; iter = iter->next) { const pcmk_resource_t *child = iter->data; rni.rsc = child; g_list_foreach(child->priv->active_nodes, prepend_node_info, &rni); } return rni.list; } // \return Standard Pacemaker return code static int find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, xmlNode **result) { xmlNode *xml_search; int rc = pcmk_rc_ok; GString *xpath = NULL; const char *xpath_base = NULL; if (result) { *result = NULL; } if(the_cib == NULL) { return ENOTCONN; } xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES); if (xpath_base == NULL) { crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)"); return ENOMSG; } xpath = g_string_sized_new(1024); pcmk__g_strcat(xpath, xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL); if (attr_set_type != NULL) { pcmk__g_strcat(xpath, "/", attr_set_type, NULL); if (set_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]", NULL); } } g_string_append(xpath, "//" PCMK_XE_NVPAIR); if (attr_id != NULL && attr_name!= NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "' " "and @" PCMK_XA_NAME "='", attr_name, "']", NULL); } else if (attr_id != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL); } else if (attr_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL); } rc = the_cib->cmds->query(the_cib, xpath->str, &xml_search, cib_sync_call|cib_xpath); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { crm_log_xml_debug(xml_search, "Match"); if (xml_search->children != NULL) { rc = ENOTUNIQ; pcmk__warn_multiple_name_matches(out, xml_search, attr_name); out->spacer(out); } } if (result) { *result = xml_search; } else { pcmk__xml_free(xml_search); } g_string_free(xpath, TRUE); return rc; } /* PRIVATE. Use the find_matching_attr_resources instead. 
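 *
 * Because each resource is appended only after its children have been
 * visited, the result is post-ordered: for a hypothetical group "grp"
 * whose members "A" and "B" both carry the attribute, the list comes
 * back as [A, B, grp], with the requested root always last.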
*/ static void find_matching_attr_resources_recursive(pcmk__output_t *out, GList /* */ **result, pcmk_resource_t *rsc, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, int depth) { int rc = pcmk_rc_ok; char *lookup_id = clone_strip(rsc->id); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { find_matching_attr_resources_recursive(out, result, (pcmk_resource_t *) gIter->data, attr_set, attr_set_type, attr_id, attr_name, cib, depth+1); /* do it only once for clones */ if (pcmk__is_clone(rsc)) { break; } } rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, gboolean force) { int rc = pcmk_rc_ok; char *lookup_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. */ if(force == TRUE) { return g_list_append(result, rsc); } if (pcmk__is_clone(rsc->priv->parent)) { int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc != pcmk_rc_ok) { rsc = rsc->priv->parent; out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", cmd, attr_name, rsc->id, rsc_id); } return g_list_append(result, rsc); } else if ((rsc->priv->parent == NULL) && (rsc->priv->children != NULL) && pcmk__is_clone(rsc)) { pcmk_resource_t *child = rsc->priv->children->data; if (pcmk__is_primitive(child)) { lookup_id = clone_strip(child->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc == pcmk_rc_ok) { rsc = child; out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", attr_name, lookup_id, cmd, rsc_id); } free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. */ find_matching_attr_resources_recursive(out, &result, rsc, attr_set, attr_set_type, attr_id, attr_name, cib, 0); return result; } /*! 
* \internal * \brief Get a resource's XML by resource ID from a given CIB XML tree * * \param[in] cib_xml CIB XML to search * \param[in] rsc Resource whose XML to get * * \return Subtree of \p cib_xml belonging to \p rsc, or \c NULL if not found */ static xmlNode * get_cib_rsc(xmlNode *cib_xml, const pcmk_resource_t *rsc) { char *xpath = crm_strdup_printf("%s//*[@" PCMK_XA_ID "='%s']", pcmk_cib_xpath_for(PCMK_XE_RESOURCES), pcmk__xe_id(rsc->priv->xml)); xmlNode *rsc_xml = get_xpath_object(xpath, cib_xml, LOG_ERR); free(xpath); return rsc_xml; } static int update_element_attribute(pcmk__output_t *out, pcmk_resource_t *rsc, cib_t *cib, xmlNode *cib_xml_orig, const char *attr_name, const char *attr_value) { int rc = pcmk_rc_ok; xmlNode *rsc_xml = rsc->priv->xml; rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { return ENXIO; } crm_xml_add(rsc_xml, attr_name, attr_value); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s", attr_name, attr_value); } return rc; } static int resources_with_attr(pcmk__output_t *out, cib_t *cib, pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *top_id, gboolean force, GList **resources) { if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, pcmk__str_casei)) { if (!force) { xmlNode *xml_search = NULL; int rc = pcmk_rc_ok; rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id, PCMK_XE_META_ATTRIBUTES, attr_set, attr_id, attr_name, &xml_search); if (rc == pcmk_rc_ok || rc == ENOTUNIQ) { char *found_attr_id = NULL; found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); if (!out->is_quiet(out)) { out->err(out, "WARNING: There is already a meta attribute " "for '%s' called '%s' (id=%s)", top_id, attr_name, found_attr_id); out->err(out, " Delete '%s' first or use the force option " "to override", found_attr_id); } free(found_attr_id); pcmk__xml_free(xml_search); return ENOTUNIQ; } pcmk__xml_free(xml_search); } *resources = g_list_append(*resources, rsc); } else { *resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "update", force); } /* If the user specified attr_set or attr_id, the intent is to modify a * single resource, which will be the last item in the list. 
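 *
 * For example, if the search produced [child, parent] (hypothetical),
 * giving an explicit set name or attribute ID keeps only [parent]: the
 * code below detaches the last link and frees the rest of the list.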
*/ if ((attr_set != NULL) || (attr_id != NULL)) { GList *last = g_list_last(*resources); *resources = g_list_remove_link(*resources, last); g_list_free(*resources); *resources = last; } return pcmk_rc_ok; } static void free_attr_update_data(gpointer data) { attr_update_data_t *ud = data; if (ud == NULL) { return; } free(ud->attr_set_type); free(ud->attr_set_id); free(ud->attr_name); free(ud->attr_value); free(ud->given_rsc_id); free(ud->found_attr_id); free(ud); } static int update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force, GList **results) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; const char *top_id = pe__const_top_resource(rsc, false)->id; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL, attr_name, NULL); } rc = resources_with_attr(out, cib, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, top_id, force, &resources); if (rc != pcmk_rc_ok) { return rc; } for (GList *iter = resources; iter != NULL; iter = iter->next) { // @TODO Functionize loop body to simplify freeing allocated memory char *lookup_id = NULL; char *local_attr_set = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; const char *rsc_attr_set = attr_set; xmlNode *rsc_xml = rsc->priv->xml; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &xml_search); switch (rc) { case pcmk_rc_ok: found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); crm_debug("Found a match for " PCMK_XA_NAME "='%s': " PCMK_XA_ID "='%s'", attr_name, found_attr_id); rsc_attr_id = found_attr_id; break; case ENXIO: if (rsc_attr_set == NULL) { local_attr_set = crm_strdup_printf("%s-%s", lookup_id, attr_set_type); rsc_attr_set = local_attr_set; } if (rsc_attr_id == NULL) { found_attr_id = crm_strdup_printf("%s-%s", rsc_attr_set, attr_name); rsc_attr_id = found_attr_id; } rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { /* @TODO Warn and continue through the rest of the resources * and return the error at the end? This should never * happen, but if it does, then we could have a partial * update. 
*/ free(lookup_id); free(found_attr_id); pcmk__xml_free(xml_search); g_list_free(resources); return ENXIO; } xml_top = pcmk__xe_create(NULL, (const char *) rsc_xml->name); crm_xml_add(xml_top, PCMK_XA_ID, lookup_id); xml_obj = pcmk__xe_create(xml_top, attr_set_type); crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set); break; default: free(lookup_id); free(found_attr_id); pcmk__xml_free(xml_search); g_list_free(resources); return rc; } xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { attr_update_data_t *ud = pcmk__assert_alloc(1, sizeof(attr_update_data_t)); if (attr_set_type == NULL) { attr_set_type = (const char *) xml_search->parent->name; } if (rsc_attr_set == NULL) { rsc_attr_set = crm_element_value(xml_search->parent, PCMK_XA_ID); } ud->attr_set_type = pcmk__str_copy(attr_set_type); ud->attr_set_id = pcmk__str_copy(rsc_attr_set); ud->attr_name = pcmk__str_copy(attr_name); ud->attr_value = pcmk__str_copy(attr_value); ud->given_rsc_id = pcmk__str_copy(lookup_id); ud->found_attr_id = pcmk__str_copy(found_attr_id); ud->rsc = rsc; *results = g_list_append(*results, ud); } pcmk__xml_free(xml_top); pcmk__xml_free(xml_search); free(lookup_id); free(found_attr_id); free(local_attr_set); if (recursive && pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) { /* We want to set the attribute only on resources explicitly * colocated with this one, so we use * rsc->priv->with_this_colocations directly rather than the * with_this_colocations() method. */ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->with_this_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; crm_debug("Checking %s %d", cons->id, cons->score); if (pcmk_is_set(cons->dependent->flags, pcmk__rsc_detect_loop) || (cons->score <= 0)) { continue; } crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, cons->dependent->id); update_attribute(cons->dependent, cons->dependent->id, NULL, attr_set_type, NULL, attr_name, attr_value, recursive, cib, cib_xml_orig, force, results); } } } g_list_free(resources); return rc; } // \return Standard Pacemaker return code int cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force) { static bool need_init = true; int rc = pcmk_rc_ok; GList *results = NULL; pcmk__output_t *out = rsc->priv->scheduler->priv->out; pcmk__assert(cib_xml_orig != NULL); /* If we were asked to update the attribute in a resource element (for * instance, ) there's really not much we need to do. 
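 *
 * As a hedged illustration (IDs hypothetical), updating such an
 * attribute just rewrites
 *
 *   <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
 *
 * in place with a single crm_xml_add() call and then replaces the
 * CIB's resources section with the result.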
*/ if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { return update_element_attribute(out, rsc, cib, cib_xml_orig, attr_name, attr_value); } /* One time initialization - clear flags so we can detect loops */ if (need_init) { need_init = false; pcmk__unpack_constraints(rsc->priv->scheduler); pe__clear_resource_flags_on_all(rsc->priv->scheduler, pcmk__rsc_detect_loop); } rc = update_attribute(rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, attr_value, recursive, cib, cib_xml_orig, force, &results); if (rc == pcmk_rc_ok) { if (results == NULL) { return rc; } out->message(out, "attribute-changed-list", results); g_list_free_full(results, free_attr_update_data); } return rc; } // \return Standard Pacemaker return code int cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, xmlNode *cib_xml_orig, gboolean force) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; pcmk__assert((cib != NULL) && (cib_xml_orig != NULL)); if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, pe__const_top_resource(rsc, false)->id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { xmlNode *rsc_xml = rsc->priv->xml; rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { return ENXIO; } pcmk__xe_remove_attr(rsc_xml, attr_name); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted attribute: %s", attr_name); } return rc; } if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "delete", force); } else { resources = g_list_append(resources, rsc); } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; rsc = (pcmk_resource_t *) iter->data; /* @TODO Search the original CIB in find_resource_attr() for * future-proofing, to ensure that we're getting IDs of nvpairs that * exist in the CIB. */ lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &xml_search); switch (rc) { case pcmk_rc_ok: found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); pcmk__xml_free(xml_search); break; case ENXIO: free(lookup_id); pcmk__xml_free(xml_search); continue; default: free(lookup_id); pcmk__xml_free(xml_search); g_list_free(resources); return rc; } if (rsc_attr_id == NULL) { rsc_attr_id = found_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s", lookup_id, found_attr_id, ((attr_set == NULL)? "" : " set="), pcmk__s(attr_set, ""), ((attr_name == NULL)? 
"" : " " PCMK_XA_NAME "="), pcmk__s(attr_name, "")); } free(lookup_id); pcmk__xml_free(xml_obj); free(found_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { pcmk__output_t *out = NULL; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; const char *router_node = NULL; bool cib_only = false; pcmk__assert((rsc != NULL) && (rsc_id != NULL) && (node != NULL)); out = rsc->priv->scheduler->priv->out; if (!pcmk__is_primitive(rsc)) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } rsc_class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); rsc_provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); rsc_type = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } router_node = node->priv->name; if (!node->details->online) { if (do_fail_resource) { out->err(out, "Node %s is not online", pcmk__node_name(node)); return ENOTCONN; } cib_only = true; } else if (pcmk__is_pacemaker_remote_node(node)) { const pcmk_node_t *conn_host = pcmk__current_node(node->priv->remote); if (conn_host == NULL) { out->err(out, "No cluster connection to Pacemaker Remote node %s " "detected", pcmk__node_name(node)); return ENOTCONN; } router_node = conn_host->priv->name; } if (rsc->priv->history_id != NULL) { rsc_api_id = rsc->priv->history_id; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } if (do_fail_resource) { return pcmk_controld_api_fail(controld_api, node->priv->name, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); } return pcmk_controld_api_refresh(controld_api, node->priv->name, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type, cib_only); } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(const pcmk_resource_t *rsc) { const char *name = pcmk__s(rsc->priv->history_id, rsc->id); if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { return strdup(name); } return clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { int rc = pcmk_rc_ok; pcmk__assert((rsc != NULL) && (node != NULL)); /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. 
*/ rc = send_lrm_rsc_op(controld_api, false, rsc, rsc_id, node); if (rc != pcmk_rc_ok) { return rc; } crm_trace("Processing %d mainloop inputs", pcmk_controld_api_replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", pcmk_controld_api_replies_expected(controld_api)); } return rc; } // \return Standard Pacemaker return code static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, pcmk_node_t *node, const char *rsc_id, const char *operation, const char *interval_spec) { int rc = pcmk_rc_ok; pcmk_scheduler_t *scheduler = NULL; const char *failed_value = NULL; const char *failed_id = NULL; char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; pcmk__assert(node != NULL); scheduler = node->priv->scheduler; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = pcmk__strkey_table(NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { guint interval_ms = 0U; pcmk_parse_interval_spec(interval_spec, &interval_ms); interval_ms_s = crm_strdup_printf("%u", interval_ms); } for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { pcmk_resource_t *fail_rsc = NULL; fail_rsc = pe_find_resource_with_flags(scheduler->priv->resources, failed_id, pcmk_rsc_match_history |pcmk_rsc_match_anon_basename); if ((fail_rsc == NULL) || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_none)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, PCMK_XA_UNAME); if (!pcmk__str_eq(node->priv->name, failed_value, pcmk__str_casei)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION); if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL); if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) { continue; } } g_hash_table_add(rscs, (gpointer) failed_id); } free(interval_ms_s); g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { pcmk_resource_t *rsc = NULL; crm_debug("Erasing failures of %s on %s", failed_id, pcmk__node_name(node)); rsc = pe_find_resource(scheduler->priv->resources, failed_id); if (rsc == NULL) { out->err(out, "Resource %s not found", failed_id); return ENXIO; } rc = clear_rsc_history(controld_api, rsc, failed_id, node); if (rc != pcmk_rc_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } // \return Standard Pacemaker return code static int clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, const pcmk_node_t *node) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pcmk__is_pacemaker_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__attrd_api_clear_failures(NULL, node->priv->name, rsc_name, operation, interval_spec, NULL, attr_options); 
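    /* As a hedged example (resource name hypothetical): for an anonymous
     * clone instance "dummy:0", rsc_fail_name() strips the instance suffix,
     * so the attrd request above clears the fail counts recorded under
     * "dummy" on the given node, while a globally unique clone would keep
     * its suffix.
     */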
free(rsc_name); return rc; } // \return Standard Pacemaker return code int cli_resource_delete(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, pcmk_node_t *node, const char *operation, const char *interval_spec, bool just_failures, bool force) { pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; out = scheduler->priv->out; if (rsc->priv->children != NULL) { for (GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = iter->data; rc = cli_resource_delete(controld_api, child, node, operation, interval_spec, just_failures, force); if (rc != pcmk_rc_ok) { return rc; } } return pcmk_rc_ok; } if (node == NULL) { GList *nodes = g_hash_table_get_values(rsc->priv->probed_nodes); if (nodes == NULL) { if (force) { nodes = g_list_copy(scheduler->nodes); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if ((node != NULL) && (node->assign->score >= 0)) { nodes = g_list_prepend(nodes, (gpointer *) node); } } } else { nodes = g_hash_table_get_values(rsc->priv->allowed_nodes); } } for (GList *iter = nodes; iter != NULL; iter = iter->next) { node = (pcmk_node_t *) iter->data; if (!node->details->online) { continue; } rc = cli_resource_delete(controld_api, rsc, node, operation, interval_spec, just_failures, force); if (rc != pcmk_rc_ok) { break; } } g_list_free(nodes); return rc; } if (!pcmk_is_set(node->priv->flags, pcmk__node_probes_allowed)) { out->err(out, "Unable to clean up %s because resource discovery disabled on " "%s", rsc->id, pcmk__node_name(node)); return EOPNOTSUPP; } if (controld_api == NULL) { out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", rsc->id, pcmk__node_name(node)); return pcmk_rc_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up %s failures on %s: %s", rsc->id, pcmk__node_name(node), pcmk_rc_str(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(out, controld_api, node, rsc->id, operation, interval_spec); } else { rc = clear_rsc_history(controld_api, rsc, rsc->id, node); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", rsc->id, pcmk__node_name(node), pcmk_rc_str(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, pcmk__node_name(node)); } return rc; } // \return Standard Pacemaker return code int cli_cleanup_all(pcmk_ipc_api_t *controld_api, pcmk_node_t *node, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *node_name = NULL; const char *log_node_name = "all nodes"; pcmk__assert(scheduler != NULL); out = scheduler->priv->out; if (node != NULL) { node_name = node->priv->name; log_node_name = pcmk__node_name(node); } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", log_node_name); return rc; } if (pcmk__is_pacemaker_remote_node(node)) { pcmk__set_node_attr_flags(attr_options, pcmk__node_attr_remote); } rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up all failures on %s: %s", log_node_name, pcmk_rc_str(rc)); return rc; } if (node 
!= NULL) { rc = clear_rsc_failures(out, controld_api, node, NULL, operation, interval_spec); } else { for (GList *iter = scheduler->nodes; iter; iter = iter->next) { pcmk_node_t *sched_node = iter->data; rc = clear_rsc_failures(out, controld_api, sched_node, NULL, operation, interval_spec); if (rc != pcmk_rc_ok) { break; } } } if (rc == pcmk_rc_ok) { out->info(out, "Cleaned up all resources on %s", log_node_name); } else { // @TODO But didn't clear_rsc_failures() fail? out->err(out, "Cleaned all resource failures on %s, but unable to clean " "history: %s", log_node_name, pcmk_rc_str(rc)); } return rc; } static void check_role(resource_checks_t *checks) { const char *role_s = g_hash_table_lookup(checks->rsc->priv->meta, PCMK_META_TARGET_ROLE); if (role_s == NULL) { return; } switch (pcmk_parse_role(role_s)) { case pcmk_role_stopped: checks->flags |= rsc_remain_stopped; break; case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags, pcmk__rsc_promotable)) { checks->flags |= rsc_unpromotable; } break; default: break; } } static void check_managed(resource_checks_t *checks) { const char *managed_s = g_hash_table_lookup(checks->rsc->priv->meta, PCMK_META_IS_MANAGED); if ((managed_s != NULL) && !crm_is_true(managed_s)) { checks->flags |= rsc_unmanaged; } } static void check_locked(resource_checks_t *checks) { const pcmk_node_t *lock_node = checks->rsc->priv->lock_node; if (lock_node != NULL) { checks->flags |= rsc_locked; checks->lock_node = lock_node->priv->name; } } static bool node_is_unhealthy(pcmk_node_t *node) { switch (pe__health_strategy(node->priv->scheduler)) { case pcmk__health_strategy_none: break; case pcmk__health_strategy_no_red: if (pe__node_health(node) < 0) { return true; } break; case pcmk__health_strategy_only_green: if (pe__node_health(node) <= 0) { return true; } break; case pcmk__health_strategy_progressive: case pcmk__health_strategy_custom: /* @TODO These are finite scores, possibly with rules, and possibly * combining with other scores, so attributing these as a cause is * nontrivial. 
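 *
 * (The strategies handled above are clear-cut by comparison: "no-red"
 * flags any node with a negative health score, and "only-green" also
 * flags a score of exactly zero.)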
*/ break; } return false; } static void check_node_health(resource_checks_t *checks, pcmk_node_t *node) { if (node == NULL) { GHashTableIter iter; bool allowed = false; bool all_nodes_unhealthy = true; g_hash_table_iter_init(&iter, checks->rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { allowed = true; if (!node_is_unhealthy(node)) { all_nodes_unhealthy = false; break; } } if (allowed && all_nodes_unhealthy) { checks->flags |= rsc_node_health; } } else if (node_is_unhealthy(node)) { checks->flags |= rsc_node_health; } } /* @TODO Make this check all resources if rsc is NULL, so it can be called after * cleanup of all resources */ int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { resource_checks_t checks = { .rsc = rsc }; check_role(&checks); check_managed(&checks); check_locked(&checks); check_node_health(&checks, node); return out->message(out, "resource-check-list", &checks); } // \return Standard Pacemaker return code int cli_resource_fail(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { pcmk__assert((rsc != NULL) && (rsc_id != NULL) && (node != NULL)); if (controld_api == NULL) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; out->err(out, "Dry run: skipping fail of %s on %s due to CIB_file", rsc_id, pcmk__node_name(node)); return pcmk_rc_ok; } crm_notice("Failing %s on %s", rsc_id, pcmk__node_name(node)); return send_lrm_rsc_op(controld_api, true, rsc, rsc_id, node); } static GHashTable * generate_resource_params(pcmk_resource_t *rsc) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; char *key = NULL; char *value = NULL; combined = pcmk__strkey_table(free, free); params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { pcmk__insert_dup(combined, key, value); } } meta = pcmk__strkey_table(free, free); get_meta_attributes(meta, rsc, NULL, rsc->priv->scheduler); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } bool resource_is_running_on(pcmk_resource_t *rsc, const char *host) { bool found = true; GList *hIter = NULL; GList *hosts = NULL; if (rsc == NULL) { return false; } rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pcmk_node_t *node = (pcmk_node_t *) hIter->data; if (pcmk__strcase_any_of(host, node->priv->name, node->priv->id, NULL)) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if (host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = false; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = false; } done: g_list_free(hosts); return found; } /*! 
* \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (pcmk__is_group(rsc)) { GList *member_active = NULL; member_active = get_active_resources(host, rsc->priv->children); active = g_list_concat(active, member_active); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(pcmk__output_t *out, GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { out->info(out, "%s%s", tag, (const char *)item->data); } } /*! * \internal * \brief Update scheduler XML input based on a CIB query and the current time * * The CIB XML is upgraded to the latest schema version. * * \param[in,out] out Output object * \param[in,out] scheduler Scheduler data to update * \param[in] cib Connection to the CIB manager * \param[out] cib_xml_orig Where to store CIB XML before any schema * upgrades (can be \c NULL) * * \return Standard Pacemaker return code */ int update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler, cib_t *cib, xmlNode **cib_xml_orig) { xmlNode *queried_xml = NULL; xmlNode *updated_xml = NULL; int rc = pcmk_rc_ok; pcmk__assert((out != NULL) && (scheduler != NULL) && (scheduler->input == NULL) && (scheduler->priv->now == NULL) && (cib != NULL) && ((cib_xml_orig == NULL) || (*cib_xml_orig == NULL))); rc = cib->cmds->query(cib, NULL, &queried_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not obtain the current CIB: %s", pcmk_rc_str(rc)); goto done; } if (cib_xml_orig != NULL) { updated_xml = pcmk__xml_copy(NULL, queried_xml); } else { // No need to preserve the pre-upgrade CIB, so don't make a copy updated_xml = queried_xml; queried_xml = NULL; } rc = pcmk__update_configured_schema(&updated_xml, false); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML: %s", pcmk_rc_str(rc)); pcmk__xml_free(updated_xml); goto done; } scheduler->input = updated_xml; scheduler->priv->now = crm_time_new(NULL); done: if ((rc == pcmk_rc_ok) && (cib_xml_orig != NULL)) { *cib_xml_orig = queried_xml; } else { pcmk__xml_free(queried_xml); } return rc; } // \return Standard Pacemaker return code static int update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, xmlNode **cib_xml_orig, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; pcmk__output_t *out = scheduler->priv->out; pcmk_reset_scheduler(scheduler); pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts); if(simulate) { bool prev_quiet = false; rc = update_scheduler_input(out, scheduler, cib, NULL); if (rc != pcmk_rc_ok) { goto done; } pid = pcmk__getpid_s(); shadow_cib 
= cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { out->err(out, "Could not create shadow cib: '%s'", pid); rc = ENXIO; goto done; } rc = pcmk__xml_write_file(scheduler->input, shadow_file, false); if (rc != pcmk_rc_ok) { out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc)); goto done; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not connect to shadow cib: %s", pcmk_rc_str(rc)); goto done; } pcmk__schedule_actions(scheduler); prev_quiet = out->is_quiet(out); out->quiet = true; pcmk__simulate_transition(scheduler, shadow_cib, NULL); out->quiet = prev_quiet; rc = update_dataset(shadow_cib, scheduler, cib_xml_orig, false); } else { xmlNode *xml = NULL; rc = update_scheduler_input(out, scheduler, cib, &xml); if (rc != pcmk_rc_ok) { goto done; } pcmk__xml_free(*cib_xml_orig); *cib_xml_orig = xml; cluster_status(scheduler); } done: // Do not free scheduler->input because rsc->priv->xml must remain valid cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } /*! * \internal * \brief Find the maximum stop timeout of a resource and its children (if any) * * \param[in,out] rsc Resource to get timeout for * * \return Maximum stop timeout for \p rsc (in milliseconds) */ static guint max_rsc_stop_timeout(pcmk_resource_t *rsc) { long long result_ll; guint max_delay = 0; xmlNode *config = NULL; GHashTable *meta = NULL; if (rsc == NULL) { return 0; } // If resource is collective, use maximum of its children's stop timeouts if (rsc->priv->children != NULL) { for (GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = iter->data; guint delay = max_rsc_stop_timeout(child); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Maximum stop timeout for %s is now %s " "due to %s", rsc->id, pcmk__readable_interval(delay), child->id); max_delay = delay; } } return max_delay; } // Get resource's stop action configuration from CIB config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true); /* Get configured timeout for stop action (fully evaluated for rules, * defaults, etc.). * * @TODO This currently ignores node (which might matter for rules) */ meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config); if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT), &result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0)) { max_delay = (guint) QB_MIN(result_ll, UINT_MAX); } g_hash_table_destroy(meta); return max_delay; } /*! * \internal * \brief Find a reasonable waiting time for stopping any one resource in a list * * \param[in,out] scheduler Scheduler data * \param[in] resources List of names of resources that will be stopped * * \return Rough estimate of a reasonable time to wait (in seconds) to stop any * one resource in \p resources * \note This estimate is very rough, simply the maximum stop timeout of all * given resources and their children, plus a small fudge factor. It does * not account for children that must be stopped in sequence, action * throttling, or any demotions needed. It checks the stop timeout, even * if the resources in question are actually being started. 
*/ static guint wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources) { guint max_delay = 0U; // Find maximum stop timeout in milliseconds for (const GList *item = resources; item != NULL; item = item->next) { pcmk_resource_t *rsc = pe_find_resource(scheduler->priv->resources, (const char *) item->data); guint delay = max_rsc_stop_timeout(rsc); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Wait time is now %s due to %s", pcmk__readable_interval(delay), rsc->id); max_delay = delay; } } return pcmk__timeout_ms2s(max_delay) + 5; } #define waiting_for_starts(d, r, h) ((d != NULL) || \ (!resource_is_running_on((r), (h)))) /*! * \internal * \brief Restart a resource (on a particular host if requested). * * \param[in,out] out Output object * \param[in,out] rsc The resource to restart * \param[in] node Node to restart resource on (NULL for all) * \param[in] move_lifetime If not NULL, how long constraint should * remain in effect (as ISO 8601 string) * \param[in] timeout_ms Consider failed if actions do not complete * in this time (specified in milliseconds, * but a two-second granularity is actually * used; if 0, it will be calculated based on * the resource timeout) * \param[in,out] cib Connection to the CIB manager * \param[in] promoted_role_only If true, limit to promoted instances * \param[in] force If true, apply only to requested instance * if part of a collective resource * * \return Standard Pacemaker return code (exits on certain failures) */ int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, const pcmk_node_t *node, const char *move_lifetime, guint timeout_ms, cib_t *cib, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; int lpc = 0; int before = 0; guint step_timeout_s = 0; /* @TODO Due to this sleep interval, a timeout <2s will cause problems and * should be rejected */ guint sleep_interval = 2U; guint timeout = pcmk__timeout_ms2s(timeout_ms); bool stop_via_ban = false; char *rsc_id = NULL; char *lookup_id = NULL; char *orig_target_role = NULL; xmlNode *cib_xml_orig = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pcmk_scheduler_t *scheduler = NULL; pcmk_resource_t *parent = uber_parent(rsc); bool running = false; const char *id = pcmk__s(rsc->priv->history_id, rsc->id); const char *host = node ? node->priv->name : NULL; /* If the implicit resource or primitive resource of a bundle is given, operate on the * bundle itself instead. 
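    /* For example (timeouts hypothetical): for resources whose maximum stop
     * timeouts are 20s and 90s, the loop below settles on 90000ms, so the
     * function returns pcmk__timeout_ms2s(90000) + 5, i.e. a 95-second wait
     * per step.
     */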
*/ if (pcmk__is_bundled(rsc)) { rsc = parent->priv->parent; } running = resource_is_running_on(rsc, host); if (pcmk__is_clone(parent) && !running) { if (pcmk__is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } rsc = parent->priv->fns->find_rsc(parent, lookup_id, node, pcmk_rsc_match_basename |pcmk_rsc_match_current_node); free(lookup_id); running = resource_is_running_on(rsc, host); } if (!running) { if (host) { out->err(out, "%s is not running on %s and so cannot be restarted", id, host); } else { out->err(out, "%s is not running anywhere and so cannot be restarted", id); } return ENXIO; } if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { out->err(out, "Unmanaged resources cannot be restarted."); return EAGAIN; } rsc_id = strdup(rsc->id); if (pcmk__is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } if (host) { if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) { stop_via_ban = true; } else if (pcmk__is_clone(parent)) { stop_via_ban = true; free(lookup_id); lookup_id = strdup(parent->id); } } /* grab full cib determine originally active resources disable or ban poll cib and watch for affected resources to get stopped without --timeout, calculate the stop timeout for each step and wait for that if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down if everything stopped, re-enable or un-ban poll cib and watch for affected resources to get started without --timeout, calculate the start timeout for each step and wait for that if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up report success Optimizations: - use constraints to determine ordered list of affected resources - Allow a --no-deps option (aka. --force-restart) */ scheduler = pcmk_new_scheduler(); if (scheduler == NULL) { rc = errno; out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc)); goto done; } scheduler->priv->out = out; rc = update_dataset(cib, scheduler, &cib_xml_orig, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc); goto done; } restart_target_active = get_active_resources(host, scheduler->priv->resources); current_active = get_active_resources(host, scheduler->priv->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ out->quiet = true; rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib, promoted_role_only, PCMK_ROLE_PROMOTED); } else { xmlNode *xml_search = NULL; /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped. * Remember any existing PCMK_META_TARGET_ROLE so we can restore it * later (though it only makes any difference if it's Unpromoted). 
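    /* For example (IDs hypothetical), a request to restart "httpd" when it
     * is the primitive inside bundle "httpd-bundle" restarts the bundle
     * itself, since a bundled member cannot be cycled apart from its
     * container.
     */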
*/ rc = find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL, PCMK_META_TARGET_ROLE, &xml_search); if (rc == pcmk_rc_ok) { orig_target_role = crm_element_value_copy(xml_search, PCMK_XA_VALUE); } pcmk__xml_free(xml_search); rc = cli_resource_update_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, PCMK_ACTION_STOPPED, FALSE, cib, cib_xml_orig, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; } if (restart_target_active != NULL) { g_list_free_full(restart_target_active, free); restart_target_active = NULL; } goto done; } rc = update_dataset(cib, scheduler, &cib_xml_orig, true); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources would be stopped"); goto failure; } target_active = get_active_resources(host, scheduler->priv->resources); dump_list(target_active, "Target"); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (list_delta != NULL) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = wait_time_estimate(scheduler, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%us remaining", timeout); } rc = update_dataset(cib, scheduler, &cib_xml_orig, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were stopped"); goto failure; } if (current_active != NULL) { g_list_free_full(current_active, free); } current_active = get_active_resources(host, scheduler->priv->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); if(before == g_list_length(list_delta)) { /* aborted during stop phase, print the contents of list_delta */ out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } if (stop_via_ban) { rc = cli_resource_clear(lookup_id, host, NULL, cib, true, force); } else if (orig_target_role) { rc = cli_resource_update_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, orig_target_role, FALSE, cib, cib_xml_orig, force); free(orig_target_role); orig_target_role = NULL; } else { rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, cib, cib_xml_orig, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); goto done; } if (target_active != NULL) { g_list_free_full(target_active, free); } target_active = restart_target_active; list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while 
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if (timeout_ms == 0) {
            step_timeout_s = wait_time_estimate(scheduler, list_delta)
                             / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0;
             (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc,
                                                          host);
             lpc++) {
            sleep(sleep_interval);
            if (timeout) {
                timeout -= sleep_interval;
                crm_trace("%us remaining", timeout);
            }
            rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
            if (rc != pcmk_rc_ok) {
                out->err(out,
                         "Could not determine which resources were started");
                goto failure;
            }

            /* It's OK if dependent resources moved to a different node,
             * so we check active resources on all nodes.
             */
            if (current_active != NULL) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(NULL,
                                                  scheduler->priv->resources);

            g_list_free(list_delta);
            list_delta = pcmk__subtract_lists(target_active, current_active,
                                              (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        if (before == g_list_length(list_delta)) {
            /* aborted during start phase, print the contents of list_delta */
            out->err(out,
                     "Could not complete restart of %s, %d resources remaining",
                     rsc_id, g_list_length(list_delta));
            display_list(out, list_delta, " * ");
            rc = ETIME;
            goto failure;
        }
    }

    rc = pcmk_rc_ok;
    goto done;

failure:
    if (stop_via_ban) {
        cli_resource_clear(lookup_id, host, NULL, cib, true, force);
    } else if (orig_target_role) {
        cli_resource_update_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, orig_target_role,
                                      FALSE, cib, cib_xml_orig, force);
        free(orig_target_role);
    } else {
        cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, cib, cib_xml_orig,
                                      force);
    }

done:
    if (list_delta != NULL) {
        g_list_free(list_delta);
    }
    if (current_active != NULL) {
        g_list_free_full(current_active, free);
    }
    if (target_active != NULL && (target_active != restart_target_active)) {
        g_list_free_full(target_active, free);
    }
    if (restart_target_active != NULL) {
        g_list_free_full(restart_target_active, free);
    }
    free(rsc_id);
    free(lookup_id);
    pcmk_free_scheduler(scheduler);
    return rc;
}

static inline bool
action_is_pending(const pcmk_action_t *action)
{
    if (pcmk_any_flags_set(action->flags,
                           pcmk__action_optional|pcmk__action_pseudo)
        || !pcmk_is_set(action->flags, pcmk__action_runnable)
        || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
        return false;
    }
    return true;
}
/*!
 * \internal
 * \brief Check whether any actions in a list are pending
 *
 * \param[in] actions  List of actions to check
 *
 * \return true if any actions in the list are pending, otherwise false
 */
static bool
actions_are_pending(const GList *actions)
{
    for (const GList *action = actions; action != NULL;
         action = action->next) {
        const pcmk_action_t *a = (const pcmk_action_t *) action->data;

        if (action_is_pending(a)) {
            crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
            return true;
        }
    }
    return false;
}

static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
    GList *action;

    out->info(out, "Pending actions:");
    for (action = actions; action != NULL; action = action->next) {
        pcmk_action_t *a = (pcmk_action_t *) action->data;

        if (!action_is_pending(a)) {
            continue;
        }

        if (a->node) {
            out->info(out, "\tAction %d: %s\ton %s",
                      a->id, a->uuid, pcmk__node_name(a->node));
        } else {
            out->info(out, "\tAction %d: %s", a->id, a->uuid);
        }
    }
}

/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)

/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)

/*!
 * \internal
 * \brief Wait until all pending cluster actions are complete
 *
 * This waits until either the CIB's transition graph is idle or a timeout is
 * reached.
 *
 * \param[in,out] out         Output object
 * \param[in]     timeout_ms  Consider failed if actions do not complete in
 *                            this time (specified in milliseconds, but
 *                            one-second granularity is actually used; if 0, a
 *                            default will be used)
 * \param[in,out] cib         Connection to the CIB manager
 *
 * \return Standard Pacemaker return code
 */
int
wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t *cib)
{
    // @FIXME This should bail out when run with CIB_file
    pcmk_scheduler_t *scheduler = NULL;
    xmlXPathObject *search = NULL;
    int rc = pcmk_rc_ok;
    bool pending_unknown_state_resources;
    time_t expire_time = time(NULL);
    time_t time_diff;
    bool printed_version_warning = out->is_quiet(out); // don't print if quiet
    char *xpath = NULL;

    if (timeout_ms == 0) {
        expire_time += WAIT_DEFAULT_TIMEOUT_S;
    } else {
        expire_time += pcmk__timeout_ms2s(timeout_ms + 999);
    }

    scheduler = pcmk_new_scheduler();
    if (scheduler == NULL) {
        return ENOMEM;
    }

    xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
                              "/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
                              "/" PCMK__XE_LRM_RESOURCES
                              "/" PCMK__XE_LRM_RESOURCE
                              "/" PCMK__XE_LRM_RSC_OP
                              "[@" PCMK__XA_RC_CODE "='%d']",
                              PCMK_OCF_UNKNOWN);

    do {
        /* Abort if timeout is reached */
        time_diff = expire_time - time(NULL);
        if (time_diff <= 0) {
            print_pending_actions(out, scheduler->priv->actions);
            rc = ETIME;
            break;
        }

        crm_info("Waiting up to %lld seconds for cluster actions to complete",
                 (long long) time_diff);

        if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
            sleep(WAIT_SLEEP_S);
        }

        /* Get latest transition graph */
        pcmk_reset_scheduler(scheduler);
        rc = update_scheduler_input(out, scheduler, cib, NULL);
        if (rc != pcmk_rc_ok) {
            break;
        }

        pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
        pcmk__schedule_actions(scheduler);

        if (!printed_version_warning) {
            /* If the DC has a different version than the local node, the two
             * could come to different conclusions about what actions need to
             * be done. Warn the user in this case.
             *
             * @TODO A possible long-term solution would be to reimplement the
             * wait as a new controller operation that would be forwarded to
             * the DC. However, that would have potential problems of its own.
             */
            const char *dc_version = NULL;

            dc_version = g_hash_table_lookup(scheduler->priv->options,
                                             PCMK_OPT_DC_VERSION);
            if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION,
                              pcmk__str_casei)) {
                out->info(out, "warning: wait option may not work properly in "
                          "mixed-version cluster");
                printed_version_warning = true;
            }
        }

        search = pcmk__xpath_search(scheduler->input->doc, xpath);
        pending_unknown_state_resources = (pcmk__xpath_num_results(search) > 0);
-        freeXpathObject(search);
+        xmlXPathFreeObject(search);
    } while (actions_are_pending(scheduler->priv->actions)
             || pending_unknown_state_resources);

    pcmk_free_scheduler(scheduler);
    free(xpath);
    return rc;
}
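/* Illustrative usage sketch (not part of the original file): a caller that
 * has just pushed CIB changes might block until the cluster settles. The
 * out and cib handles are assumed to be already-initialized objects.
 *
 *     int rc = wait_till_stable(out, 30000, cib);  // wait up to 30 seconds
 *
 *     if (rc == ETIME) {
 *         out->err(out, "Cluster did not settle within the timeout");
 *     } else if (rc != pcmk_rc_ok) {
 *         out->err(out, "Wait failed: %s", pcmk_rc_str(rc));
 *     }
 */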
static const char *
get_action(const char *rsc_action)
{
    const char *action = NULL;

    if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
        action = PCMK_ACTION_VALIDATE_ALL;

    } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
        action = PCMK_ACTION_MONITOR;

    } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
                                    "force-demote", "force-promote", NULL)) {
        // Skip the "force-" prefix (6 characters) to get the agent action
        action = rsc_action + 6;

    } else {
        action = rsc_action;
    }

    return action;
}

/*!
 * \brief Set up environment variables as expected by resource agents
 *
 * When the cluster executes resource agents, it adds certain environment
 * variables (directly or via resource meta-attributes) expected by some
 * resource agents. Add the essential ones that many resource agents expect,
 * so the behavior is the same for command-line execution.
 *
 * \param[in,out] params       Resource parameters that will be passed to agent
 * \param[in]     timeout_ms   Action timeout (in milliseconds)
 * \param[in]     check_level  OCF check level
 * \param[in]     verbosity    Verbosity level
 */
static void
set_agent_environment(GHashTable *params, guint timeout_ms, int check_level,
                      int verbosity)
{
    g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
                        crm_strdup_printf("%u", timeout_ms));

    pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);

    if (check_level >= 0) {
        char *level = crm_strdup_printf("%d", check_level);

        setenv("OCF_CHECK_LEVEL", level, 1);
        free(level);
    }

    pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
    if (verbosity > 1) {
        setenv("OCF_TRACE_RA", "1", 1);
    }

    /* A resource agent using the standard ocf-shellfuncs library will not
     * print messages to stderr if it doesn't have a controlling terminal
     * (e.g. if crm_resource is called via script or ssh). This forces it to
     * do so.
     */
    setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}

/*!
 * \internal
 * \brief Apply command-line overrides to resource parameters
 *
 * \param[in,out] params     Parameters to be passed to agent
 * \param[in]     overrides  Parameters to override (or NULL if none)
 */
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
    if (overrides != NULL) {
        GHashTableIter iter;
        char *name = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, overrides);
        while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                      (gpointer *) &value)) {
            pcmk__insert_dup(params, name, value);
        }
    }
}
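/* Illustrative sketch (not part of the original file): because the merge
 * above stores a fresh copy under each override's key, a key already present
 * in params is replaced, so command-line overrides win. The same effect with
 * plain GLib (both tables and their contents are hypothetical):
 *
 *     GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
 *                                                g_free, g_free);
 *     GHashTable *overrides = g_hash_table_new_full(g_str_hash, g_str_equal,
 *                                                   g_free, g_free);
 *
 *     g_hash_table_insert(params, g_strdup("ip"), g_strdup("192.0.2.10"));
 *     g_hash_table_insert(overrides, g_strdup("ip"), g_strdup("192.0.2.99"));
 *
 *     // After an apply_overrides()-style merge, params["ip"] == "192.0.2.99"
 */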
/* Takes ownership of params.
 * Does not modify override_hash or its contents.
 */
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                 const char *rsc_class, const char *rsc_prov,
                                 const char *rsc_type, const char *rsc_action,
                                 GHashTable *params, GHashTable *override_hash,
                                 guint timeout_ms, int resource_verbose,
                                 gboolean force, int check_level)
{
    const char *class = rsc_class;
    const char *action = get_action(rsc_action);
    crm_exit_t exit_code = CRM_EX_OK;
    svc_action_t *op = NULL;

    // If no timeout was provided, use the same default as the cluster
    if (timeout_ms == 0U) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    set_agent_environment(params, timeout_ms, check_level, resource_verbose);
    apply_overrides(params, override_hash);

    // services__create_resource_action() takes ownership of params on success
    op = services__create_resource_action(rsc_name? rsc_name : "test",
                                          rsc_class, rsc_prov, rsc_type,
                                          action, 0,
                                          QB_MIN(timeout_ms, INT_MAX),
                                          params, 0);
    if (op == NULL) {
        out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                 action, rsc_class, (rsc_prov? ":" : ""),
                 (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
        g_hash_table_destroy(params);
        return CRM_EX_OSERR;
    }

#if PCMK__ENABLE_SERVICE
    if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE,
                     pcmk__str_casei)) {
        class = resources_find_service_class(rsc_type);
    }
#endif

    if (!pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_cli_exec)) {
        services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE,
                                PCMK_EXEC_ERROR,
                                "Manual execution of the %s standard is "
                                "unsupported", pcmk__s(class, "unspecified"));
    }

    if (op->rc != PCMK_OCF_UNKNOWN) {
        exit_code = op->rc;
        goto done;
    }

    services_action_sync(op);

    // Map results to OCF codes for consistent reporting to user
    {
        enum ocf_exitcode ocf_code = services_result2ocf(class, action,
                                                         op->rc);

        // Cast variable instead of function return to keep compilers happy
        exit_code = (crm_exit_t) ocf_code;
    }

done:
    out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                 rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                 exit_code, op->status, services__exit_reason(op),
                 op->stdout_data, op->stderr_data);
    services_action_free(op);
    return exit_code;
}
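/* Illustrative usage sketch (not part of the original file): forcing a
 * one-off validate call against an ocf:pacemaker:Dummy agent. All values
 * here are hypothetical, and params becomes owned by the call (it assumes
 * the internal pcmk__strkey_table() helper for an empty parameter table).
 *
 *     GHashTable *params = pcmk__strkey_table(free, free);
 *
 *     crm_exit_t ec = cli_resource_execute_from_params(out, "test-rsc",
 *                                                      "ocf", "pacemaker",
 *                                                      "Dummy", "validate",
 *                                                      params,
 *                                                      NULL,  // no overrides
 *                                                      0,  // default timeout
 *                                                      0, FALSE, -1);
 */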
/*!
 * \internal
 * \brief Get the timeout the cluster would use for an action
 *
 * \param[in] rsc     Resource that action is for
 * \param[in] action  Name of action
 *
 * \return Action timeout (in milliseconds)
 */
static guint
get_action_timeout(pcmk_resource_t *rsc, const char *action)
{
    long long timeout_ms = -1LL;
    xmlNode *op = pcmk__find_action_config(rsc, action, 0, true);
    GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op);

    if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
                       &timeout_ms, -1LL) != pcmk_rc_ok)
        || (timeout_ms <= 0LL)) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    g_hash_table_destroy(meta);
    return (guint) QB_MIN(timeout_ms, UINT_MAX);
}
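/* Illustrative sketch (not part of the original file): the parse-with-fallback
 * pattern above, shown stand-alone with plain libc strtoll() from <stdlib.h>
 * instead of the internal pcmk__scan_ll() helper (timeout_or_default() is a
 * hypothetical function, and it ignores unit suffixes that real
 * meta-attribute values may carry):
 *
 *     // Return configured timeout in ms, or def_ms if unset or invalid
 *     static long long
 *     timeout_or_default(const char *text, long long def_ms)
 *     {
 *         char *end = NULL;
 *         long long value;
 *
 *         if (text == NULL) {
 *             return def_ms;
 *         }
 *         value = strtoll(text, &end, 10);
 *         if ((end == text) || (*end != '\0') || (value <= 0)) {
 *             return def_ms;
 *         }
 *         return value;
 *     }
 */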
// Does not modify override_hash or its contents
crm_exit_t
cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
                     const char *rsc_action, GHashTable *override_hash,
                     guint timeout_ms, cib_t *cib, int resource_verbose,
                     bool force, int check_level)
{
    pcmk_scheduler_t *scheduler = NULL;
    pcmk__output_t *out = NULL;
    crm_exit_t exit_code = CRM_EX_OK;
    const char *rid = requested_name;
    const char *rtype = NULL;
    const char *rprov = NULL;
    const char *rclass = NULL;
    GHashTable *params = NULL;

    pcmk__assert(rsc != NULL);

    scheduler = rsc->priv->scheduler;
    out = scheduler->priv->out;

    if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                             "force-promote", NULL)) {
        if (pcmk__is_clone(rsc)) {
            GList *nodes = cli_resource_search(rsc, requested_name);

            if ((nodes != NULL) && (force == FALSE)) {
                out->err(out, "It is not safe to %s %s here: "
                         "the cluster claims it is already active",
                         rsc_action, rsc->id);
                out->err(out, "Try setting "
                         PCMK_META_TARGET_ROLE "=" PCMK_ROLE_STOPPED
                         " first or specifying the force option");
                return CRM_EX_UNSAFE;
            }
            g_list_free_full(nodes, free);
        }
    }

    if (pcmk__is_clone(rsc)) {
        /* Grab the first child resource in the hope it's not a group */
        rsc = rsc->priv->children->data;
    }

    if (pcmk__is_group(rsc)) {
        out->err(out, "Sorry, the %s option doesn't support group resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;

    } else if (pcmk__is_bundled(rsc)) {
        out->err(out, "Sorry, the %s option doesn't support bundled resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;
    }

    rclass = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS);
    rprov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER);
    rtype = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE);

    params = generate_resource_params(rsc); // @TODO use local node

    if (timeout_ms == 0U) {
        timeout_ms = get_action_timeout(rsc, get_action(rsc_action));
    }

    if (!pcmk__is_anonymous_clone(rsc->priv->parent)) {
        rid = rsc->id;
    }

    exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov,
                                                 rtype, rsc_action, params,
                                                 override_hash, timeout_ms,
                                                 resource_verbose, force,
                                                 check_level);
    return exit_code;
}

// \return Standard Pacemaker return code
int
cli_resource_move(pcmk_resource_t *rsc, const char *rsc_id,
                  const pcmk_node_t *dest, const char *move_lifetime,
                  cib_t *cib, bool promoted_role_only, bool force)
{
    pcmk_scheduler_t *scheduler = NULL;
    pcmk__output_t *out = NULL;
    int rc = pcmk_rc_ok;
    unsigned int count = 0;
    pcmk_node_t *current = NULL;
    bool cur_is_dest = false;
    const char *active_s = promoted_role_only? "promoted" : "active";

    pcmk__assert((rsc != NULL) && (dest != NULL));

    scheduler = rsc->priv->scheduler;
    out = scheduler->priv->out;

    if (promoted_role_only
        && !pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) {
        pcmk_resource_t *p = uber_parent(rsc);

        if (pcmk_is_set(p->flags, pcmk__rsc_promotable)) {
            /* @TODO This is dead code. If rsc is part of a promotable clone,
             * then it has the pcmk__rsc_promotable flag set.
             *
             * This was added by 36e4b490. Prior to that commit, we were
             * checking whether rsc itself is the promotable clone, and if
             * not, trying to get a promotable clone ancestor.
             *
             * As of that commit, we check whether rsc has the promotable flag
             * set. But if it has a promotable clone ancestor, that flag is
             * set.
             *
             * Question: Should we drop this block and use rsc for the move,
             * or should we check whether rsc is a clone instead of only
             * checking whether the promotable flag is set (as we did prior to
             * 36e4b490)? The latter seems appropriate, especially considering
             * the block below with promoted_count and promoted_node; but we
             * need to trace and test.
             */
            out->info(out, "Using parent '%s' for move instead of '%s'",
                      p->id, rsc_id);
            rsc_id = p->id;
            rsc = p;

        } else {
            out->info(out, "Ignoring --promoted option: %s is not promotable",
                      rsc_id);
            promoted_role_only = false;
        }
    }

    current = pe__find_active_requires(rsc, &count);

    if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) {
        unsigned int promoted_count = 0;
        pcmk_node_t *promoted_node = NULL;

        for (GList *iter = rsc->priv->children; iter != NULL;
             iter = iter->next) {
            pcmk_resource_t *child = iter->data;
            enum rsc_role_e child_role = child->priv->fns->state(child, true);

            if (child_role == pcmk_role_promoted) {
                rsc = child;
                promoted_node = pcmk__current_node(child);
                promoted_count++;
            }
        }
        if (promoted_role_only || (promoted_count != 0)) {
            count = promoted_count;
            current = promoted_node;
        }
    }

    if (count > 1) {
        if (!pcmk__is_clone(rsc)) {
            return pcmk_rc_multiple;
        }
        current = NULL;
    }

    if (pcmk__same_node(current, dest)) {
        if (!force) {
            return pcmk_rc_already;
        }
        cur_is_dest = true;
        crm_info("%s is already %s on %s, reinforcing placement with location "
                 "constraint", rsc_id, active_s, pcmk__node_name(dest));
    }

    /* @TODO The constraint changes in the following commands should be done
     * atomically in a single CIB transaction, to avoid the possibility of
     * multiple moves
     */

    /* Clear any previous prefer constraints across all nodes. */
    cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, false, force);

    /* Clear any previous ban constraints on 'dest'. */
    cli_resource_clear(rsc_id, dest->priv->name, scheduler->nodes, cib, true,
                       force);

    /* Record an explicit preference for 'dest' */
    rc = cli_resource_prefer(out, rsc_id, dest->priv->name, move_lifetime,
                             cib, promoted_role_only, PCMK_ROLE_PROMOTED);

    crm_trace("%s%s now prefers %s%s",
              rsc->id, (promoted_role_only? " (promoted)" : ""),
              pcmk__node_name(dest), (force? " (forced)" : ""));

    /* Ban the current location if force is set and the current location is
     * not the destination. It is possible to use move to enforce a location
     * without regard for where the resource is currently located.
     */
    if (force && !cur_is_dest) {
        /* Ban the original location if possible */
        if (current != NULL) {
            cli_resource_ban(out, rsc_id, current->priv->name, move_lifetime,
                             cib, promoted_role_only, PCMK_ROLE_PROMOTED);

        } else if (count > 1) {
            out->info(out, "Resource '%s' is currently %s in %u locations. "
                      "One may now move to %s",
                      rsc_id, active_s, count, pcmk__node_name(dest));
            out->info(out, "To prevent '%s' from being %s at a specific "
                      "location, specify a node", rsc_id, active_s);

        } else {
            crm_trace("Not banning %s from its current location: not active",
                      rsc_id);
        }
    }

    return rc;
}
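/* Illustrative sketch (not part of the original file): after a successful
 * move, the CIB typically contains a location preference like the following
 * (the cli-prefer-* / cli-ban-* IDs follow the usual crm_resource naming
 * convention, but the exact form may vary by version; resource and node
 * names here are hypothetical):
 *
 *     <rsc_location id="cli-prefer-my-rsc" rsc="my-rsc"
 *                   node="node2" score="INFINITY"/>
 *
 * and, when force is given while the resource is active elsewhere, a ban
 * such as:
 *
 *     <rsc_location id="cli-ban-my-rsc-on-node1" rsc="my-rsc"
 *                   node="node1" score="-INFINITY"/>
 *
 * Both kinds of constraint are removed again by a later clear operation.
 */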