diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c index d04ae85cf9..33ee2f2164 100644 --- a/daemons/fenced/fenced_remote.c +++ b/daemons/fenced/fenced_remote.c @@ -1,2608 +1,2606 @@ /* - * Copyright 2009-2024 the Pacemaker project contributors + * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TIMEOUT_MULTIPLY_FACTOR 1.2 /* When one fencer queries its peers for devices able to handle a fencing * request, each peer will reply with a list of such devices available to it. * Each reply will be parsed into a peer_device_info_t, with each device's * information kept in a device_properties_t. */ typedef struct device_properties_s { /* Whether access to this device has been verified */ gboolean verified; /* The remaining members are indexed by the operation's "phase" */ /* Whether this device has been executed in each phase */ gboolean executed[st_phase_max]; /* Whether this device is disallowed from executing in each phase */ gboolean disallowed[st_phase_max]; /* Action-specific timeout for each phase */ int custom_action_timeout[st_phase_max]; /* Action-specific maximum random delay for each phase */ int delay_max[st_phase_max]; /* Action-specific base delay for each phase */ int delay_base[st_phase_max]; /* Group of enum st_device_flags */ uint32_t device_support_flags; } device_properties_t; typedef struct { /* Name of peer that sent this result */ char *host; /* Only try peers for non-topology based operations once */ gboolean tried; /* Number of entries in the devices table */ int ndevices; /* Devices available to this host that are capable of fencing the target */ GHashTable *devices; } peer_device_info_t; GHashTable *stonith_remote_op_list = NULL; extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer); static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup); static void report_timeout_period(remote_fencing_op_t * op, int op_timeout); static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer); static gint sort_strings(gconstpointer a, gconstpointer b) { return strcmp(a, b); } static void free_remote_query(gpointer data) { if (data != NULL) { peer_device_info_t *peer = data; g_hash_table_destroy(peer->devices); free(peer->host); free(peer); } } void free_stonith_remote_op_list(void) { if (stonith_remote_op_list != NULL) { g_hash_table_destroy(stonith_remote_op_list); stonith_remote_op_list = NULL; } } struct peer_count_data { const remote_fencing_op_t *op; gboolean verified_only; uint32_t support_action_only; int count; }; /*! 
* \internal * \brief Increment a counter if a device has not been executed yet * * \param[in] key Device ID (ignored) * \param[in] value Device properties * \param[in,out] user_data Peer count data */ static void count_peer_device(gpointer key, gpointer value, gpointer user_data) { device_properties_t *props = (device_properties_t*)value; struct peer_count_data *data = user_data; if (!props->executed[data->op->phase] && (!data->verified_only || props->verified) && ((data->support_action_only == st_device_supports_none) || pcmk_is_set(props->device_support_flags, data->support_action_only))) { ++(data->count); } } /*! * \internal * \brief Check the number of available devices in a peer's query results * * \param[in] op Operation that results are for * \param[in] peer Peer to count * \param[in] verified_only Whether to count only verified devices * \param[in] support_action_only Whether to count only devices that support action * * \return Number of devices available to peer that were not already executed */ static int count_peer_devices(const remote_fencing_op_t *op, const peer_device_info_t *peer, gboolean verified_only, uint32_t support_on_action_only) { struct peer_count_data data; data.op = op; data.verified_only = verified_only; data.support_action_only = support_on_action_only; data.count = 0; if (peer) { g_hash_table_foreach(peer->devices, count_peer_device, &data); } return data.count; } /*! * \internal * \brief Search for a device in a query result * * \param[in] op Operation that result is for * \param[in] peer Query result for a peer * \param[in] device Device ID to search for * * \return Device properties if found, NULL otherwise */ static device_properties_t * find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, uint32_t support_action_only) { device_properties_t *props = g_hash_table_lookup(peer->devices, device); if (props && support_action_only != st_device_supports_none && !pcmk_is_set(props->device_support_flags, support_action_only)) { return NULL; } return (props && !props->executed[op->phase] && !props->disallowed[op->phase])? props : NULL; } /*! 
* \internal * \brief Find a device in a peer's device list and mark it as executed * * \param[in] op Operation that peer result is for * \param[in,out] peer Peer with results to search * \param[in] device ID of device to mark as done * \param[in] verified_devices_only Only consider verified devices * * \return TRUE if device was found and marked, FALSE otherwise */ static gboolean grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer, const char *device, gboolean verified_devices_only) { device_properties_t *props = find_peer_device(op, peer, device, fenced_support_flag(op->action)); if ((props == NULL) || (verified_devices_only && !props->verified)) { return FALSE; } crm_trace("Removing %s from %s (%d remaining)", device, peer->host, count_peer_devices(op, peer, FALSE, st_device_supports_none)); props->executed[op->phase] = TRUE; return TRUE; } static void clear_remote_op_timers(remote_fencing_op_t * op) { if (op->query_timer) { g_source_remove(op->query_timer); op->query_timer = 0; } if (op->op_timer_total) { g_source_remove(op->op_timer_total); op->op_timer_total = 0; } if (op->op_timer_one) { g_source_remove(op->op_timer_one); op->op_timer_one = 0; } } static void free_remote_op(gpointer data) { remote_fencing_op_t *op = data; crm_log_xml_debug(op->request, "Destroying"); clear_remote_op_timers(op); free(op->id); free(op->action); free(op->delegate); free(op->target); free(op->client_id); free(op->client_name); free(op->originator); if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); } if (op->request) { free_xml(op->request); op->request = NULL; } if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } g_list_free_full(op->automatic_list, free); g_list_free(op->duplicates); pcmk__reset_result(&op->result); free(op); } void init_stonith_remote_op_hash_table(GHashTable **table) { if (*table == NULL) { *table = pcmk__strkey_table(NULL, free_remote_op); } } /*! * \internal * \brief Return an operation's originally requested action (before any remap) * * \param[in] op Operation to check * * \return Operation's original action */ static const char * op_requested_action(const remote_fencing_op_t *op) { return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action); } /*! * \internal * \brief Remap a "reboot" operation to the "off" phase * * \param[in,out] op Operation to remap */ static void op_phase_off(remote_fencing_op_t *op) { crm_info("Remapping multiple-device reboot targeting %s to 'off' " CRM_XS " id=%.8s", op->target, op->id); op->phase = st_phase_off; /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the * memory allocation at each phase. */ strcpy(op->action, PCMK_ACTION_OFF); } /*! * \internal * \brief Advance a remapped reboot operation to the "on" phase * * \param[in,out] op Operation to remap */ static void op_phase_on(remote_fencing_op_t *op) { GList *iter = NULL; crm_info("Remapped 'off' targeting %s complete, " "remapping to 'on' for %s " CRM_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_on; strcpy(op->action, PCMK_ACTION_ON); /* Skip devices with automatic unfencing, because the cluster will handle it * when the node rejoins. 
*/ for (iter = op->automatic_list; iter != NULL; iter = iter->next) { GList *match = g_list_find_custom(op->devices_list, iter->data, sort_strings); if (match) { op->devices_list = g_list_remove(op->devices_list, match->data); } } g_list_free_full(op->automatic_list, free); op->automatic_list = NULL; /* Rewind device list pointer */ op->devices = op->devices_list; } /*! * \internal * \brief Reset a remapped reboot operation * * \param[in,out] op Operation to reset */ static void undo_op_remap(remote_fencing_op_t *op) { if (op->phase > 0) { crm_info("Undoing remap of reboot targeting %s for %s " CRM_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_requested; strcpy(op->action, PCMK_ACTION_REBOOT); } } /*! * \internal * \brief Create notification data XML for a fencing operation result * * \param[in,out] parent Parent XML element for newly created element * \param[in] op Fencer operation that completed * * \return Newly created XML to add as notification data * \note The caller is responsible for freeing the result. */ static xmlNode * fencing_result2xml(xmlNode *parent, const remote_fencing_op_t *op) { xmlNode *notify_data = pcmk__xe_create(parent, PCMK__XE_ST_NOTIFY_FENCE); crm_xml_add_int(notify_data, PCMK_XA_STATE, op->state); crm_xml_add(notify_data, PCMK__XA_ST_TARGET, op->target); crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ACTION, op->action); crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, op->delegate); crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(notify_data, PCMK__XA_ST_CLIENTID, op->client_id); crm_xml_add(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name); return notify_data; } /*! * \internal * \brief Broadcast a fence result notification to all CPG peers * * \param[in] op Fencer operation that completed * \param[in] op_merged Whether this operation is a duplicate of another */ void fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged) { static int count = 0; xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); xmlNode *wrapper = NULL; xmlNode *notify_data = NULL; count++; crm_trace("Broadcasting result to peers"); crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); crm_xml_add_int(bcast, PCMK_XA_COUNT, count); if (op_merged) { pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true); } wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); notify_data = fencing_result2xml(wrapper, op); stonith__xe_set_result(notify_data, &op->result); pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, bcast); free_xml(bcast); return; } /*! 
* \internal * \brief Reply to a local request originator and notify all subscribed clients * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data) { xmlNode *notify_data = NULL; xmlNode *reply = NULL; pcmk__client_t *client = NULL; if (op->notify_sent == TRUE) { /* nothing to do */ return; } /* Do notification with a clean data object */ crm_xml_add_int(data, PCMK_XA_STATE, op->state); crm_xml_add(data, PCMK__XA_ST_TARGET, op->target); crm_xml_add(data, PCMK__XA_ST_OP, op->action); reply = fenced_construct_reply(op->request, data, &op->result); crm_xml_add(reply, PCMK__XA_ST_DELEGATE, op->delegate); /* Send fencing OP reply to local client that initiated fencing */ client = pcmk__find_client_by_id(op->client_id); if (client == NULL) { crm_trace("Skipping reply to %s: no longer a client", op->client_id); } else { do_local_reply(reply, client, op->call_options); } /* bcast to all local clients that the fencing operation happend */ notify_data = fencing_result2xml(NULL, op); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, &op->result, notify_data); free_xml(notify_data); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); /* mark this op as having notify's already sent */ op->notify_sent = TRUE; free_xml(reply); } /*! * \internal * \brief Finalize all duplicates of a given fencer operation * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data) { for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *other = iter->data; if (other->state == st_duplicate) { other->state = op->state; crm_debug("Performing duplicate notification for %s@%s: %s " CRM_XS " id=%.8s", other->client_name, other->originator, pcmk_exec_status_str(op->result.execution_status), other->id); pcmk__copy_result(&op->result, &other->result); finalize_op(other, data, true); } else { // Possible if (for example) it timed out already crm_err("Skipping duplicate notification for %s@%s " CRM_XS " state=%s id=%.8s", other->client_name, other->originator, stonith_op_state_str(other->state), other->id); } } } static char * delegate_from_xml(xmlNode *xml) { xmlNode *match = get_xpath_object("//@" PCMK__XA_ST_DELEGATE, xml, LOG_NEVER); if (match == NULL) { return crm_element_value_copy(xml, PCMK__XA_SRC); } else { return crm_element_value_copy(match, PCMK__XA_ST_DELEGATE); } } /*! * \internal * \brief Finalize a peer fencing operation * * Clean up after a fencing operation completes. This function has two code * paths: the executioner uses it to broadcast the result to CPG peers, and then * each peer (including the executioner) uses it to process that broadcast and * notify its IPC clients of the result. * * \param[in,out] op Fencer operation that completed * \param[in,out] data If not NULL, XML reply of last delegated operation * \param[in] dup Whether this operation is a duplicate of another * (in which case, do not broadcast the result) * * \note The operation result should be set before calling this function. 
*/ static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup) { int level = LOG_ERR; const char *subt = NULL; xmlNode *local_data = NULL; gboolean op_merged = FALSE; CRM_CHECK((op != NULL), return); // This is a no-op if timers have already been cleared clear_remote_op_timers(op); if (op->notify_sent) { // Most likely, this is a timed-out action that eventually completed crm_notice("Operation '%s'%s%s by %s for %s@%s%s: " "Result arrived too late " CRM_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), op->id); return; } set_fencing_completed(op); undo_op_remap(op); if (data == NULL) { data = pcmk__xe_create(NULL, "remote-op"); local_data = data; } else if (op->delegate == NULL) { switch (op->result.execution_status) { case PCMK_EXEC_NO_FENCE_DEVICE: break; case PCMK_EXEC_INVALID: if (op->result.exit_status != CRM_EX_EXPIRED) { op->delegate = delegate_from_xml(data); } break; default: op->delegate = delegate_from_xml(data); break; } } if (dup || (crm_element_value(data, PCMK__XA_ST_OP_MERGED) != NULL)) { op_merged = true; } /* Tell everyone the operation is done, we will continue * with doing the local notifications once we receive * the broadcast back. */ subt = crm_element_value(data, PCMK__XA_SUBT); if (!dup && !pcmk__str_eq(subt, PCMK__VALUE_BROADCAST, pcmk__str_none)) { /* Defer notification until the bcast message arrives */ fenced_broadcast_op_result(op, op_merged); free_xml(local_data); return; } if (pcmk__result_ok(&op->result) || dup || !pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) { level = LOG_NOTICE; } do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) " CRM_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), crm_exit_str(op->result.exit_status), pcmk_exec_status_str(op->result.execution_status), ((op->result.exit_reason == NULL)? "" : ": "), ((op->result.exit_reason == NULL)? "" : op->result.exit_reason), op->id); handle_local_reply_and_notify(op, data); if (!dup) { finalize_op_duplicates(op, data); } /* Free non-essential parts of the record * Keep the record around so we can query the history */ if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); op->query_results = NULL; } if (op->request) { free_xml(op->request); op->request = NULL; } free_xml(local_data); } /*! 
* \internal * \brief Finalize a watchdog fencer op after the waiting time expires * * \param[in,out] userdata Fencer operation that completed * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_watchdog_done(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Self-fencing (%s) by %s for %s assumed complete " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, NULL, false); return G_SOURCE_REMOVE; } static gboolean remote_op_timeout_one(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Peer's '%s' action targeting %s for client %s timed out " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, "Peer did not return fence result within timeout"); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; crm_trace("Try another device for '%s' action targeting %s " "for client %s without delay " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } // Try another device, if appropriate request_peer_fencing(op, NULL); return G_SOURCE_REMOVE; } /*! * \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] op Fencer operation that timed out * \param[in] reason Readable description of what step timed out */ static void finalize_timed_out_op(remote_fencing_op_t *op, const char *reason) { crm_debug("Action '%s' targeting %s for client %s timed out " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); if (op->phase == st_phase_on) { /* A remapped reboot operation timed out in the "on" phase, but the * "off" phase completed successfully, so quit trying any further * devices, and return success. */ op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { op->state = st_failed; pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason); } finalize_op(op, NULL, false); } /*! 
* \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] userdata Fencer operation that timed out * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_timeout(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_total = 0; if (op->state == st_done) { crm_debug("Action '%s' targeting %s for client %s already completed " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } else { finalize_timed_out_op(userdata, "Fencing did not complete within a " "total timeout based on the " "configured timeout and retries for " "any devices attempted"); } return G_SOURCE_REMOVE; } static gboolean remote_op_query_timeout(gpointer data) { remote_fencing_op_t *op = data; op->query_timer = 0; if (op->state == st_done) { crm_debug("Operation %.8s targeting %s already completed", op->id, op->target); } else if (op->state == st_exec) { crm_debug("Operation %.8s targeting %s already in progress", op->id, op->target); } else if (op->query_results) { // Query succeeded, so attempt the actual fencing crm_debug("Query %.8s targeting %s complete (state=%s)", op->id, op->target, stonith_op_state_str(op->state)); request_peer_fencing(op, NULL); } else { crm_debug("Query %.8s targeting %s timed out (state=%s)", op->id, op->target, stonith_op_state_str(op->state)); finalize_timed_out_op(op, "No capable peers replied to device query " "within timeout"); } return G_SOURCE_REMOVE; } static gboolean topology_is_empty(stonith_topology_t *tp) { int i; if (tp == NULL) { return TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { if (tp->levels[i] != NULL) { return FALSE; } } return TRUE; } /*! * \internal * \brief Add a device to an operation's automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to add */ static void add_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (!match) { op->automatic_list = g_list_prepend(op->automatic_list, pcmk__str_copy(device)); } } /*! * \internal * \brief Remove a device from the automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to remove */ static void remove_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (match) { op->automatic_list = g_list_remove(op->automatic_list, match->data); } } /* deep copy the device list */ static void set_op_device_list(remote_fencing_op_t * op, GList *devices) { GList *lpc = NULL; if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } for (lpc = devices; lpc != NULL; lpc = lpc->next) { const char *device = lpc->data; op->devices_list = g_list_append(op->devices_list, pcmk__str_copy(device)); } op->devices = op->devices_list; } /*! * \internal * \brief Check whether a node matches a topology target * * \param[in] tp Topology table entry to check * \param[in] node Name of node to check * * \return TRUE if node matches topology target */ static gboolean topology_matches(const stonith_topology_t *tp, const char *node) { regex_t r_patt; CRM_CHECK(node && tp && tp->target, return FALSE); switch (tp->kind) { case fenced_target_by_attribute: /* This level targets by attribute, so tp->target is a NAME=VALUE pair * of a permanent attribute applied to targeted nodes. 
The test below * relies on the locally cached copy of the CIB, so if fencing needs to * be done before the initial CIB is received or after a malformed CIB * is received, then the topology will be unable to be used. */ if (node_has_attr(node, tp->target_attribute, tp->target_value)) { crm_notice("Matched %s with %s by attribute", node, tp->target); return TRUE; } break; case fenced_target_by_pattern: /* This level targets node names matching a pattern, so tp->target * (and tp->target_pattern) is a regular expression. */ if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) { crm_info("Bad regex '%s' for fencing level", tp->target); } else { int status = regexec(&r_patt, node, 0, NULL, 0); regfree(&r_patt); if (status == 0) { crm_notice("Matched %s with %s by name", node, tp->target); return TRUE; } } break; case fenced_target_by_name: crm_trace("Testing %s against %s", node, tp->target); return pcmk__str_eq(tp->target, node, pcmk__str_casei); default: break; } crm_trace("No match for %s with %s", node, tp->target); return FALSE; } stonith_topology_t * find_topology_for_host(const char *host) { GHashTableIter tIter; stonith_topology_t *tp = g_hash_table_lookup(topology, host); if(tp != NULL) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } g_hash_table_iter_init(&tIter, topology); while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) { if (topology_matches(tp, host)) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } } crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology)); return NULL; } /*! * \internal * \brief Set fencing operation's device list to target's next topology level * * \param[in,out] op Remote fencing operation to modify * \param[in] empty_ok If true, an operation without a target (i.e. * queries) or a target without a topology will get a * pcmk_rc_ok return value instead of ENODEV * * \return Standard Pacemaker return value */ static int advance_topology_level(remote_fencing_op_t *op, bool empty_ok) { stonith_topology_t *tp = NULL; if (op->target) { tp = find_topology_for_host(op->target); } if (topology_is_empty(tp)) { return empty_ok? pcmk_rc_ok : ENODEV; } pcmk__assert(tp->levels != NULL); stonith__set_call_options(op->call_options, op->id, st_opt_topology); /* This is a new level, so undo any remapping left over from previous */ undo_op_remap(op); do { op->level++; } while (op->level < ST__LEVEL_COUNT && tp->levels[op->level] == NULL); if (op->level < ST__LEVEL_COUNT) { crm_trace("Attempting fencing level %d targeting %s (%d devices) " "for client %s@%s (id=%.8s)", op->level, op->target, g_list_length(tp->levels[op->level]), op->client_name, op->originator, op->id); set_op_device_list(op, tp->levels[op->level]); // The requested delay has been applied for the first fencing level if ((op->level > 1) && (op->client_delay > 0)) { op->client_delay = 0; } if ((g_list_next(op->devices_list) != NULL) && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A reboot has been requested for a topology level with multiple * devices. Instead of rebooting the devices sequentially, we will * turn them all off, then turn them all on again. (Think about * switched power outlets for redundant power supplies.) 
*/ op_phase_off(op); } return pcmk_rc_ok; } crm_info("All %sfencing options targeting %s for client %s@%s failed " CRM_XS " id=%.8s", (stonith_watchdog_timeout_ms > 0)?"non-watchdog ":"", op->target, op->client_name, op->originator, op->id); return ENODEV; } /*! * \internal * \brief If fencing operation is a duplicate, merge it into the other one * * \param[in,out] op Fencing operation to check */ static void merge_duplicates(remote_fencing_op_t *op) { GHashTableIter iter; remote_fencing_op_t *other = NULL; time_t now = time(NULL); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) { const char *other_action = op_requested_action(other); crm_node_t *node = NULL; if (!strcmp(op->id, other->id)) { continue; // Don't compare against self } if (other->state > st_exec) { crm_trace("%.8s not duplicate of %.8s: not in progress", op->id, other->id); continue; } if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: node %s vs. %s", op->id, other->id, op->target, other->target); continue; } if (!pcmk__str_eq(op->action, other_action, pcmk__str_none)) { crm_trace("%.8s not duplicate of %.8s: action %s vs. %s", op->id, other->id, op->action, other_action); continue; } if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: same client %s", op->id, other->id, op->client_name); continue; } if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: self-fencing for %s", op->id, other->id, other->target); continue; } node = pcmk__get_node(0, other->originator, NULL, pcmk__node_search_cluster_member); if (!fencing_peer_active(node)) { crm_notice("Failing action '%s' targeting %s originating from " "client %s@%s: Originator is dead " CRM_XS " id=%.8s", other->action, other->target, other->client_name, other->originator, other->id); crm_trace("%.8s not duplicate of %.8s: originator dead", op->id, other->id); other->state = st_failed; continue; } if ((other->total_timeout > 0) && (now > (other->total_timeout + other->created))) { crm_trace("%.8s not duplicate of %.8s: old (%lld vs. %lld + %ds)", op->id, other->id, (long long)now, (long long)other->created, other->total_timeout); continue; } /* There is another in-flight request to fence the same host * Piggyback on that instead. If it fails, so do we. */ other->duplicates = g_list_append(other->duplicates, op); if (other->total_timeout == 0) { other->total_timeout = op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL); crm_trace("Best guess as to timeout used for %.8s: %ds", other->id, other->total_timeout); } crm_notice("Merging fencing action '%s' targeting %s originating from " "client %s with identical request from %s@%s " CRM_XS " original=%.8s duplicate=%.8s total_timeout=%ds", op->action, op->target, op->client_name, other->client_name, other->originator, op->id, other->id, other->total_timeout); report_timeout_period(op, other->total_timeout); op->state = st_duplicate; } } static uint32_t fencing_active_peers(void) { uint32_t count = 0; crm_node_t *entry; GHashTableIter gIter; g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if(fencing_peer_active(entry)) { count++; } } return count; } /*! 
* \internal * \brief Process a manual confirmation of a pending fence action * * \param[in] client IPC client that sent confirmation * \param[in,out] msg Request XML with manual confirmation * * \return Standard Pacemaker return code */ int fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg) { remote_fencing_op_t *op = NULL; xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR); CRM_CHECK(dev != NULL, return EPROTO); crm_notice("Received manual confirmation that %s has been fenced", pcmk__s(crm_element_value(dev, PCMK__XA_ST_TARGET), "unknown target")); op = initiate_remote_stonith_op(client, msg, TRUE); if (op == NULL) { return EPROTO; } op->state = st_done; set_fencing_completed(op); op->delegate = pcmk__str_copy("a human"); // For the fencer's purposes, the fencing operation is done pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, msg, false); /* For the requester's purposes, the operation is still pending. The * actual result will be sent asynchronously via the operation's done_cb(). */ return EINPROGRESS; } /*! * \internal * \brief Create a new remote stonith operation * * \param[in] client ID of local stonith client that initiated the operation * \param[in] request The request from the client that started the operation * \param[in] peer TRUE if this operation is owned by another stonith peer * (an operation owned by one peer is stored on all peers, * but only the owner executes it; all nodes get the results * once the owner finishes execution) */ void * create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) { remote_fencing_op_t *op = NULL; xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request, LOG_NEVER); int rc = pcmk_rc_ok; const char *operation = NULL; init_stonith_remote_op_hash_table(&stonith_remote_op_list); /* If this operation is owned by another node, check to make * sure we haven't already created this operation. */ if (peer && dev) { const char *op_id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(op_id != NULL, return NULL); op = g_hash_table_lookup(stonith_remote_op_list, op_id); if (op) { crm_debug("Reusing existing remote fencing op %.8s for %s", op_id, ((client == NULL)? "unknown client" : client)); return op; } } op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); crm_element_value_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout)); // Value -1 means disable any static/random fencing delays crm_element_value_int(request, PCMK__XA_ST_DELAY, &(op->client_delay)); if (peer && dev) { op->id = crm_element_value_copy(dev, PCMK__XA_ST_REMOTE_OP); } else { op->id = crm_generate_uuid(); } g_hash_table_replace(stonith_remote_op_list, op->id, op); op->state = st_query; op->replies_expected = fencing_active_peers(); op->action = crm_element_value_copy(dev, PCMK__XA_ST_DEVICE_ACTION); /* The node initiating the stonith operation. If an operation is relayed, * this is the last node the operation lands on. When in standalone mode, * origin is the ID of the client that originated the operation. * * Or may be the name of the function that created the operation. 
*/ op->originator = crm_element_value_copy(dev, PCMK__XA_ST_ORIGIN); if (op->originator == NULL) { /* Local or relayed request */ op->originator = pcmk__str_copy(stonith_our_uname); } // Delegate may not be set op->delegate = crm_element_value_copy(dev, PCMK__XA_ST_DELEGATE); op->created = time(NULL); CRM_LOG_ASSERT(client != NULL); op->client_id = pcmk__str_copy(client); /* For a RELAY operation, set fenced on the client. */ operation = crm_element_value(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { op->client_name = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } else { op->client_name = crm_element_value_copy(request, PCMK__XA_ST_CLIENTNAME); } op->target = crm_element_value_copy(dev, PCMK__XA_ST_TARGET); // @TODO Figure out how to avoid copying XML here op->request = pcmk__xml_copy(NULL, request); rc = pcmk__xe_get_flags(request, PCMK__XA_ST_CALLOPT, &(op->call_options), 0U); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request %s: %s", op->id, pcmk_rc_str(rc)); } crm_element_value_int(request, PCMK__XA_ST_CALLID, &(op->client_callid)); crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, " "base timeout %ds, %u %s expected)", (peer && dev)? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name, op->base_timeout, op->replies_expected, pcmk__plural_alt(op->replies_expected, "reply", "replies")); if (op->call_options & st_opt_cs_nodeid) { int nodeid; crm_node_t *node; pcmk__scan_min_int(op->target, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); /* Ensure the conversion only happens once */ stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid); if (node && node->uname) { pcmk__str_update(&(op->target), node->uname); } else { crm_warn("Could not expand nodeid '%s' into a host name", op->target); } } /* check to see if this is a duplicate operation of another in-flight operation */ merge_duplicates(op); if (op->state != st_duplicate) { /* kick history readers */ fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } /* safe to trim as long as that doesn't touch pending ops */ stonith_fence_history_trim(); return op; } /*! 
* \internal * \brief Create a peer fencing operation from a request, and initiate it * * \param[in] client IPC client that made request (NULL to get from request) * \param[in] request Request XML * \param[in] manual_ack Whether this is a manual action confirmation * * \return Newly created operation on success, otherwise NULL */ remote_fencing_op_t * initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, gboolean manual_ack) { int query_timeout = 0; xmlNode *query = NULL; const char *client_id = NULL; remote_fencing_op_t *op = NULL; const char *relay_op_id = NULL; const char *operation = NULL; if (client) { client_id = client->id; } else { client_id = crm_element_value(request, PCMK__XA_ST_CLIENTID); } CRM_LOG_ASSERT(client_id != NULL); op = create_remote_stonith_op(client_id, request, FALSE); op->owner = TRUE; if (manual_ack) { return op; } CRM_CHECK(op->action, return NULL); if (advance_topology_level(op, true) != pcmk_rc_ok) { op->state = st_failed; } switch (op->state) { case st_failed: // advance_topology_level() exhausted levels pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "All topology levels failed"); crm_warn("Could not request peer fencing (%s) targeting %s " CRM_XS " id=%.8s", op->action, op->target, op->id); finalize_op(op, NULL, false); return op; case st_duplicate: crm_info("Requesting peer fencing (%s) targeting %s (duplicate) " CRM_XS " id=%.8s", op->action, op->target, op->id); return op; default: crm_notice("Requesting peer fencing (%s) targeting %s " CRM_XS " id=%.8s state=%s base_timeout=%ds", op->action, op->target, op->id, stonith_op_state_str(op->state), op->base_timeout); } query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, NULL, op->call_options); crm_xml_add(query, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(query, PCMK__XA_ST_TARGET, op->target); crm_xml_add(query, PCMK__XA_ST_DEVICE_ACTION, op_requested_action(op)); crm_xml_add(query, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(query, PCMK__XA_ST_CLIENTID, op->client_id); crm_xml_add(query, PCMK__XA_ST_CLIENTNAME, op->client_name); crm_xml_add_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout); /* In case of RELAY operation, RELAY information is added to the query to delete the original operation of RELAY. */ operation = crm_element_value(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP); if (relay_op_id) { crm_xml_add(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id); } } pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, query); free_xml(query); query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR; op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op); return op; } enum find_best_peer_options { /*! Skip checking the target peer for capable fencing devices */ FIND_PEER_SKIP_TARGET = 0x0001, /*! Only check the target peer for capable fencing devices */ FIND_PEER_TARGET_ONLY = 0x0002, /*! Skip peers and devices that are not verified */ FIND_PEER_VERIFIED_ONLY = 0x0004, }; static bool is_watchdog_fencing(const remote_fencing_op_t *op, const char *device) { return (stonith_watchdog_timeout_ms > 0 // Only an explicit mismatch is considered not a watchdog fencing. 
&& pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_null_matches) && pcmk__is_fencing_action(op->action) && node_does_watchdog_fencing(op->target)); } static peer_device_info_t * find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options) { GList *iter = NULL; gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE; if (!device && pcmk_is_set(op->call_options, st_opt_topology)) { return NULL; } for (iter = op->query_results; iter != NULL; iter = iter->next) { peer_device_info_t *peer = iter->data; crm_trace("Testing result from %s targeting %s with %d device%s: %d %x", peer->host, op->target, peer->ndevices, pcmk__plural_s(peer->ndevices), peer->tried, options); if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if (pcmk_is_set(op->call_options, st_opt_topology)) { if (grab_peer_device(op, peer, device, verified_devices_only)) { return peer; } } else if (!peer->tried && count_peer_devices(op, peer, verified_devices_only, fenced_support_flag(op->action))) { /* No topology: Use the current best peer */ crm_trace("Simple fencing"); return peer; } } return NULL; } static peer_device_info_t * stonith_choose_peer(remote_fencing_op_t * op) { const char *device = NULL; peer_device_info_t *peer = NULL; uint32_t active = fencing_active_peers(); do { if (op->devices) { device = op->devices->data; crm_trace("Checking for someone to fence (%s) %s using %s", op->action, op->target, device); } else { crm_trace("Checking for someone to fence (%s) %s", op->action, op->target); } /* Best choice is a peer other than the target with verified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY); if (peer) { crm_trace("Found verified peer %s for %s", peer->host, device?device:""); return peer; } if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) { crm_trace("Waiting before looking for unverified devices to fence %s", op->target); return NULL; } /* If no other peer has verified access, next best is unverified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET); if (peer) { crm_trace("Found best unverified peer %s", peer->host); return peer; } /* If no other peer can do it, last option is self-fencing * (which is never allowed for the "on" phase of a remapped reboot) */ if (op->phase != st_phase_on) { peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY); if (peer) { crm_trace("%s will fence itself", peer->host); return peer; } } /* Try the next fencing level if there is one (unless we're in the "on" * phase of a remapped "reboot", because we ignore errors in that case) */ } while ((op->phase != st_phase_on) && pcmk_is_set(op->call_options, st_opt_topology) && (advance_topology_level(op, false) == pcmk_rc_ok)); /* With a simple watchdog fencing configuration without a topology, * "device" is NULL here. Consider it should be done with watchdog fencing. */ if (is_watchdog_fencing(op, device)) { crm_info("Couldn't contact watchdog-fencing target-node (%s)", op->target); /* check_watchdog_fencing_and_wait will log additional info */ } else { crm_notice("Couldn't find anyone to fence (%s) %s using %s", op->action, op->target, (device? 
device : "any device")); } return NULL; } static int valid_fencing_timeout(int specified_timeout, bool action_specific, const remote_fencing_op_t *op, const char *device) { int timeout = specified_timeout; if (!is_watchdog_fencing(op, device)) { return timeout; } timeout = (int) QB_MIN(QB_MAX(specified_timeout, stonith_watchdog_timeout_ms / 1000), INT_MAX); if (timeout > specified_timeout) { if (action_specific) { crm_warn("pcmk_%s_timeout %ds for %s is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", op->action, specified_timeout, device? device : "watchdog", timeout, timeout); } else { crm_warn("Fencing timeout %ds is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", specified_timeout, timeout, timeout); } } return timeout; } static int get_device_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, bool with_delay) { - int timeout = op->base_timeout; + int timeout = valid_fencing_timeout(op->base_timeout, false, op, device); device_properties_t *props; - timeout = valid_fencing_timeout(op->base_timeout, false, op, device); - if (!peer || !device) { return timeout; } props = g_hash_table_lookup(peer->devices, device); if (!props) { return timeout; } if (props->custom_action_timeout[op->phase]) { timeout = valid_fencing_timeout(props->custom_action_timeout[op->phase], true, op, device); } // op->client_delay < 0 means disable any static/random fencing delays if (with_delay && (op->client_delay >= 0)) { // delay_base is eventually limited by delay_max timeout += (props->delay_max[op->phase] > 0 ? props->delay_max[op->phase] : props->delay_base[op->phase]); } return timeout; } struct timeout_data { const remote_fencing_op_t *op; const peer_device_info_t *peer; int total_timeout; }; /*! * \internal * \brief Add timeout to a total if device has not been executed yet * * \param[in] key GHashTable key (device ID) * \param[in] value GHashTable value (device properties) * \param[in,out] user_data Timeout data */ static void add_device_timeout(gpointer key, gpointer value, gpointer user_data) { const char *device_id = key; device_properties_t *props = value; struct timeout_data *timeout = user_data; if (!props->executed[timeout->op->phase] && !props->disallowed[timeout->op->phase]) { timeout->total_timeout += get_device_timeout(timeout->op, timeout->peer, device_id, true); } } static int get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer) { struct timeout_data timeout; timeout.op = op; timeout.peer = peer; timeout.total_timeout = 0; g_hash_table_foreach(peer->devices, add_device_timeout, &timeout); return (timeout.total_timeout? timeout.total_timeout : op->base_timeout); } static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer) { long long total_timeout = 0; stonith_topology_t *tp = find_topology_for_host(op->target); if (pcmk_is_set(op->call_options, st_opt_topology) && tp) { int i; GList *device_list = NULL; GList *iter = NULL; GList *auto_list = NULL; if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none) && (op->automatic_list != NULL)) { auto_list = g_list_copy(op->automatic_list); } /* Yep, this looks scary, nested loops all over the place. * Here is what is going on. * Loop1: Iterate through fencing levels. 
         * Loop2: If a fencing level has devices, loop through each device
         * Loop3: For each device in a fencing level, see what peer owns it
         *        and what that peer has reported the timeout is for the
         *        device.
         */
        for (i = 0; i < ST__LEVEL_COUNT; i++) {
            if (!tp->levels[i]) {
                continue;
            }
            for (device_list = tp->levels[i]; device_list;
                 device_list = device_list->next) {
                bool found = false;

                for (iter = op->query_results; iter != NULL; iter = iter->next) {
                    const peer_device_info_t *peer = iter->data;

                    if (auto_list) {
                        GList *match = g_list_find_custom(auto_list,
                                                          device_list->data,
                                                          sort_strings);
                        if (match) {
                            auto_list = g_list_remove(auto_list, match->data);
                        }
                    }

                    if (find_peer_device(op, peer, device_list->data,
                                         fenced_support_flag(op->action))) {
                        total_timeout += get_device_timeout(op, peer,
                                                            device_list->data,
                                                            true);
                        found = true;
                        break;
                    }
                } /* End Loop3: match device with peer that owns device, find device's timeout period */

                /* In case of a watchdog device, add its timeout to the budget
                 * if we didn't get a reply
                 */
                if (!found && is_watchdog_fencing(op, device_list->data)) {
                    total_timeout += stonith_watchdog_timeout_ms / 1000;
                }
            } /* End Loop2: iterate through devices at a specific level */
        } /* End Loop1: iterate through fencing levels */

        // Add timeouts only for automatic-unfencing devices not covered above
        if (auto_list) {
            for (iter = auto_list; iter != NULL; iter = iter->next) {
                GList *iter2 = NULL;

                for (iter2 = op->query_results; iter2 != NULL;
                     iter2 = iter2->next) {
                    peer_device_info_t *peer = iter2->data;

                    if (find_peer_device(op, peer, iter->data,
                                         st_device_supports_on)) {
                        total_timeout += get_device_timeout(op, peer,
                                                            iter->data, true);
                        break;
                    }
                }
            }
        }

        g_list_free(auto_list);

    } else if (chosen_peer) {
        total_timeout = get_peer_timeout(op, chosen_peer);

    } else {
        total_timeout = valid_fencing_timeout(op->base_timeout, false, op,
                                              NULL);
    }

    if (total_timeout <= 0) {
        total_timeout = op->base_timeout;
    }

    /* Take any requested fencing delay into account to prevent it from eating
     * up the total timeout.
     */
    if (op->client_delay > 0) {
        total_timeout += op->client_delay;
    }

    return (int) QB_MIN(total_timeout, INT_MAX);
}

static void
report_timeout_period(remote_fencing_op_t * op, int op_timeout)
{
    GList *iter = NULL;
    xmlNode *update = NULL;
    const char *client_node = NULL;
    const char *client_id = NULL;
    const char *call_id = NULL;

    if (op->call_options & st_opt_sync_call) {
        /* There is no reason to report the timeout for a synchronous call.
         * It is impossible to use the reported timeout to do anything when
         * the client is blocking for the response. This update is only
         * important for async calls that require a callback to report the
         * results in.
*/ return; } else if (!op->request) { return; } crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id); client_node = crm_element_value(op->request, PCMK__XA_ST_CLIENTNODE); call_id = crm_element_value(op->request, PCMK__XA_ST_CALLID); client_id = crm_element_value(op->request, PCMK__XA_ST_CLIENTID); if (!client_node || !call_id || !client_id) { return; } if (pcmk__str_eq(client_node, stonith_our_uname, pcmk__str_casei)) { // Client is connected to this node, so send update directly to them do_stonith_async_timeout_update(client_id, call_id, op_timeout); return; } /* The client is connected to another node, relay this update to them */ update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0); crm_xml_add(update, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(update, PCMK__XA_ST_CLIENTID, client_id); crm_xml_add(update, PCMK__XA_ST_CALLID, call_id); crm_xml_add_int(update, PCMK__XA_ST_TIMEOUT, op_timeout); pcmk__cluster_send_message(pcmk__get_node(0, client_node, NULL, pcmk__node_search_cluster_member), crm_msg_stonith_ng, update); free_xml(update); for (iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *dup = iter->data; crm_trace("Reporting timeout for duplicate %.8s to client %s", dup->id, dup->client_name); report_timeout_period(iter->data, op_timeout); } } /*! * \internal * \brief Advance an operation to the next device in its topology * * \param[in,out] op Fencer operation to advance * \param[in] device ID of device that just completed * \param[in,out] msg If not NULL, XML reply of last delegated operation */ static void advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, xmlNode *msg) { /* Advance to the next device at this topology level, if any */ if (op->devices) { op->devices = op->devices->next; } /* Handle automatic unfencing if an "on" action was requested */ if ((op->phase == st_phase_requested) && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) { /* If the device we just executed was required, it's not anymore */ remove_required_device(op, device); /* If there are no more devices at this topology level, run through any * remaining devices with automatic unfencing */ if (op->devices == NULL) { op->devices = op->automatic_list; } } if ((op->devices == NULL) && (op->phase == st_phase_off)) { /* We're done with this level and with required devices, but we had * remapped "reboot" to "off", so start over with "on". If any devices * need to be turned back on, op->devices will be non-NULL after this. 
*/ op_phase_on(op); } // This function is only called if the previous device succeeded pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (op->devices) { /* Necessary devices remain, so execute the next one */ crm_trace("Next targeting %s on behalf of %s@%s", op->target, op->client_name, op->originator); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; } request_peer_fencing(op, NULL); } else { /* We're done with all devices and phases, so finalize operation */ crm_trace("Marking complex fencing op targeting %s as complete", op->target); op->state = st_done; finalize_op(op, msg, false); } } static gboolean check_watchdog_fencing_and_wait(remote_fencing_op_t * op) { if (node_does_watchdog_fencing(op->target)) { guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX); crm_notice("Waiting %s for %s to self-fence (%s) for " "client %s " CRM_XS " id=%.8s", pcmk__readable_interval(timeout_ms), op->target, op->action, op->client_name, op->id); if (op->op_timer_one) { g_source_remove(op->op_timer_one); } op->op_timer_one = g_timeout_add(timeout_ms, remote_op_watchdog_done, op); return TRUE; } else { crm_debug("Skipping fallback to watchdog-fencing as %s is " "not in host-list", op->target); } return FALSE; } /*! * \internal * \brief Ask a peer to execute a fencing operation * * \param[in,out] op Fencing operation to be executed * \param[in,out] peer If NULL or topology is in use, choose best peer to * execute the fencing, otherwise use this peer */ static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) { const char *device = NULL; int timeout; CRM_CHECK(op != NULL, return); crm_trace("Action %.8s targeting %s for %s is %s", op->id, op->target, op->client_name, stonith_op_state_str(op->state)); if ((op->phase == st_phase_on) && (op->devices != NULL)) { /* We are in the "on" phase of a remapped topology reboot. If this * device has pcmk_reboot_action="off", or doesn't support the "on" * action, skip it. * * We can't check device properties at this point because we haven't * chosen a peer for this stage yet. Instead, we check the local node's * knowledge about the device. If different versions of the fence agent * are installed on different nodes, there's a chance this could be * mistaken, but the worst that could happen is we don't try turning the * node back on when we should. 
*/ device = op->devices->data; if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF, pcmk__str_none)) { crm_info("Not turning %s back on using %s because the device is " "configured to stay off (pcmk_reboot_action='off')", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } if (!fenced_device_supports_on(device)) { crm_info("Not turning %s back on using %s because the agent " "doesn't support 'on'", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } } timeout = op->base_timeout; if ((peer == NULL) && !pcmk_is_set(op->call_options, st_opt_topology)) { peer = stonith_choose_peer(op); } if (!op->op_timer_total) { op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer); op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op); report_timeout_period(op, op->total_timeout); crm_info("Total timeout set to %ds for peer's fencing targeting %s for %s " CRM_XS " id=%.8s", op->total_timeout, op->target, op->client_name, op->id); } if (pcmk_is_set(op->call_options, st_opt_topology) && op->devices) { /* Ignore the caller's peer preference if topology is in use, because * that peer might not have access to the required device. With * topology, stonith_choose_peer() removes the device from further * consideration, so the timeout must be calculated beforehand. * * @TODO Basing the total timeout on the caller's preferred peer (above) * is less than ideal. */ peer = stonith_choose_peer(op); device = op->devices->data; /* Fencing timeout sent to peer takes no delay into account. * The peer will add a dedicated timer for any delay upon * schedule_stonith_command(). */ timeout = get_device_timeout(op, peer, device, false); } if (peer) { int timeout_one = 0; xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0); const crm_node_t *peer_node = pcmk__get_node(0, peer->host, NULL, pcmk__node_search_cluster_member); if (op->client_delay > 0) { /* Take requested fencing delay into account to prevent it from * eating up the timeout. 
             */
            timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay;
        }

        crm_xml_add(remote_op, PCMK__XA_ST_REMOTE_OP, op->id);
        crm_xml_add(remote_op, PCMK__XA_ST_TARGET, op->target);
        crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ACTION, op->action);
        crm_xml_add(remote_op, PCMK__XA_ST_ORIGIN, op->originator);
        crm_xml_add(remote_op, PCMK__XA_ST_CLIENTID, op->client_id);
        crm_xml_add(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name);
        crm_xml_add_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout);
        crm_xml_add_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options);
        crm_xml_add_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay);

        if (device) {
            timeout_one += TIMEOUT_MULTIPLY_FACTOR
                           * get_device_timeout(op, peer, device, true);
            crm_notice("Requesting that %s perform '%s' action targeting %s "
                       "using %s " CRM_XS " for client %s (%ds)",
                       peer->host, op->action, op->target, device,
                       op->client_name, timeout_one);
            crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ID, device);

        } else {
            timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
            crm_notice("Requesting that %s perform '%s' action targeting %s "
                       CRM_XS " for client %s (%ds, %s)",
                       peer->host, op->action, op->target, op->client_name,
                       timeout_one,
                       pcmk__readable_interval(stonith_watchdog_timeout_ms));
        }

        op->state = st_exec;
        if (op->op_timer_one) {
            g_source_remove(op->op_timer_one);
            op->op_timer_one = 0;
        }

        if (!is_watchdog_fencing(op, device)
            || !check_watchdog_fencing_and_wait(op)) {

            /* Some thoughts about self-fencing cases reaching this point:
               - The check in check_watchdog_fencing_and_wait() shouldn't
                 actually fail if STONITH_WATCHDOG_ID is chosen as the fencing
                 device, because its presence implies that watchdog fencing is
                 enabled anyway.
               - If watchdog fencing is disabled, either in general or for a
                 specific target (detected in check_watchdog_fencing_and_wait()),
                 then for some other kind of self-fencing we can't expect a
                 successful answer, but a timeout is fine if the node doesn't
                 come back in between.
               - A delicate case is when watchdog fencing is enabled for a node
                 but the watchdog fencing device isn't explicitly chosen for
                 self-fencing. Local scheduler execution in sbd might detect
                 the node as unclean and lead to timely self-fencing. Otherwise
                 the selection of PCMK_OPT_STONITH_WATCHDOG_TIMEOUT is at least
                 questionable.
             */

            /* If we get here, we're not waiting for a watchdog timeout, so
             * engage the timer with the timeout evaluated above.
             */
            op->op_timer_one = g_timeout_add((1000 * timeout_one),
                                             remote_op_timeout_one, op);
        }

        pcmk__cluster_send_message(peer_node, crm_msg_stonith_ng, remote_op);
        peer->tried = TRUE;
        free_xml(remote_op);
        return;

    } else if (op->phase == st_phase_on) {
        /* A remapped "on" cannot be executed, but the node was already
         * turned off successfully, so ignore the error and continue.
*/ crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s " "after successful 'off'", device, op->target); advance_topology_device_in_level(op, device, NULL); return; } else if (op->owner == FALSE) { crm_err("Fencing (%s) targeting %s for client %s is not ours to control", op->action, op->target, op->client_name); } else if (op->query_timer == 0) { /* We've exhausted all available peers */ crm_info("No remaining peers capable of fencing (%s) %s for client %s " CRM_XS " state=%s", op->action, op->target, op->client_name, stonith_op_state_str(op->state)); CRM_CHECK(op->state < st_done, return); finalize_timed_out_op(op, "All nodes failed, or are unable, to " "fence target"); } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) { /* if the operation never left the query state, * but we have all the expected replies, then no devices * are available to execute the fencing operation. */ if (is_watchdog_fencing(op, device) && check_watchdog_fencing_and_wait(op)) { /* Consider a watchdog fencing targeting an offline node executing * once it starts waiting for the target to self-fence. So that when * the query timer pops, remote_op_query_timeout() considers the * fencing already in progress. */ op->state = st_exec; return; } if (op->state == st_query) { crm_info("No peers (out of %d) have devices capable of fencing " "(%s) %s for client %s " CRM_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith_op_state_str(op->state)); pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } else { if (pcmk_is_set(op->call_options, st_opt_topology)) { pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } /* ... else use existing result from previous failed attempt * (topology is not in use, and no devices remain to be attempted). * Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would * prevent finalize_op() from setting the correct delegate if * needed. */ crm_info("No peers (out of %d) are capable of fencing (%s) %s " "for client %s " CRM_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith_op_state_str(op->state)); } op->state = st_failed; finalize_op(op, NULL, false); } else { crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s " "for client %s " CRM_XS " id=%.8s", op->action, op->target, (device? " using " : ""), (device? device : ""), op->client_name, op->id); } } /*! * \internal * \brief Comparison function for sorting query results * * \param[in] a GList item to compare * \param[in] b GList item to compare * * \return Per the glib documentation, "a negative integer if the first value * comes before the second, 0 if they are equal, or a positive integer * if the first value comes after the second." */ static gint sort_peers(gconstpointer a, gconstpointer b) { const peer_device_info_t *peer_a = a; const peer_device_info_t *peer_b = b; return (peer_b->ndevices - peer_a->ndevices); } /*! 
* \internal * \brief Determine if all the devices in the topology are found or not * * \param[in] op Fencing operation with topology to check */ static gboolean all_topology_devices_found(const remote_fencing_op_t *op) { GList *device = NULL; GList *iter = NULL; device_properties_t *match = NULL; stonith_topology_t *tp = NULL; gboolean skip_target = FALSE; int i; tp = find_topology_for_host(op->target); if (!tp) { return FALSE; } if (pcmk__is_fencing_action(op->action)) { /* Don't count the devices on the target node if we are killing * the target node. */ skip_target = TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { for (device = tp->levels[i]; device; device = device->next) { match = NULL; for (iter = op->query_results; iter && !match; iter = iter->next) { peer_device_info_t *peer = iter->data; if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } match = find_peer_device(op, peer, device->data, st_device_supports_none); } if (!match) { return FALSE; } } } return TRUE; } /*! * \internal * \brief Parse action-specific device properties from XML * * \param[in] xml XML element containing the properties * \param[in] peer Name of peer that sent XML (for logs) * \param[in] device Device ID (for logs) * \param[in] action Action the properties relate to (for logs) * \param[in,out] op Fencing operation that properties are being parsed for * \param[in] phase Phase the properties relate to * \param[in,out] props Device properties to update */ static void parse_action_specific(const xmlNode *xml, const char *peer, const char *device, const char *action, remote_fencing_op_t *op, enum st_remap_phase phase, device_properties_t *props) { props->custom_action_timeout[phase] = 0; crm_element_value_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, &props->custom_action_timeout[phase]); if (props->custom_action_timeout[phase]) { crm_trace("Peer %s with device %s returned %s action timeout %ds", peer, device, action, props->custom_action_timeout[phase]); } props->delay_max[phase] = 0; crm_element_value_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]); if (props->delay_max[phase]) { crm_trace("Peer %s with device %s returned maximum of random delay %ds for %s", peer, device, props->delay_max[phase], action); } props->delay_base[phase] = 0; crm_element_value_int(xml, PCMK__XA_ST_DELAY_BASE, &props->delay_base[phase]); if (props->delay_base[phase]) { crm_trace("Peer %s with device %s returned base delay %ds for %s", peer, device, props->delay_base[phase], action); } /* Handle devices with automatic unfencing */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { int required = 0; crm_element_value_int(xml, PCMK__XA_ST_REQUIRED, &required); if (required) { crm_trace("Peer %s requires device %s to execute for action %s", peer, device, action); add_required_device(op, device); } } /* If a reboot is remapped to off+on, it's possible that a node is allowed * to perform one action but not another. */ if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) { props->disallowed[phase] = TRUE; crm_trace("Peer %s is disallowed from executing %s for device %s", peer, action, device); } } /*! 
* \internal * \brief Parse one device's properties from peer's XML query reply * * \param[in] xml XML node containing device properties * \param[in,out] op Operation that query and reply relate to * \param[in,out] peer Peer's device information * \param[in] device ID of device being parsed */ static void add_device_properties(const xmlNode *xml, remote_fencing_op_t *op, peer_device_info_t *peer, const char *device) { xmlNode *child; int verified = 0; device_properties_t *props = pcmk__assert_alloc(1, sizeof(device_properties_t)); int rc = pcmk_rc_ok; /* Add a new entry to this peer's devices list */ g_hash_table_insert(peer->devices, pcmk__str_copy(device), props); /* Peers with verified (monitored) access will be preferred */ crm_element_value_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified); if (verified) { crm_trace("Peer %s has confirmed a verified device %s", peer->host, device); props->verified = TRUE; } // Nodes <2.1.5 won't set this, so assume unfencing in that case rc = pcmk__xe_get_flags(xml, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, &(props->device_support_flags), st_device_supports_on); if (rc != pcmk_rc_ok) { crm_warn("Couldn't determine device support for %s " "(assuming unfencing): %s", device, pcmk_rc_str(rc)); } /* Parse action-specific device properties */ parse_action_specific(xml, peer->host, device, op_requested_action(op), op, st_phase_requested, props); for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child)) { /* Replies for "reboot" operations will include the action-specific * values for "off" and "on" in child elements, just in case the reboot * winds up getting remapped. */ if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF, op, st_phase_off, props); } else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_ON, op, st_phase_on, props); } } } /*! * \internal * \brief Parse a peer's XML query reply and add it to operation's results * * \param[in,out] op Operation that query and reply relate to * \param[in] host Name of peer that sent this reply * \param[in] ndevices Number of devices expected in reply * \param[in] xml XML node containing device list * * \return Newly allocated result structure with parsed reply */ static peer_device_info_t * add_result(remote_fencing_op_t *op, const char *host, int ndevices, const xmlNode *xml) { peer_device_info_t *peer = pcmk__assert_alloc(1, sizeof(peer_device_info_t)); xmlNode *child; peer->host = pcmk__str_copy(host); peer->devices = pcmk__strkey_table(free, free); /* Each child element describes one capable device available to the peer */ for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child)) { const char *device = pcmk__xe_id(child); if (device) { add_device_properties(child, op, peer, device); } } peer->ndevices = g_hash_table_size(peer->devices); CRM_CHECK(ndevices == peer->ndevices, crm_err("Query claimed to have %d device%s but %d found", ndevices, pcmk__plural_s(ndevices), peer->ndevices)); op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers); return peer; } /*! * \internal * \brief Handle a peer's reply to our fencing query * * Parse a query result from XML and store it in the remote operation * table, and when enough replies have been received, issue a fencing request. 
* * \param[in] msg XML reply received * * \return pcmk_ok on success, -errno on error * * \note See initiate_remote_stonith_op() for how the XML query was initially * formed, and stonith_query() for how the peer formed its XML reply. */ int process_remote_stonith_query(xmlNode *msg) { int ndevices = 0; gboolean host_is_target = FALSE; gboolean have_all_replies = FALSE; const char *id = NULL; const char *host = NULL; remote_fencing_op_t *op = NULL; peer_device_info_t *peer = NULL; uint32_t replies_expected; xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return -EPROTO); dev = get_xpath_object("//@" PCMK__XA_ST_AVAILABLE_DEVICES, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); crm_element_value_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices); op = g_hash_table_lookup(stonith_remote_op_list, id); if (op == NULL) { crm_debug("Received query reply for unknown or expired operation %s", id); return -EOPNOTSUPP; } replies_expected = fencing_active_peers(); if (op->replies_expected < replies_expected) { replies_expected = op->replies_expected; } if ((++op->replies >= replies_expected) && (op->state == st_query)) { have_all_replies = TRUE; } host = crm_element_value(msg, PCMK__XA_SRC); host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei); crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s", op->replies, replies_expected, host, op->target, op->action, ndevices, pcmk__plural_s(ndevices), id); if (ndevices > 0) { peer = add_result(op, host, ndevices, dev); } pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (pcmk_is_set(op->call_options, st_opt_topology)) { /* If we start the fencing before all the topology results are in, * it is possible fencing levels will be skipped because of the missing * query results. */ if (op->state == st_query && all_topology_devices_found(op)) { /* All the query results are in for the topology, start the fencing ops. */ crm_trace("All topology devices found"); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } } else if (op->state == st_query) { int nverified = count_peer_devices(op, peer, TRUE, fenced_support_flag(op->action)); /* We have a result for a non-topology fencing op that looks promising, * go ahead and start fencing before query timeout */ if ((peer != NULL) && !host_is_target && nverified) { /* we have a verified device living on a peer that is not the target */ crm_trace("Found %d verified device%s", nverified, pcmk__plural_s(nverified)); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } else { crm_trace("Waiting for more peer results before launching fencing operation"); } } else if ((peer != NULL) && (op->state == st_done)) { crm_info("Discarding query result from %s (%d device%s): " "Operation is %s", peer->host, peer->ndevices, pcmk__plural_s(peer->ndevices), stonith_op_state_str(op->state)); } return pcmk_ok; } /*! * \internal * \brief Handle a peer's reply to a fencing request * * Parse a fencing reply from XML, and either finalize the operation * or attempt another device as appropriate. 
* * \param[in] msg XML reply received */ void fenced_process_fencing_reply(xmlNode *msg) { const char *id = NULL; const char *device = NULL; remote_fencing_op_t *op = NULL; xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR); pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(dev != NULL, return); id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return); dev = stonith__find_xe_with_result(msg); CRM_CHECK(dev != NULL, return); stonith__xe_get_result(dev, &result); device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); if (stonith_remote_op_list) { op = g_hash_table_lookup(stonith_remote_op_list, id); } if ((op == NULL) && pcmk__result_ok(&result)) { /* Record successful fencing operations */ const char *client_id = crm_element_value(dev, PCMK__XA_ST_CLIENTID); op = create_remote_stonith_op(client_id, dev, TRUE); } if (op == NULL) { /* Could be for an event that began before we started */ /* TODO: Record the op for later querying */ crm_info("Received peer result of unknown or expired operation %s", id); pcmk__reset_result(&result); return; } pcmk__reset_result(&op->result); op->result = result; // The operation takes ownership of the result if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) { crm_err("Received outdated reply for device %s (instead of %s) to " "fence (%s) %s. Operation already timed out at peer level.", device, (const char *) op->devices->data, op->action, op->target); return; } if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT), PCMK__VALUE_BROADCAST, pcmk__str_none)) { if (pcmk__result_ok(&op->result)) { op->state = st_done; } else { op->state = st_failed; } finalize_op(op, msg, false); return; } else if (!pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) { /* If this isn't a remote level broadcast, and we are not the * originator of the operation, we should not be receiving this msg. */ crm_err("Received non-broadcast fencing result for operation %.8s " "we do not own (device %s targeting %s)", op->id, device, op->target); return; } if (pcmk_is_set(op->call_options, st_opt_topology)) { const char *device = NULL; const char *reason = op->result.exit_reason; /* We own the op, and it is complete. broadcast the result to all nodes * and notify our local clients. */ if (op->state == st_done) { finalize_op(op, msg, false); return; } device = crm_element_value(msg, PCMK__XA_ST_DEVICE_ID); if ((op->phase == 2) && !pcmk__result_ok(&op->result)) { /* A remapped "on" failed, but the node was already turned off * successfully, so ignore the error and continue. */ crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s " "after successful 'off'", device, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : ": ", (reason == NULL)? "" : reason, op->target); pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { crm_notice("Action '%s' targeting %s%s%s on behalf of %s@%s: " "%s%s%s%s", op->action, op->target, ((device == NULL)? "" : " using "), ((device == NULL)? "" : device), op->client_name, op->originator, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } if (pcmk__result_ok(&op->result)) { /* An operation completed successfully. Try another device if * necessary, otherwise mark the operation as done. */ advance_topology_device_in_level(op, device, msg); return; } else { /* This device failed, time to try another topology level. 
If no other * levels are available, mark this operation as failed and report results. */ if (advance_topology_level(op, false) != pcmk_rc_ok) { op->state = st_failed; finalize_op(op, msg, false); return; } } } else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) { op->state = st_done; finalize_op(op, msg, false); return; } else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT) && (op->devices == NULL)) { /* If the operation timed out don't bother retrying other peers. */ op->state = st_failed; finalize_op(op, msg, false); return; } else { /* fall-through and attempt other fencing action using another peer */ } /* Retry on failure */ crm_trace("Next for %s on behalf of %s@%s (result was: %s)", op->target, op->originator, op->client_name, pcmk_exec_status_str(op->result.execution_status)); request_peer_fencing(op, NULL); } gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action) { GHashTableIter iter; time_t now = time(NULL); remote_fencing_op_t *rop = NULL; if (tolerance <= 0 || !stonith_remote_op_list || target == NULL || action == NULL) { return FALSE; } g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) { if (strcmp(rop->target, target) != 0) { continue; } else if (rop->state != st_done) { continue; /* We don't have to worry about remapped reboots here * because if state is done, any remapping has been undone */ } else if (strcmp(rop->action, action) != 0) { continue; } else if ((rop->completed + tolerance) < now) { continue; } crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s", target, action, tolerance, rop->delegate, rop->originator); return TRUE; } return FALSE; } diff --git a/lib/cluster/election.c b/lib/cluster/election.c index c3e4cd126c..6ce672edc2 100644 --- a/lib/cluster/election.c +++ b/lib/cluster/election.c @@ -1,727 +1,727 @@ /* - * Copyright 2004-2024 the Pacemaker project contributors + * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #define STORM_INTERVAL 2 /* in seconds */ struct election_s { enum election_result state; guint count; // How many times local node has voted char *name; // Descriptive name for this election char *uname; // Local node's name GSourceFunc cb; // Function to call if election is won GHashTable *voted; // Key = node name, value = how node voted mainloop_timer_t *timeout; // When to abort if all votes not received int election_wins; // Track wins, for storm detection bool wrote_blackbox; // Write a storm blackbox at most once time_t expires; // When storm detection period ends time_t last_election_loss; // When dampening period ends }; static void election_complete(election_t *e) { e->state = election_won; if (e->cb != NULL) { e->cb(e); } election_reset(e); } static gboolean election_timer_cb(gpointer user_data) { election_t *e = user_data; crm_info("%s timed out, declaring local node as winner", e->name); election_complete(e); return FALSE; } /*! * \brief Get current state of an election * * \param[in] e Election object * * \return Current state of \e */ enum election_result election_state(const election_t *e) { return (e == NULL)? election_error : e->state; } /*! 
* \brief Create a new election object * * Every node that wishes to participate in an election must create an election * object. Typically, this should be done once, at start-up. A caller should * only create a single election object. * * \param[in] name Label for election (for logging) * \param[in] uname Local node's name * \param[in] period_ms How long to wait for all peers to vote * \param[in] cb Function to call if local node wins election * * \return Newly allocated election object on success, NULL on error * \note The caller is responsible for freeing the returned value using * election_fini(). */ election_t * election_init(const char *name, const char *uname, guint period_ms, GSourceFunc cb) { election_t *e = NULL; static guint count = 0; CRM_CHECK(uname != NULL, return NULL); e = calloc(1, sizeof(election_t)); if (e == NULL) { crm_perror(LOG_CRIT, "Cannot create election"); return NULL; } e->uname = strdup(uname); if (e->uname == NULL) { crm_perror(LOG_CRIT, "Cannot create election"); free(e); return NULL; } e->name = name? crm_strdup_printf("election-%s", name) : crm_strdup_printf("election-%u", count++); e->cb = cb; e->timeout = mainloop_timer_add(e->name, period_ms, FALSE, election_timer_cb, e); crm_trace("Created %s", e->name); return e; } /*! * \brief Disregard any previous vote by specified peer * * This discards any recorded vote from a specified peer. Election users should * call this whenever a voting peer becomes inactive. * * \param[in,out] e Election object * \param[in] uname Name of peer to disregard */ void election_remove(election_t *e, const char *uname) { if ((e != NULL) && (uname != NULL) && (e->voted != NULL)) { crm_trace("Discarding %s (no-)vote from lost peer %s", e->name, uname); g_hash_table_remove(e->voted, uname); } } /*! * \brief Stop election timer and disregard all votes * * \param[in,out] e Election object */ void election_reset(election_t *e) { if (e != NULL) { crm_trace("Resetting election %s", e->name); mainloop_timer_stop(e->timeout); if (e->voted) { crm_trace("Destroying voted cache with %d members", g_hash_table_size(e->voted)); g_hash_table_destroy(e->voted); e->voted = NULL; } } } /*! * \brief Free an election object * * Free all memory associated with an election object, stopping its * election timer (if running). * * \param[in,out] e Election object */ void election_fini(election_t *e) { if (e != NULL) { election_reset(e); crm_trace("Destroying %s", e->name); mainloop_timer_del(e->timeout); free(e->uname); free(e->name); free(e); } } static void election_timeout_start(election_t *e) { if (e != NULL) { mainloop_timer_start(e->timeout); } } /*! * \brief Stop an election's timer, if running * * \param[in,out] e Election object */ void election_timeout_stop(election_t *e) { if (e != NULL) { mainloop_timer_stop(e->timeout); } } /*! 
* \brief Change an election's timeout (restarting timer if running) * * \param[in,out] e Election object * \param[in] period New timeout */ void election_timeout_set_period(election_t *e, guint period) { if (e != NULL) { mainloop_timer_set_period(e->timeout, period); } else { crm_err("No election defined"); } } static int get_uptime(struct timeval *output) { static time_t expires = 0; static struct rusage info; time_t tm_now = time(NULL); if (expires < tm_now) { int rc = 0; info.ru_utime.tv_sec = 0; info.ru_utime.tv_usec = 0; rc = getrusage(RUSAGE_SELF, &info); output->tv_sec = 0; output->tv_usec = 0; if (rc < 0) { crm_perror(LOG_ERR, "Could not calculate the current uptime"); expires = 0; return -1; } crm_debug("Current CPU usage is: %lds, %ldus", (long)info.ru_utime.tv_sec, (long)info.ru_utime.tv_usec); } expires = tm_now + STORM_INTERVAL; /* N seconds after the last _access_ */ output->tv_sec = info.ru_utime.tv_sec; output->tv_usec = info.ru_utime.tv_usec; return 1; } static int compare_age(struct timeval your_age) { struct timeval our_age; get_uptime(&our_age); /* If an error occurred, our_age will be compared as {0,0} */ if (our_age.tv_sec > your_age.tv_sec) { crm_debug("Win: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec); return 1; } else if (our_age.tv_sec < your_age.tv_sec) { crm_debug("Lose: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec); return -1; } else if (our_age.tv_usec > your_age.tv_usec) { crm_debug("Win: %ld.%06ld vs %ld.%06ld (usec)", (long)our_age.tv_sec, (long)our_age.tv_usec, (long)your_age.tv_sec, (long)your_age.tv_usec); return 1; } else if (our_age.tv_usec < your_age.tv_usec) { crm_debug("Lose: %ld.%06ld vs %ld.%06ld (usec)", (long)our_age.tv_sec, (long)our_age.tv_usec, (long)your_age.tv_sec, (long)your_age.tv_usec); return -1; } return 0; } /*! * \brief Start a new election by offering local node's candidacy * * Broadcast a "vote" election message containing the local node's ID, * (incremented) election counter, and uptime, and start the election timer. * * \param[in,out] e Election object * * \note Any nodes agreeing to the candidacy will send a "no-vote" reply, and if * all active peers do so, or if the election times out, the local node * wins the election. (If we lose to any peer vote, we will stop the * timer, so a timeout means we did not lose -- either some peer did not * vote, or we did not call election_check() in time.) */ void election_vote(election_t *e) { struct timeval age; xmlNode *vote = NULL; crm_node_t *our_node; if (e == NULL) { crm_trace("Election vote requested, but no election available"); return; } our_node = pcmk__get_node(0, e->uname, NULL, pcmk__node_search_cluster_member); if (!pcmk__cluster_is_node_active(our_node)) { crm_trace("Cannot vote in %s yet: local node not connected to cluster", e->name); return; } election_reset(e); e->state = election_in_progress; vote = create_request(CRM_OP_VOTE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); e->count++; crm_xml_add(vote, PCMK__XA_ELECTION_OWNER, pcmk__cluster_get_xml_id(our_node)); crm_xml_add_int(vote, PCMK__XA_ELECTION_ID, e->count); // Warning: PCMK__XA_ELECTION_AGE_NANO_SEC value is actually microseconds get_uptime(&age); crm_xml_add_timeval(vote, PCMK__XA_ELECTION_AGE_SEC, PCMK__XA_ELECTION_AGE_NANO_SEC, &age); pcmk__cluster_send_message(NULL, crm_msg_crmd, vote); free_xml(vote); crm_debug("Started %s round %d", e->name, e->count); election_timeout_start(e); return; } /*! 
* \brief Check whether local node has won an election * * If all known peers have sent no-vote messages, stop the election timer, set * the election state to won, and call any registered win callback. * * \param[in,out] e Election object * * \return TRUE if local node has won, FALSE otherwise * \note If all known peers have sent no-vote messages, but the election owner * does not call this function, the election will not be won (and the * callback will not be called) until the election times out. * \note This should be called when election_count_vote() returns * \c election_in_progress. */ bool election_check(election_t *e) { int voted_size = 0; int num_members = 0; if (e == NULL) { crm_trace("Election check requested, but no election available"); return FALSE; } if (e->voted == NULL) { crm_trace("%s check requested, but no votes received yet", e->name); return FALSE; } voted_size = g_hash_table_size(e->voted); num_members = pcmk__cluster_num_active_nodes(); /* in the case of #voted > #members, it is better to * wait for the timeout and give the cluster time to * stabilize */ if (voted_size >= num_members) { /* we won and everyone has voted */ election_timeout_stop(e); if (voted_size > num_members) { GHashTableIter gIter; const crm_node_t *node; char *key = NULL; crm_warn("Received too many votes in %s", e->name); g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) & node)) { if (pcmk__cluster_is_node_active(node)) { crm_warn("* expected vote: %s", node->uname); } } g_hash_table_iter_init(&gIter, e->voted); while (g_hash_table_iter_next(&gIter, (gpointer *) & key, NULL)) { crm_warn("* actual vote: %s", key); } } crm_info("%s won by local node", e->name); election_complete(e); return TRUE; } else { crm_debug("%s still waiting on %d of %d votes", e->name, num_members - voted_size, num_members); } return FALSE; } #define LOSS_DAMPEN 2 /* in seconds */ struct vote { const char *op; const char *from; const char *version; const char *election_owner; int election_id; struct timeval age; }; /*! * \brief Unpack an election message * * \param[in] e Election object (for logging only) * \param[in] message Election message XML * \param[out] vote Parsed fields from message * * \return TRUE if election message and election are valid, FALSE otherwise * \note The parsed struct's pointer members are valid only for the lifetime of * the message argument. */ static bool parse_election_message(const election_t *e, const xmlNode *message, struct vote *vote) { CRM_CHECK(message && vote, return FALSE); vote->election_id = -1; vote->age.tv_sec = -1; vote->age.tv_usec = -1; vote->op = crm_element_value(message, PCMK__XA_CRM_TASK); vote->from = crm_element_value(message, PCMK__XA_SRC); vote->version = crm_element_value(message, PCMK_XA_VERSION); vote->election_owner = crm_element_value(message, PCMK__XA_ELECTION_OWNER); crm_element_value_int(message, PCMK__XA_ELECTION_ID, &(vote->election_id)); if ((vote->op == NULL) || (vote->from == NULL) || (vote->version == NULL) || (vote->election_owner == NULL) || (vote->election_id < 0)) { crm_warn("Invalid %s message from %s in %s ", (vote->op? vote->op : "election"), (vote->from? vote->from : "unspecified node"), (e? e->name : "election")); return FALSE; } // Op-specific validation if (pcmk__str_eq(vote->op, CRM_OP_VOTE, pcmk__str_none)) { /* Only vote ops have uptime. Warning: PCMK__XA_ELECTION_AGE_NANO_SEC value is in microseconds. 
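 * For example (illustrative value), an uptime of 12.345678 seconds arrives
 * as PCMK__XA_ELECTION_AGE_SEC=12 and PCMK__XA_ELECTION_AGE_NANO_SEC=345678,
 * i.e. the struct timeval's tv_usec rather than true nanoseconds.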
*/ crm_element_value_timeval(message, PCMK__XA_ELECTION_AGE_SEC, PCMK__XA_ELECTION_AGE_NANO_SEC, &(vote->age)); if ((vote->age.tv_sec < 0) || (vote->age.tv_usec < 0)) { crm_warn("Cannot count %s %s from %s because it is missing uptime", (e? e->name : "election"), vote->op, vote->from); return FALSE; } } else if (!pcmk__str_eq(vote->op, CRM_OP_NOVOTE, pcmk__str_none)) { crm_info("Cannot process %s message from %s because %s is not a known election op", (e? e->name : "election"), vote->from, vote->op); return FALSE; } // Election validation if (e == NULL) { crm_info("Cannot count %s from %s because no election available", vote->op, vote->from); return FALSE; } /* If the membership cache is NULL, we REALLY shouldn't be voting -- * the question is how we managed to get here. */ if (crm_peer_cache == NULL) { crm_info("Cannot count %s %s from %s because no peer information available", e->name, vote->op, vote->from); return FALSE; } return TRUE; } static void record_vote(election_t *e, struct vote *vote) { pcmk__assert(e && vote && vote->from && vote->op); if (e->voted == NULL) { e->voted = pcmk__strkey_table(free, free); } pcmk__insert_dup(e->voted, vote->from, vote->op); } static void send_no_vote(crm_node_t *peer, struct vote *vote) { // @TODO probably shouldn't hardcode CRM_SYSTEM_CRMD and crm_msg_crmd xmlNode *novote = create_request(CRM_OP_NOVOTE, NULL, vote->from, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); crm_xml_add(novote, PCMK__XA_ELECTION_OWNER, vote->election_owner); crm_xml_add_int(novote, PCMK__XA_ELECTION_ID, vote->election_id); pcmk__cluster_send_message(peer, crm_msg_crmd, novote); free_xml(novote); } /*! * \brief Process an election message (vote or no-vote) from a peer * * \param[in,out] e Election object * \param[in] message Election message XML from peer * \param[in] can_win Whether local node is eligible to win * * \return Election state after new vote is considered * \note If the peer message is a vote, and we prefer the peer to win, this will * send a no-vote reply to the peer. * \note The situations "we lost to this vote" and "this is a late no-vote * after we've already lost" both return election_lost. If a caller needs * to distinguish them, it should save the current state before calling * this function, and then compare the result. 
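 * \note A minimal sketch of that save-and-compare pattern (purely
 *       illustrative, not an excerpt from any caller):
 * \code
 *     enum election_result prev = election_state(e);
 *     enum election_result now = election_count_vote(e, msg, true);
 *
 *     if ((now == election_lost) && (prev != election_lost)) {
 *         // we lost to this vote, rather than seeing a late no-vote
 *     }
 * \endcode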
*/ enum election_result election_count_vote(election_t *e, const xmlNode *message, bool can_win) { int log_level = LOG_INFO; gboolean done = FALSE; gboolean we_lose = FALSE; - const char *reason = "unknown"; + const char *reason = NULL; bool we_are_owner = FALSE; crm_node_t *our_node = NULL, *your_node = NULL; time_t tm_now = time(NULL); struct vote vote; CRM_CHECK(message != NULL, return election_error); if (parse_election_message(e, message, &vote) == FALSE) { return election_error; } your_node = pcmk__get_node(0, vote.from, NULL, pcmk__node_search_cluster_member); our_node = pcmk__get_node(0, e->uname, NULL, pcmk__node_search_cluster_member); we_are_owner = (our_node != NULL) && pcmk__str_eq(pcmk__cluster_get_xml_id(our_node), vote.election_owner, pcmk__str_none); if (!can_win) { reason = "Not eligible"; we_lose = TRUE; } else if (!pcmk__cluster_is_node_active(our_node)) { reason = "We are not part of the cluster"; log_level = LOG_ERR; we_lose = TRUE; } else if (we_are_owner && (vote.election_id != e->count)) { log_level = LOG_TRACE; reason = "Superseded"; done = TRUE; } else if (!pcmk__cluster_is_node_active(your_node)) { /* Possibly we cached the message in the FSA queue at a point that it wasn't */ reason = "Peer is not part of our cluster"; log_level = LOG_WARNING; done = TRUE; } else if (pcmk__str_eq(vote.op, CRM_OP_NOVOTE, pcmk__str_none) || pcmk__str_eq(vote.from, e->uname, pcmk__str_none)) { /* Receiving our own broadcast vote, or a no-vote from peer, is a vote * for us to win */ if (!we_are_owner) { crm_warn("Cannot count %s round %d %s from %s because we are not election owner (%s)", e->name, vote.election_id, vote.op, vote.from, vote.election_owner); return election_error; } if (e->state != election_in_progress) { // Should only happen if we already lost crm_debug("Not counting %s round %d %s from %s because no election in progress", e->name, vote.election_id, vote.op, vote.from); return e->state; } record_vote(e, &vote); reason = "Recorded"; done = TRUE; } else { // A peer vote requires a comparison to determine which node is better int age_result = compare_age(vote.age); int version_result = compare_version(vote.version, CRM_FEATURE_SET); if (version_result < 0) { reason = "Version"; we_lose = TRUE; } else if (version_result > 0) { reason = "Version"; } else if (age_result < 0) { reason = "Uptime"; we_lose = TRUE; } else if (age_result > 0) { reason = "Uptime"; } else if (strcasecmp(e->uname, vote.from) > 0) { reason = "Host name"; we_lose = TRUE; } else { reason = "Host name"; } } if (e->expires < tm_now) { e->election_wins = 0; e->expires = tm_now + STORM_INTERVAL; } else if (done == FALSE && we_lose == FALSE) { int peers = 1 + g_hash_table_size(crm_peer_cache); /* If every node has to vote down every other node, thats N*(N-1) total elections * Allow some leeway before _really_ complaining */ e->election_wins++; if (e->election_wins > (peers * peers)) { crm_warn("%s election storm detected: %d wins in %d seconds", e->name, e->election_wins, STORM_INTERVAL); e->election_wins = 0; e->expires = tm_now + STORM_INTERVAL; if (e->wrote_blackbox == FALSE) { /* It's questionable whether a black box (from every node in the * cluster) would be truly helpful in diagnosing an election * storm. It's also highly doubtful a production environment * would get multiple election storms from distinct causes, so * saving one blackbox per process lifetime should be * sufficient. 
Alternatives would be to save a timestamp of the * last blackbox write instead of a boolean, and write a new one * if some amount of time has passed; or to save a storm count, * write a blackbox on every Nth occurrence. */ crm_write_blackbox(0, NULL); e->wrote_blackbox = TRUE; } } } if (done) { do_crm_log(log_level + 1, "Processed %s round %d %s (current round %d) from %s (%s)", e->name, vote.election_id, vote.op, e->count, vote.from, reason); return e->state; } else if (we_lose == FALSE) { /* We track the time of the last election loss to implement an election * dampening period, reducing the likelihood of an election storm. If * this node has lost within the dampening period, don't start a new * election, even if we win against a peer's vote -- the peer we lost to * should win again. * * @TODO This has a problem case: if an election winner immediately * leaves the cluster, and a new election is immediately called, all * nodes could lose, with no new winner elected. The ideal solution * would be to tie the election structure with the peer caches, which * would allow us to clear the dampening when the previous winner * leaves (and would allow other improvements as well). */ if ((e->last_election_loss == 0) || ((tm_now - e->last_election_loss) > (time_t) LOSS_DAMPEN)) { do_crm_log(log_level, "%s round %d (owner node ID %s) pass: %s from %s (%s)", e->name, vote.election_id, vote.election_owner, vote.op, vote.from, reason); e->last_election_loss = 0; election_timeout_stop(e); /* Start a new election by voting down this, and other, peers */ e->state = election_start; return e->state; } else { char *loss_time = ctime(&e->last_election_loss); if (loss_time) { // Show only HH:MM:SS loss_time += 11; loss_time[8] = '\0'; } crm_info("Ignoring %s round %d (owner node ID %s) pass vs %s because we lost less than %ds ago at %s", e->name, vote.election_id, vote.election_owner, vote.from, LOSS_DAMPEN, (loss_time? loss_time : "unknown")); } } e->last_election_loss = tm_now; do_crm_log(log_level, "%s round %d (owner node ID %s) lost: %s from %s (%s)", e->name, vote.election_id, vote.election_owner, vote.op, vote.from, reason); election_reset(e); send_no_vote(your_node, &vote); e->state = election_lost; return e->state; } /*! * \brief Reset any election dampening currently in effect * * \param[in,out] e Election object to clear */ void election_clear_dampening(election_t *e) { e->last_election_loss = 0; } diff --git a/lib/common/io.c b/lib/common/io.c index 6951b1fb40..9c1b767a45 100644 --- a/lib/common/io.c +++ b/lib/common/io.c @@ -1,662 +1,669 @@ /* - * Copyright 2004-2024 the Pacemaker project contributors + * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*! 
* \internal * \brief Create a directory, including any parent directories needed * * \param[in] path_c Pathname of the directory to create * \param[in] mode Permissions to be used (with current umask) when creating * * \return Standard Pacemaker return code */ int pcmk__build_path(const char *path_c, mode_t mode) { int offset = 1, len = 0; int rc = pcmk_rc_ok; char *path = strdup(path_c); // cppcheck seems not to understand the abort logic in CRM_CHECK // cppcheck-suppress memleak CRM_CHECK(path != NULL, return -ENOMEM); for (len = strlen(path); offset < len; offset++) { if (path[offset] == '/') { path[offset] = 0; if ((mkdir(path, mode) < 0) && (errno != EEXIST)) { rc = errno; goto done; } path[offset] = '/'; } } if ((mkdir(path, mode) < 0) && (errno != EEXIST)) { rc = errno; } done: free(path); return rc; } /*! * \internal * \brief Return canonicalized form of a path name * * \param[in] path Pathname to canonicalize * \param[out] resolved_path Where to store canonicalized pathname * * \return Standard Pacemaker return code * \note The caller is responsible for freeing \p resolved_path on success. * \note This function exists because not all C library versions of * realpath(path, resolved_path) support a NULL resolved_path. */ int pcmk__real_path(const char *path, char **resolved_path) { CRM_CHECK((path != NULL) && (resolved_path != NULL), return EINVAL); #if _POSIX_VERSION >= 200809L /* Recent C libraries can dynamically allocate memory as needed */ *resolved_path = realpath(path, NULL); return (*resolved_path == NULL)? errno : pcmk_rc_ok; #elif defined(PATH_MAX) /* Older implementations require pre-allocated memory */ /* (this is less desirable because PATH_MAX may be huge or not defined) */ *resolved_path = malloc(PATH_MAX); if ((*resolved_path == NULL) || (realpath(path, *resolved_path) == NULL)) { return errno; } return pcmk_rc_ok; #else *resolved_path = NULL; return ENOTSUP; #endif } /*! * \internal * \brief Create a file name using a sequence number * * \param[in] directory Directory that contains the file series * \param[in] series Start of file name * \param[in] sequence Sequence number * \param[in] bzip Whether to use ".bz2" instead of ".raw" as extension * * \return Newly allocated file path (asserts on error, so always non-NULL) * \note The caller is responsible for freeing the return value. */ char * pcmk__series_filename(const char *directory, const char *series, unsigned int sequence, bool bzip) { pcmk__assert((directory != NULL) && (series != NULL)); return crm_strdup_printf("%s/%s-%u.%s", directory, series, sequence, (bzip? "bz2" : "raw")); } /*! * \internal * \brief Read sequence number stored in a file series' .last file * * \param[in] directory Directory that contains the file series * \param[in] series Start of file name * \param[out] seq Where to store the sequence number * * \return Standard Pacemaker return code */ int pcmk__read_series_sequence(const char *directory, const char *series, unsigned int *seq) { int rc; FILE *fp = NULL; char *series_file = NULL; if ((directory == NULL) || (series == NULL) || (seq == NULL)) { return EINVAL; } series_file = crm_strdup_printf("%s/%s.last", directory, series); fp = fopen(series_file, "r"); if (fp == NULL) { rc = errno; crm_debug("Could not open series file %s: %s", series_file, strerror(rc)); free(series_file); return rc; } errno = 0; if (fscanf(fp, "%u", seq) != 1) { rc = (errno == 0)? 
ENODATA : errno; crm_debug("Could not read sequence number from series file %s: %s", series_file, pcmk_rc_str(rc)); fclose(fp); return rc; } fclose(fp); crm_trace("Found last sequence number %u in series file %s", *seq, series_file); free(series_file); return pcmk_rc_ok; } /*! * \internal * \brief Write sequence number to a file series' .last file * * \param[in] directory Directory that contains the file series * \param[in] series Start of file name * \param[in] sequence Sequence number to write * \param[in] max Maximum sequence value, after which it is reset to 0 * * \note This function logs some errors but does not return any to the caller */ void pcmk__write_series_sequence(const char *directory, const char *series, unsigned int sequence, int max) { int rc = 0; FILE *file_strm = NULL; char *series_file = NULL; CRM_CHECK(directory != NULL, return); CRM_CHECK(series != NULL, return); if (max == 0) { return; } if (max > 0 && sequence >= max) { sequence = 0; } series_file = crm_strdup_printf("%s/%s.last", directory, series); file_strm = fopen(series_file, "w"); if (file_strm != NULL) { rc = fprintf(file_strm, "%u", sequence); if (rc < 0) { crm_perror(LOG_ERR, "Cannot write to series file %s", series_file); } } else { crm_err("Cannot open series file %s for writing", series_file); } if (file_strm != NULL) { fflush(file_strm); fclose(file_strm); } crm_trace("Wrote %d to %s", sequence, series_file); free(series_file); } /*! * \internal * \brief Change the owner and group of a file series' .last file * * \param[in] directory Directory that contains series * \param[in] series Series to change * \param[in] uid User ID of desired file owner * \param[in] gid Group ID of desired file group * * \return Standard Pacemaker return code * \note The caller must have the appropriate privileges. 
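 * \note A minimal usage sketch (the directory, series name, and owner IDs
 *       here are hypothetical):
 * \code
 *     int rc = pcmk__chown_series_sequence("/var/lib/pacemaker/pengine",
 *                                          "pe-input", daemon_uid, daemon_gid);
 *
 *     if (rc != pcmk_rc_ok) {
 *         crm_warn("Could not change series file ownership: %s",
 *                  pcmk_rc_str(rc));
 *     }
 * \endcode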
*/ int pcmk__chown_series_sequence(const char *directory, const char *series, uid_t uid, gid_t gid) { char *series_file = NULL; int rc = pcmk_rc_ok; if ((directory == NULL) || (series == NULL)) { return EINVAL; } series_file = crm_strdup_printf("%s/%s.last", directory, series); if (chown(series_file, uid, gid) < 0) { rc = errno; } free(series_file); return rc; } static bool pcmk__daemon_user_can_write(const char *target_name, struct stat *target_stat) { struct passwd *sys_user = NULL; errno = 0; sys_user = getpwnam(CRM_DAEMON_USER); if (sys_user == NULL) { crm_notice("Could not find user %s: %s", CRM_DAEMON_USER, pcmk_rc_str(errno)); return FALSE; } if (target_stat->st_uid != sys_user->pw_uid) { crm_notice("%s is not owned by user %s " CRM_XS " uid %d != %d", target_name, CRM_DAEMON_USER, sys_user->pw_uid, target_stat->st_uid); return FALSE; } if ((target_stat->st_mode & (S_IRUSR | S_IWUSR)) == 0) { crm_notice("%s is not readable and writable by user %s " CRM_XS " st_mode=0%lo", target_name, CRM_DAEMON_USER, (unsigned long) target_stat->st_mode); return FALSE; } return TRUE; } static bool pcmk__daemon_group_can_write(const char *target_name, struct stat *target_stat) { struct group *sys_grp = NULL; errno = 0; sys_grp = getgrnam(CRM_DAEMON_GROUP); if (sys_grp == NULL) { crm_notice("Could not find group %s: %s", CRM_DAEMON_GROUP, pcmk_rc_str(errno)); return FALSE; } if (target_stat->st_gid != sys_grp->gr_gid) { crm_notice("%s is not owned by group %s " CRM_XS " gid %d != %d", target_name, CRM_DAEMON_GROUP, sys_grp->gr_gid, target_stat->st_gid); return FALSE; } if ((target_stat->st_mode & (S_IRGRP | S_IWGRP)) == 0) { crm_notice("%s is not readable and writable by group %s " CRM_XS " st_mode=0%lo", target_name, CRM_DAEMON_GROUP, (unsigned long) target_stat->st_mode); return FALSE; } return TRUE; } /*! * \internal * \brief Check whether a directory or file is writable by the cluster daemon * * Return true if either the cluster daemon user or cluster daemon group has * write permission on a specified file or directory. 
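 *
 * For example (an illustrative check only; the path is hypothetical), a
 * daemon could verify its state directory before writing to it:
 * \code
 *     if (!pcmk__daemon_can_write("/var/lib/pacemaker/cib", NULL)) {
 *         crm_err("State directory is not writable by the cluster daemon");
 *     }
 * \endcode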
* * \param[in] dir Directory to check (this argument must be specified, and * the directory must exist) * \param[in] file File to check (only the directory will be checked if this * argument is not specified or the file does not exist) * * \return true if target is writable by cluster daemon, false otherwise */ bool pcmk__daemon_can_write(const char *dir, const char *file) { int s_res = 0; struct stat buf; char *full_file = NULL; const char *target = NULL; // Caller must supply directory pcmk__assert(dir != NULL); // If file is given, check whether it exists as a regular file if (file != NULL) { full_file = crm_strdup_printf("%s/%s", dir, file); target = full_file; s_res = stat(full_file, &buf); if (s_res < 0) { crm_notice("%s not found: %s", target, pcmk_rc_str(errno)); free(full_file); full_file = NULL; target = NULL; } else if (S_ISREG(buf.st_mode) == FALSE) { crm_err("%s must be a regular file " CRM_XS " st_mode=0%lo", target, (unsigned long) buf.st_mode); free(full_file); return false; } } // If file is not given, ensure dir exists as directory if (target == NULL) { target = dir; s_res = stat(dir, &buf); if (s_res < 0) { crm_err("%s not found: %s", dir, pcmk_rc_str(errno)); return false; } else if (S_ISDIR(buf.st_mode) == FALSE) { crm_err("%s must be a directory " CRM_XS " st_mode=0%lo", dir, (unsigned long) buf.st_mode); return false; } } if (!pcmk__daemon_user_can_write(target, &buf) && !pcmk__daemon_group_can_write(target, &buf)) { crm_err("%s must be owned and writable by either user %s or group %s " CRM_XS " st_mode=0%lo", target, CRM_DAEMON_USER, CRM_DAEMON_GROUP, (unsigned long) buf.st_mode); free(full_file); return false; } free(full_file); return true; } /*! * \internal * \brief Flush and sync a directory to disk * * \param[in] name Directory to flush and sync * \note This function logs errors but does not return them to the caller */ void pcmk__sync_directory(const char *name) { int fd; DIR *directory; directory = opendir(name); if (directory == NULL) { crm_perror(LOG_ERR, "Could not open %s for syncing", name); return; } fd = dirfd(directory); if (fd < 0) { crm_perror(LOG_ERR, "Could not obtain file descriptor for %s", name); return; } if (fsync(fd) < 0) { crm_perror(LOG_ERR, "Could not sync %s", name); } if (closedir(directory) < 0) { crm_perror(LOG_ERR, "Could not close %s after fsync", name); } } /*! * \internal * \brief Read the contents of a file * * \param[in] filename Name of file to read * \param[out] contents Where to store file contents * * \return Standard Pacemaker return code * \note On success, the caller is responsible for freeing contents. */ int pcmk__file_contents(const char *filename, char **contents) { FILE *fp; int length, read_len; int rc = pcmk_rc_ok; if ((filename == NULL) || (contents == NULL)) { return EINVAL; } fp = fopen(filename, "r"); if ((fp == NULL) || (fseek(fp, 0L, SEEK_END) < 0)) { rc = errno; goto bail; } length = ftell(fp); if (length < 0) { rc = errno; goto bail; } if (length == 0) { *contents = NULL; } else { *contents = calloc(length + 1, sizeof(char)); if (*contents == NULL) { rc = errno; goto bail; } + + errno = 0; + rewind(fp); + if (errno != 0) { + rc = errno; + goto bail; + } read_len = fread(*contents, 1, length, fp); if (read_len != length) { free(*contents); *contents = NULL; rc = EIO; } else { /* Coverity thinks *contents isn't null-terminated. It doesn't * understand calloc(). */ (*contents)[length] = '\0'; } } bail: if (fp != NULL) { fclose(fp); } return rc; } /*! 
* \internal * \brief Write text to a file, flush and sync it to disk, then close the file * * \param[in] fd File descriptor opened for writing * \param[in] contents String to write to file * * \return Standard Pacemaker return code */ int pcmk__write_sync(int fd, const char *contents) { int rc = 0; FILE *fp = fdopen(fd, "w"); if (fp == NULL) { return errno; } if ((contents != NULL) && (fprintf(fp, "%s", contents) < 0)) { rc = EIO; } if (fflush(fp) != 0) { rc = errno; } if (fsync(fileno(fp)) < 0) { rc = errno; } fclose(fp); return rc; } /*! * \internal * \brief Set a file descriptor to non-blocking * * \param[in] fd File descriptor to use * * \return Standard Pacemaker return code */ int pcmk__set_nonblocking(int fd) { int flag = fcntl(fd, F_GETFL); if (flag < 0) { return errno; } if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) { return errno; } return pcmk_rc_ok; } /*! * \internal * \brief Get directory name for temporary files * * Return the value of the TMPDIR environment variable if it is set to a * full path, otherwise return "/tmp". * * \return Name of directory to be used for temporary files */ const char * pcmk__get_tmpdir(void) { const char *dir = getenv("TMPDIR"); return (dir && (*dir == '/'))? dir : "/tmp"; } /*! * \internal * \brief Close open file descriptors * * Close all file descriptors (except optionally stdin, stdout, and stderr), * which is a best practice for a new child process forked for the purpose of * executing an external program. * * \param[in] all If true, close stdin, stdout, and stderr as well */ void pcmk__close_fds_in_child(bool all) { DIR *dir; struct rlimit rlim; rlim_t max_fd; int min_fd = (all? 0 : (STDERR_FILENO + 1)); /* Find the current process's (soft) limit for open files. getrlimit() * should always work, but have a fallback just in case. */ if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) { max_fd = rlim.rlim_cur - 1; } else { long conf_max = sysconf(_SC_OPEN_MAX); max_fd = (conf_max > 0)? conf_max : 1024; } /* /proc/self/fd (on Linux) or /dev/fd (on most OSes) contains symlinks to * all open files for the current process, named as the file descriptor. * Use this if available, because it's more efficient than a shotgun * approach to closing descriptors. */ #if HAVE_LINUX_PROCFS dir = opendir("/proc/self/fd"); if (dir == NULL) { dir = opendir("/dev/fd"); } #else dir = opendir("/dev/fd"); #endif // HAVE_LINUX_PROCFS if (dir != NULL) { struct dirent *entry; int dir_fd = dirfd(dir); while ((entry = readdir(dir)) != NULL) { int lpc = atoi(entry->d_name); /* How could one of these entries be higher than max_fd, you ask? * It isn't possible in normal operation, but when run under * valgrind, valgrind can open high-numbered file descriptors for * its own use that are higher than the process's soft limit. * These will show up in the fd directory but aren't closable. */ if ((lpc >= min_fd) && (lpc <= max_fd) && (lpc != dir_fd)) { close(lpc); } } closedir(dir); return; } /* If no fd directory is available, iterate over all possible descriptors. * This is less efficient due to the overhead of many system calls. */ for (int lpc = max_fd; lpc >= min_fd; lpc--) { close(lpc); } } /*! 
* \brief Duplicate a file path, inserting a prefix if not absolute * * \param[in] filename File path to duplicate * \param[in] dirname If filename is not absolute, prefix to add * * \return Newly allocated memory with full path (guaranteed non-NULL) */ char * pcmk__full_path(const char *filename, const char *dirname) { pcmk__assert(filename != NULL); if (filename[0] == '/') { return pcmk__str_copy(filename); } pcmk__assert(dirname != NULL); return crm_strdup_printf("%s/%s", dirname, filename); } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include void crm_build_path(const char *path_c, mode_t mode) { int rc = pcmk__build_path(path_c, mode); if (rc != pcmk_rc_ok) { crm_err("Could not create directory '%s': %s", path_c, pcmk_rc_str(rc)); } } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c index 3c19bcf847..82ceb32fcb 100644 --- a/lib/pacemaker/pcmk_simulate.c +++ b/lib/pacemaker/pcmk_simulate.c @@ -1,1024 +1,1024 @@ /* - * Copyright 2021-2024 the Pacemaker project contributors + * Copyright 2021-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include "libpacemaker_private.h" static pcmk__output_t *out = NULL; static cib_t *fake_cib = NULL; static GList *fake_resource_list = NULL; static const GList *fake_op_fail_list = NULL; static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original, const char *use_date); /*! * \internal * \brief Create an action name for use in a dot graph * * \param[in] action Action to create name for * \param[in] verbose If true, add action ID to name * * \return Newly allocated string with action name * \note It is the caller's responsibility to free the result. 
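 * \note As an illustration (resource, node, and action ID are hypothetical),
 *       a 10-second-interval monitor of a resource named "rsc1" on node1 with
 *       \p verbose set might be rendered as "rsc1_monitor_10000 node1 (4)".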
*/ static char * create_action_name(const pcmk_action_t *action, bool verbose) { char *action_name = NULL; const char *prefix = ""; const char *action_host = NULL; const char *clone_name = NULL; const char *task = action->task; if (action->node != NULL) { action_host = action->node->details->uname; } else if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) { action_host = ""; } if (pcmk__str_eq(action->task, PCMK_ACTION_CANCEL, pcmk__str_none)) { prefix = "Cancel "; task = action->cancel_task; } if (action->rsc != NULL) { clone_name = action->rsc->clone_name; } if (clone_name != NULL) { char *key = NULL; guint interval_ms = 0; if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0, &interval_ms) != pcmk_rc_ok) { interval_ms = 0; } if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY, PCMK_ACTION_NOTIFIED, NULL)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_key_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_key_operation"); pcmk__assert(n_type != NULL); pcmk__assert(n_task != NULL); key = pcmk__notify_key(clone_name, n_type, n_task); } else { key = pcmk__op_key(clone_name, task, interval_ms); } if (action_host != NULL) { action_name = crm_strdup_printf("%s%s %s", prefix, key, action_host); } else { action_name = crm_strdup_printf("%s%s", prefix, key); } free(key); } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) { const char *op = g_hash_table_lookup(action->meta, PCMK__META_STONITH_ACTION); action_name = crm_strdup_printf("%s%s '%s' %s", prefix, action->task, op, action_host); } else if (action->rsc && action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->uuid, action_host); } else if (action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->task, action_host); } else { action_name = crm_strdup_printf("%s", action->uuid); } if (verbose) { char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id); free(action_name); action_name = with_id; } return action_name; } /*! * \internal * \brief Display the status of a cluster * * \param[in,out] scheduler Scheduler data * \param[in] show_opts How to modify display (as pcmk_show_opt_e flags) * \param[in] section_opts Sections to display (as pcmk_section_e flags) * \param[in] title What to use as list title * \param[in] print_spacer Whether to display a spacer first */ static void print_cluster_status(pcmk_scheduler_t *scheduler, uint32_t show_opts, uint32_t section_opts, const char *title, bool print_spacer) { pcmk__output_t *out = scheduler->priv; GList *all = NULL; crm_exit_t stonith_rc = 0; enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid; section_opts |= pcmk_section_nodes | pcmk_section_resources; show_opts |= pcmk_show_inactive_rscs | pcmk_show_failed_detail; all = g_list_prepend(all, (gpointer) "*"); PCMK__OUTPUT_SPACER_IF(out, print_spacer); out->begin_list(out, NULL, NULL, "%s", title); out->message(out, "cluster-status", scheduler, state, stonith_rc, NULL, pcmk__fence_history_none, section_opts, show_opts, NULL, all, all); out->end_list(out); g_list_free(all); } /*! 
* \internal * \brief Display a summary of all actions scheduled in a transition * * \param[in,out] scheduler Scheduler data (fully scheduled) * \param[in] print_spacer Whether to display a spacer first */ static void print_transition_summary(pcmk_scheduler_t *scheduler, bool print_spacer) { pcmk__output_t *out = scheduler->priv; PCMK__OUTPUT_SPACER_IF(out, print_spacer); out->begin_list(out, NULL, NULL, "Transition Summary"); pcmk__output_actions(scheduler); out->end_list(out); } /*! * \internal * \brief Reset scheduler input, output, date, and flags * * \param[in,out] scheduler Scheduler data * \param[in] input What to set as cluster input * \param[in] out What to set as cluster output object * \param[in] use_date What to set as cluster's current timestamp * \param[in] flags Group of enum pcmk_scheduler_flags to set */ static void reset(pcmk_scheduler_t *scheduler, xmlNodePtr input, pcmk__output_t *out, const char *use_date, unsigned int flags) { scheduler->input = input; scheduler->priv = out; set_effective_date(scheduler, true, use_date); if (pcmk_is_set(flags, pcmk_sim_sanitized)) { pcmk__set_scheduler_flags(scheduler, pcmk_sched_sanitized); } if (pcmk_is_set(flags, pcmk_sim_show_scores)) { pcmk__set_scheduler_flags(scheduler, pcmk_sched_output_scores); } if (pcmk_is_set(flags, pcmk_sim_show_utilization)) { pcmk__set_scheduler_flags(scheduler, pcmk_sched_show_utilization); } } /*! * \brief Write out a file in dot(1) format describing the actions that will * be taken by the scheduler in response to an input CIB file. * * \param[in,out] scheduler Scheduler data * \param[in] dot_file The filename to write * \param[in] all_actions Write all actions, even those that are optional * or are on unmanaged resources * \param[in] verbose Add extra information, such as action IDs, to the * output * * \return Standard Pacemaker return code */ static int write_sim_dotfile(pcmk_scheduler_t *scheduler, const char *dot_file, bool all_actions, bool verbose) { GList *iter = NULL; FILE *dot_strm = fopen(dot_file, "w"); if (dot_strm == NULL) { return errno; } fprintf(dot_strm, " digraph \"g\" {\n"); for (iter = scheduler->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = (pcmk_action_t *) iter->data; const char *style = "dashed"; const char *font = "black"; - const char *color = "black"; + const char *color = NULL; char *action_name = create_action_name(action, verbose); if (pcmk_is_set(action->flags, pcmk_action_pseudo)) { font = "orange"; } if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)) { style = PCMK__VALUE_BOLD; color = "green"; } else if ((action->rsc != NULL) && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)) { color = "red"; font = "purple"; if (!all_actions) { goto do_not_write; } } else if (pcmk_is_set(action->flags, pcmk_action_optional)) { color = "blue"; if (!all_actions) { goto do_not_write; } } else { color = "red"; CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pcmk_action_runnable)); } pcmk__set_action_flags(action, pcmk_action_added_to_graph); fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n", action_name, style, color, font); do_not_write: free(action_name); } for (iter = scheduler->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = (pcmk_action_t *) iter->data; for (GList *before_iter = action->actions_before; before_iter != NULL; before_iter = before_iter->next) { pcmk__related_action_t *before = before_iter->data; char *before_name = NULL; char *after_name = NULL; const char *style = "dashed"; bool optional = true; if 
(before->state == pe_link_dumped) { optional = false; style = PCMK__VALUE_BOLD; } else if ((uint32_t) before->type == pcmk__ar_none) { continue; } else if (pcmk_is_set(before->action->flags, pcmk_action_added_to_graph) && pcmk_is_set(action->flags, pcmk_action_added_to_graph) && (uint32_t) before->type != pcmk__ar_if_on_same_node_or_target) { optional = false; } if (all_actions || !optional) { before_name = create_action_name(before->action, verbose); after_name = create_action_name(action, verbose); fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n", before_name, after_name, style); free(before_name); free(after_name); } } } fprintf(dot_strm, "}\n"); fflush(dot_strm); fclose(dot_strm); return pcmk_rc_ok; } /*! * \brief Profile the configuration updates and scheduler actions in a single * CIB file, printing the profiling timings. * * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t * object before this function is called. * * \param[in] xml_file The CIB file to profile * \param[in] repeat Number of times to run * \param[in,out] scheduler Scheduler data * \param[in] use_date The date to set the cluster's time to (may be NULL) */ static void profile_file(const char *xml_file, long long repeat, pcmk_scheduler_t *scheduler, const char *use_date) { pcmk__output_t *out = scheduler->priv; xmlNode *cib_object = NULL; clock_t start = 0; clock_t end; unsigned long long scheduler_flags = pcmk_sched_no_compat; pcmk__assert(out != NULL); cib_object = pcmk__xml_read(xml_file); start = clock(); if (pcmk_find_cib_element(cib_object, PCMK_XE_STATUS) == NULL) { pcmk__xe_create(cib_object, PCMK_XE_STATUS); } if (pcmk__update_configured_schema(&cib_object, false) != pcmk_rc_ok) { free_xml(cib_object); return; } if (!pcmk__validate_xml(cib_object, NULL, NULL, NULL)) { free_xml(cib_object); return; } if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) { scheduler_flags |= pcmk_sched_output_scores; } if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) { scheduler_flags |= pcmk_sched_show_utilization; } for (int i = 0; i < repeat; ++i) { xmlNode *input = cib_object; if (repeat > 1) { input = pcmk__xml_copy(NULL, cib_object); } scheduler->input = input; set_effective_date(scheduler, false, use_date); pcmk__schedule_actions(input, scheduler_flags, scheduler); pe_reset_working_set(scheduler); } end = clock(); out->message(out, "profile", xml_file, start, end); } void pcmk__profile_dir(const char *dir, long long repeat, pcmk_scheduler_t *scheduler, const char *use_date) { pcmk__output_t *out = scheduler->priv; struct dirent **namelist; int file_num = scandir(dir, &namelist, 0, alphasort); pcmk__assert(out != NULL); if (file_num > 0) { struct stat prop; char buffer[FILENAME_MAX]; out->begin_list(out, NULL, NULL, "Timings"); while (file_num--) { if ('.' == namelist[file_num]->d_name[0]) { free(namelist[file_num]); continue; } else if (!pcmk__ends_with_ext(namelist[file_num]->d_name, ".xml")) { free(namelist[file_num]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", dir, namelist[file_num]->d_name); if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) { profile_file(buffer, repeat, scheduler, use_date); } free(namelist[file_num]); } free(namelist); out->end_list(out); } } /*! * \brief Set the date of the cluster, either to the value given by * \p use_date, or to the \c PCMK_XA_EXECUTION_DATE value in the CIB. * * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t * object before this function is called. 
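 *
 * For example (hypothetical date string):
 *
 *     // evaluate the configuration as if "now" were a fixed point in time
 *     set_effective_date(scheduler, false, "2024-01-01 12:00:00");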
* * \param[in,out] scheduler Scheduler data * \param[in] print_original If \p true, the \c PCMK_XA_EXECUTION_DATE * should also be printed * \param[in] use_date The date to set the cluster's time to * (may be NULL) */ static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original, const char *use_date) { pcmk__output_t *out = scheduler->priv; time_t original_date = 0; pcmk__assert(out != NULL); crm_element_value_epoch(scheduler->input, PCMK_XA_EXECUTION_DATE, &original_date); if (use_date) { scheduler->now = crm_time_new(use_date); out->info(out, "Setting effective cluster time: %s", use_date); crm_time_log(LOG_NOTICE, "Pretending 'now' is", scheduler->now, crm_time_log_date | crm_time_log_timeofday); } else if (original_date != 0) { scheduler->now = pcmk__copy_timet(original_date); if (print_original) { char *when = crm_time_as_string(scheduler->now, crm_time_log_date|crm_time_log_timeofday); out->info(out, "Using the original execution date of: %s", when); free(when); } } } /*! * \internal * \brief Simulate successfully executing a pseudo-action in a graph * * \param[in,out] graph Graph to update with pseudo-action result * \param[in,out] action Pseudo-action to simulate executing * * \return Standard Pacemaker return code */ static int simulate_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *node = crm_element_value(action->xml, PCMK__META_ON_NODE); const char *task = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); out->message(out, "inject-pseudo-action", node, task); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate executing a resource action in a graph * * \param[in,out] graph Graph to update with resource action result * \param[in,out] action Resource action to simulate executing * * \return Standard Pacemaker return code */ static int simulate_resource_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { int rc; lrmd_event_data_t *op = NULL; int target_outcome = PCMK_OCF_OK; const char *rtype = NULL; const char *rclass = NULL; const char *resource = NULL; const char *rprovider = NULL; const char *resource_config_name = NULL; const char *operation = crm_element_value(action->xml, PCMK_XA_OPERATION); const char *target_rc_s = crm_meta_value(action->params, PCMK__META_OP_TARGET_RC); xmlNode *cib_node = NULL; xmlNode *cib_resource = NULL; xmlNode *action_rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, NULL); char *node = crm_element_value_copy(action->xml, PCMK__META_ON_NODE); char *uuid = NULL; const char *router_node = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); // Certain actions don't need to be displayed or history entries if (pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) { crm_debug("No history injection for %s op on %s", operation, node); goto done; // Confirm action and update graph } if (action_rsc == NULL) { // Shouldn't be possible crm_log_xml_err(action->xml, "Bad"); free(node); return EPROTO; } /* A resource might be known by different names in the configuration and in * the action (for example, a clone instance). Grab the configuration name * (which is preferred when writing history), and if necessary, the instance * name. 
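 *
 * As a hypothetical illustration, an anonymous clone instance could appear
 * in the action as "dummy:0" (PCMK__XA_LONG_ID) while its configuration
 * element is just "dummy" (PCMK_XA_ID); the code below prefers the latter
 * and falls back to the long name only when the short one is not found in
 * the fake resource list.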
*/ resource_config_name = crm_element_value(action_rsc, PCMK_XA_ID); if (resource_config_name == NULL) { // Shouldn't be possible crm_log_xml_err(action->xml, "No ID"); free(node); return EPROTO; } resource = resource_config_name; if (pe_find_resource(fake_resource_list, resource) == NULL) { const char *longname = crm_element_value(action_rsc, PCMK__XA_LONG_ID); if ((longname != NULL) && (pe_find_resource(fake_resource_list, longname) != NULL)) { resource = longname; } } // Certain actions need to be displayed but don't need history entries if (pcmk__strcase_any_of(operation, PCMK_ACTION_DELETE, PCMK_ACTION_META_DATA, NULL)) { out->message(out, "inject-rsc-action", resource, operation, node, (guint) 0); goto done; // Confirm action and update graph } rclass = crm_element_value(action_rsc, PCMK_XA_CLASS); rtype = crm_element_value(action_rsc, PCMK_XA_TYPE); rprovider = crm_element_value(action_rsc, PCMK_XA_PROVIDER); pcmk__scan_min_int(target_rc_s, &target_outcome, 0); pcmk__assert(fake_cib->cmds->query(fake_cib, NULL, NULL, cib_sync_call|cib_scope_local) == pcmk_ok); // Ensure the action node is in the CIB uuid = crm_element_value_copy(action->xml, PCMK__META_ON_NODE_UUID); cib_node = pcmk__inject_node(fake_cib, node, ((router_node == NULL)? uuid: node)); free(uuid); pcmk__assert(cib_node != NULL); // Add a history entry for the action cib_resource = pcmk__inject_resource_history(out, cib_node, resource, resource_config_name, rclass, rtype, rprovider); if (cib_resource == NULL) { crm_err("Could not simulate action %d history for resource %s", action->id, resource); free(node); free_xml(cib_node); return EINVAL; } // Simulate and display an executor event for the action result op = pcmk__event_from_graph_action(cib_resource, action, PCMK_EXEC_DONE, target_outcome, "User-injected result"); out->message(out, "inject-rsc-action", resource, op->op_type, node, op->interval_ms); // Check whether action is in a list of desired simulated failures for (const GList *iter = fake_op_fail_list; iter != NULL; iter = iter->next) { const char *spec = (const char *) iter->data; char *key = NULL; const char *match_name = NULL; const char *offset = NULL; // Allow user to specify anonymous clone with or without instance number key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource, op->op_type, op->interval_ms, node); if (strncasecmp(key, spec, strlen(key)) == 0) { match_name = resource; } free(key); // If not found, try the resource's name in the configuration if ((match_name == NULL) && (strcmp(resource, resource_config_name) != 0)) { key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource_config_name, op->op_type, op->interval_ms, node); if (strncasecmp(key, spec, strlen(key)) == 0) { match_name = resource_config_name; } free(key); } if (match_name == NULL) { continue; // This failed action entry doesn't match } // ${match_name}_${task}_${interval_in_ms}@${node}=${rc} rc = sscanf(spec, "%*[^=]=%d", (int *) &op->rc); if (rc != 1) { out->err(out, "Invalid failed operation '%s' " "(result code must be integer)", spec); continue; // Keep checking other list entries } out->info(out, "Pretending action %d failed with rc=%d", action->id, op->rc); pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); graph->abort_priority = PCMK_SCORE_INFINITY; if (pcmk__str_eq(op->op_type, PCMK_ACTION_START, pcmk__str_none)) { offset = pcmk__s(graph->failed_start_offset, PCMK_VALUE_INFINITY); } else if (pcmk__str_eq(op->op_type, PCMK_ACTION_STOP, pcmk__str_none)) { offset = pcmk__s(graph->failed_stop_offset, 
PCMK_VALUE_INFINITY); } pcmk__inject_failcount(out, fake_cib, cib_node, match_name, op->op_type, op->interval_ms, op->rc, pcmk_str_is_infinity(offset)); break; } pcmk__inject_action_result(cib_resource, op, target_outcome); lrmd_free_event(op); rc = fake_cib->cmds->modify(fake_cib, PCMK_XE_STATUS, cib_node, cib_sync_call|cib_scope_local); pcmk__assert(rc == pcmk_ok); done: free(node); free_xml(cib_node); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate successfully executing a cluster action * * \param[in,out] graph Graph to update with action result * \param[in,out] action Cluster action to simulate * * \return Standard Pacemaker return code */ static int simulate_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *node = crm_element_value(action->xml, PCMK__META_ON_NODE); const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION); xmlNode *rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, NULL); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); out->message(out, "inject-cluster-action", node, task, rsc); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate successfully executing a fencing action * * \param[in,out] graph Graph to update with action result * \param[in,out] action Fencing action to simulate * * \return Standard Pacemaker return code */ static int simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *op = crm_meta_value(action->params, PCMK__META_STONITH_ACTION); char *target = crm_element_value_copy(action->xml, PCMK__META_ON_NODE); out->message(out, "inject-fencing-action", target, op); if (!pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) { int rc = pcmk_ok; GString *xpath = g_string_sized_new(512); // Set node state to offline xmlNode *cib_node = pcmk__inject_node_state_change(fake_cib, target, false); pcmk__assert(cib_node != NULL); crm_xml_add(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, __func__); rc = fake_cib->cmds->replace(fake_cib, PCMK_XE_STATUS, cib_node, cib_sync_call|cib_scope_local); pcmk__assert(rc == pcmk_ok); // Simulate controller clearing node's resource history and attributes pcmk__g_strcat(xpath, "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='", target, "']/" PCMK__XE_LRM, NULL); fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL, cib_xpath|cib_sync_call|cib_scope_local); g_string_truncate(xpath, 0); pcmk__g_strcat(xpath, "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='", target, "']" "/" PCMK__XE_TRANSIENT_ATTRIBUTES, NULL); fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL, cib_xpath|cib_sync_call|cib_scope_local); free_xml(cib_node); g_string_free(xpath, TRUE); } pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); free(target); return pcmk_rc_ok; } enum pcmk__graph_status pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib, const GList *op_fail_list) { pcmk__graph_t *transition = NULL; enum pcmk__graph_status graph_rc; pcmk__graph_functions_t simulation_fns = { simulate_pseudo_action, simulate_resource_action, simulate_cluster_action, simulate_fencing_action, }; out = scheduler->priv; fake_cib = cib; fake_op_fail_list = op_fail_list; if (!out->is_quiet(out)) { out->begin_list(out, NULL, NULL, "Executing Cluster Transition"); } pcmk__set_graph_functions(&simulation_fns); transition = 
pcmk__unpack_graph(scheduler->graph, crm_system_name); pcmk__log_graph(LOG_DEBUG, transition); fake_resource_list = scheduler->resources; do { graph_rc = pcmk__execute_graph(transition); } while (graph_rc == pcmk__graph_active); fake_resource_list = NULL; if (graph_rc != pcmk__graph_complete) { out->err(out, "Transition failed: %s", pcmk__graph_status2text(graph_rc)); pcmk__log_graph(LOG_ERR, transition); out->err(out, "An invalid transition was produced"); } pcmk__free_graph(transition); if (!out->is_quiet(out)) { // If not quiet, we'll need the resulting CIB for later display xmlNode *cib_object = NULL; int rc = fake_cib->cmds->query(fake_cib, NULL, &cib_object, cib_sync_call|cib_scope_local); pcmk__assert(rc == pcmk_ok); pe_reset_working_set(scheduler); scheduler->input = cib_object; out->end_list(out); } return graph_rc; } int pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out, const pcmk_injections_t *injections, unsigned int flags, uint32_t section_opts, const char *use_date, const char *input_file, const char *graph_file, const char *dot_file) { int printed = pcmk_rc_no_output; int rc = pcmk_rc_ok; xmlNodePtr input = NULL; cib_t *cib = NULL; rc = cib__signon_query(out, &cib, &input); if (rc != pcmk_rc_ok) { goto simulate_done; } reset(scheduler, input, out, use_date, flags); cluster_status(scheduler); if ((cib->variant == cib_native) && pcmk_is_set(section_opts, pcmk_section_times)) { if (pcmk__our_nodename == NULL) { // Currently used only in the times section pcmk__query_node_name(out, 0, &pcmk__our_nodename, 0); } scheduler->localhost = pcmk__our_nodename; } if (!out->is_quiet(out)) { const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending); if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) { printed = out->message(out, "maint-mode", scheduler->flags); } if (scheduler->disabled_resources || scheduler->blocked_resources) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); printed = out->info(out, "%d of %d resource instances DISABLED and " "%d BLOCKED from further action due to failure", scheduler->disabled_resources, scheduler->ninstances, scheduler->blocked_resources); } /* Most formatted output headers use caps for each word, but this one * only has the first word capitalized for compatibility with pcs. */ print_cluster_status(scheduler, (show_pending? 
pcmk_show_pending : 0), section_opts, "Current cluster status", (printed == pcmk_rc_ok)); printed = pcmk_rc_ok; } // If the user requested any injections, handle them if ((injections->node_down != NULL) || (injections->node_fail != NULL) || (injections->node_up != NULL) || (injections->op_inject != NULL) || (injections->ticket_activate != NULL) || (injections->ticket_grant != NULL) || (injections->ticket_revoke != NULL) || (injections->ticket_standby != NULL) || (injections->watchdog != NULL)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); pcmk__inject_scheduler_input(scheduler, cib, injections); printed = pcmk_rc_ok; rc = cib->cmds->query(cib, NULL, &input, cib_sync_call); if (rc != pcmk_rc_ok) { rc = pcmk_legacy2rc(rc); goto simulate_done; } cleanup_calculations(scheduler); reset(scheduler, input, out, use_date, flags); cluster_status(scheduler); } if (input_file != NULL) { rc = pcmk__xml_write_file(input, input_file, false, NULL); if (rc != pcmk_rc_ok) { goto simulate_done; } } if (pcmk_any_flags_set(flags, pcmk_sim_process | pcmk_sim_simulate)) { pcmk__output_t *logger_out = NULL; unsigned long long scheduler_flags = pcmk_sched_no_compat; if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) { scheduler_flags |= pcmk_sched_output_scores; } if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) { scheduler_flags |= pcmk_sched_show_utilization; } if (pcmk_all_flags_set(scheduler->flags, pcmk_sched_output_scores |pcmk_sched_show_utilization)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Assignment Scores and Utilization Information"); printed = pcmk_rc_ok; } else if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Assignment Scores"); printed = pcmk_rc_ok; } else if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Utilization Information"); printed = pcmk_rc_ok; } else { rc = pcmk__log_output_new(&logger_out); if (rc != pcmk_rc_ok) { goto simulate_done; } pe__register_messages(logger_out); pcmk__register_lib_messages(logger_out); scheduler->priv = logger_out; } pcmk__schedule_actions(input, scheduler_flags, scheduler); if (logger_out == NULL) { out->end_list(out); } else { logger_out->finish(logger_out, CRM_EX_OK, true, NULL); pcmk__output_free(logger_out); scheduler->priv = out; } input = NULL; /* Don't try and free it twice */ if (graph_file != NULL) { rc = pcmk__xml_write_file(scheduler->graph, graph_file, false, NULL); if (rc != pcmk_rc_ok) { rc = pcmk_rc_graph_error; goto simulate_done; } } if (dot_file != NULL) { rc = write_sim_dotfile(scheduler, dot_file, pcmk_is_set(flags, pcmk_sim_all_actions), pcmk_is_set(flags, pcmk_sim_verbose)); if (rc != pcmk_rc_ok) { rc = pcmk_rc_dot_error; goto simulate_done; } } if (!out->is_quiet(out)) { print_transition_summary(scheduler, printed == pcmk_rc_ok); } } rc = pcmk_rc_ok; if (!pcmk_is_set(flags, pcmk_sim_simulate)) { goto simulate_done; } PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); if (pcmk__simulate_transition(scheduler, cib, injections->op_fail) != pcmk__graph_complete) { rc = pcmk_rc_invalid_transition; } if (out->is_quiet(out)) { goto simulate_done; } set_effective_date(scheduler, true, use_date); if (pcmk_is_set(flags, pcmk_sim_show_scores)) { pcmk__set_scheduler_flags(scheduler, pcmk_sched_output_scores); } if (pcmk_is_set(flags, pcmk_sim_show_utilization)) { 
pcmk__set_scheduler_flags(scheduler, pcmk_sched_show_utilization); } cluster_status(scheduler); print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status", true); simulate_done: cib__clean_up_connection(&cib); return rc; } int pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler, const pcmk_injections_t *injections, unsigned int flags, unsigned int section_opts, const char *use_date, const char *input_file, const char *graph_file, const char *dot_file) { pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; rc = pcmk__xml_output_new(&out, xml); if (rc != pcmk_rc_ok) { return rc; } pe__register_messages(out); pcmk__register_lib_messages(out); rc = pcmk__simulate(scheduler, out, injections, flags, section_opts, use_date, input_file, graph_file, dot_file); pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml); return rc; } diff --git a/lib/services/services_nagios.c b/lib/services/services_nagios.c index 69b1611139..5b4dbe7db3 100644 --- a/lib/services/services_nagios.c +++ b/lib/services/services_nagios.c @@ -1,220 +1,221 @@ /* - * Copyright 2010-2024 the Pacemaker project contributors + * Copyright 2010-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include "crm/crm.h" #include #include "crm/common/mainloop.h" #include "crm/services.h" #include "services_private.h" #include "services_nagios.h" /*! * \internal * \brief Prepare a Nagios action * * \param[in,out] op Action to prepare * * \return Standard Pacemaker return code */ int services__nagios_prepare(svc_action_t *op) { op->opaque->exec = pcmk__full_path(op->agent, NAGIOS_PLUGIN_DIR); op->opaque->args[0] = strdup(op->opaque->exec); if (op->opaque->args[0] == NULL) { return ENOMEM; } if (pcmk__str_eq(op->action, PCMK_ACTION_MONITOR, pcmk__str_casei) && (op->interval_ms == 0)) { // Invoke --version for a nagios probe op->opaque->args[1] = strdup("--version"); if (op->opaque->args[1] == NULL) { return ENOMEM; } } else if (op->params != NULL) { GHashTableIter iter; char *key = NULL; char *value = NULL; int index = 1; // 0 is already set to executable name g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { if (index > (PCMK__NELEM(op->opaque->args) - 2)) { return E2BIG; } if (pcmk__str_eq(key, PCMK_XA_CRM_FEATURE_SET, pcmk__str_casei) || strstr(key, CRM_META "_")) { continue; } op->opaque->args[index++] = crm_strdup_printf("--%s", key); op->opaque->args[index++] = strdup(value); if (op->opaque->args[index - 1] == NULL) { return ENOMEM; } } } // Nagios actions don't need to keep the parameters if (op->params != NULL) { g_hash_table_destroy(op->params); op->params = NULL; } return pcmk_rc_ok; } /*! 
* \internal * \brief Map a Nagios result to a standard OCF result * * \param[in] exit_status Nagios exit status * * \return Standard OCF result */ enum ocf_exitcode services__nagios2ocf(int exit_status) { switch (exit_status) { case NAGIOS_STATE_OK: return PCMK_OCF_OK; case NAGIOS_INSUFFICIENT_PRIV: return PCMK_OCF_INSUFFICIENT_PRIV; case NAGIOS_STATE_WARNING: return PCMK_OCF_DEGRADED; case NAGIOS_STATE_CRITICAL: case NAGIOS_STATE_UNKNOWN: default: return PCMK_OCF_UNKNOWN_ERROR; } } static inline char * nagios_metadata_name(const char *plugin) { return crm_strdup_printf(NAGIOS_METADATA_DIR "/%s.xml", plugin); } GList * services__list_nagios_agents(void) { GList *plugin_list = NULL; GList *result = NULL; plugin_list = services_os_get_directory_list(NAGIOS_PLUGIN_DIR, TRUE, TRUE); // Return only the plugins that have metadata for (GList *gIter = plugin_list; gIter != NULL; gIter = gIter->next) { struct stat st; const char *plugin = gIter->data; char *metadata = nagios_metadata_name(plugin); if (stat(metadata, &st) == 0) { result = g_list_append(result, strdup(plugin)); } free(metadata); } g_list_free_full(plugin_list, free); return result; } gboolean services__nagios_agent_exists(const char *name) { char *buf = NULL; gboolean rc = FALSE; struct stat st; if (name == NULL) { return rc; } buf = crm_strdup_printf(NAGIOS_PLUGIN_DIR "/%s", name); if (stat(buf, &st) == 0) { rc = TRUE; } free(buf); return rc; } int services__get_nagios_metadata(const char *type, char **output) { int rc = pcmk_ok; FILE *file_strm = NULL; int start = 0, length = 0, read_len = 0; char *metadata_file = nagios_metadata_name(type); file_strm = fopen(metadata_file, "r"); if (file_strm == NULL) { crm_err("Metadata file %s does not exist", metadata_file); free(metadata_file); return -EIO; } /* see how big the file is */ start = ftell(file_strm); fseek(file_strm, 0L, SEEK_END); - length = ftell(file_strm); - fseek(file_strm, 0L, start); + length = ftell(file_strm); pcmk__assert(length >= 0); + fseek(file_strm, 0L, SEEK_SET); + pcmk__assert(start == ftell(file_strm)); - if (length <= 0) { + if (length == 0) { crm_info("%s was not valid", metadata_file); free(*output); *output = NULL; rc = -EIO; } else { crm_trace("Reading %d bytes from file", length); *output = pcmk__assert_alloc(1, (length + 1)); read_len = fread(*output, 1, length, file_strm); if (read_len != length) { crm_err("Calculated and read bytes differ: %d vs. %d", length, read_len); free(*output); *output = NULL; rc = -EIO; } } fclose(file_strm); free(metadata_file); return rc; } diff --git a/tools/cibadmin.c b/tools/cibadmin.c index 74a142c9f3..c6004ccdce 100644 --- a/tools/cibadmin.c +++ b/tools/cibadmin.c @@ -1,997 +1,1003 @@ /* - * Copyright 2004-2024 the Pacemaker project contributors + * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #define SUMMARY "query and edit the Pacemaker configuration" #define INDENT " " enum cibadmin_section_type { cibadmin_section_all = 0, cibadmin_section_scope, cibadmin_section_xpath, }; static int request_id = 0; static cib_t *the_cib = NULL; static GMainLoop *mainloop = NULL; static crm_exit_t exit_code = CRM_EX_OK; static struct { const char *cib_action; int cmd_options; enum cibadmin_section_type section_type; char *cib_section; char *validate_with; gint message_timeout_sec; enum pcmk__acl_render_how acl_render_mode; gchar *cib_user; gchar *dest_node; gchar *input_file; gchar *input_xml; gboolean input_stdin; bool delete_all; gboolean allow_create; gboolean force; gboolean get_node_path; gboolean local; gboolean no_children; gboolean score_update; gboolean sync_call; /* @COMPAT: For "-!" version option. Not advertised nor marked as * deprecated, but accepted. */ gboolean extended_version; //! \deprecated gboolean no_bcast; } options; int do_init(void); static int do_work(xmlNode *input, xmlNode **output); void cibadmin_op_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); static void print_xml_output(xmlNode * xml) { if (!xml) { return; } else if (xml->type != XML_ELEMENT_NODE) { return; } if (pcmk_is_set(options.cmd_options, cib_xpath_address)) { const char *id = crm_element_value(xml, PCMK_XA_ID); if (pcmk__xe_is(xml, PCMK__XE_XPATH_QUERY)) { xmlNode *child = NULL; for (child = xml->children; child; child = child->next) { print_xml_output(child); } } else if (id) { printf("%s\n", id); } } else { GString *buf = g_string_sized_new(1024); pcmk__xml_string(xml, pcmk__xml_fmt_pretty, buf, 0); fprintf(stdout, "%s", buf->str); g_string_free(buf, TRUE); } } // Upgrade requested but already at latest schema static void report_schema_unchanged(void) { const char *err = pcmk_rc_str(pcmk_rc_schema_unchanged); crm_info("Upgrade unnecessary: %s\n", err); printf("Upgrade unnecessary: %s\n", err); exit_code = CRM_EX_OK; } /*! * \internal * \brief Check whether the current CIB action is dangerous * \return true if \p options.cib_action is dangerous, or false otherwise */ static inline bool cib_action_is_dangerous(void) { return options.no_bcast || options.delete_all || pcmk__str_any_of(options.cib_action, PCMK__CIB_REQUEST_UPGRADE, PCMK__CIB_REQUEST_ERASE, NULL); } /*! * \internal * \brief Determine whether the given CIB scope is valid for \p cibadmin * * \param[in] scope Scope to validate * * \return true if \p scope is valid, or false otherwise * \note An invalid scope applies the operation to the entire CIB. 
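 *
 * For example (illustrative only):
 *
 *     scope_is_valid(PCMK_XE_RESOURCES);   // true
 *     scope_is_valid("not-a-section");     // false; whole CIB would be used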
*/ static inline bool scope_is_valid(const char *scope) { return pcmk__str_any_of(scope, PCMK_XE_CONFIGURATION, PCMK_XE_NODES, PCMK_XE_RESOURCES, PCMK_XE_CONSTRAINTS, PCMK_XE_CRM_CONFIG, PCMK_XE_RSC_DEFAULTS, PCMK_XE_OP_DEFAULTS, PCMK_XE_ACLS, PCMK_XE_FENCING_TOPOLOGY, PCMK_XE_TAGS, PCMK_XE_ALERTS, PCMK_XE_STATUS, NULL); } static gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.delete_all = false; if (pcmk__str_any_of(option_name, "-u", "--upgrade", NULL)) { options.cib_action = PCMK__CIB_REQUEST_UPGRADE; } else if (pcmk__str_any_of(option_name, "-Q", "--query", NULL)) { options.cib_action = PCMK__CIB_REQUEST_QUERY; } else if (pcmk__str_any_of(option_name, "-E", "--erase", NULL)) { options.cib_action = PCMK__CIB_REQUEST_ERASE; } else if (pcmk__str_any_of(option_name, "-B", "--bump", NULL)) { options.cib_action = PCMK__CIB_REQUEST_BUMP; } else if (pcmk__str_any_of(option_name, "-C", "--create", NULL)) { options.cib_action = PCMK__CIB_REQUEST_CREATE; } else if (pcmk__str_any_of(option_name, "-M", "--modify", NULL)) { options.cib_action = PCMK__CIB_REQUEST_MODIFY; } else if (pcmk__str_any_of(option_name, "-P", "--patch", NULL)) { options.cib_action = PCMK__CIB_REQUEST_APPLY_PATCH; } else if (pcmk__str_any_of(option_name, "-R", "--replace", NULL)) { options.cib_action = PCMK__CIB_REQUEST_REPLACE; } else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) { options.cib_action = PCMK__CIB_REQUEST_DELETE; } else if (pcmk__str_any_of(option_name, "-d", "--delete-all", NULL)) { options.cib_action = PCMK__CIB_REQUEST_DELETE; options.delete_all = true; } else if (pcmk__str_any_of(option_name, "-a", "--empty", NULL)) { options.cib_action = "empty"; pcmk__str_update(&options.validate_with, optarg); } else if (pcmk__str_any_of(option_name, "-5", "--md5-sum", NULL)) { options.cib_action = "md5-sum"; } else if (pcmk__str_any_of(option_name, "-6", "--md5-sum-versioned", NULL)) { options.cib_action = "md5-sum-versioned"; } else { // Should be impossible return FALSE; } return TRUE; } static gboolean show_access_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_eq(optarg, "auto", pcmk__str_null_matches)) { options.acl_render_mode = pcmk__acl_render_default; } else if (g_strcmp0(optarg, "namespace") == 0) { options.acl_render_mode = pcmk__acl_render_namespace; } else if (g_strcmp0(optarg, "text") == 0) { options.acl_render_mode = pcmk__acl_render_text; } else if (g_strcmp0(optarg, "color") == 0) { options.acl_render_mode = pcmk__acl_render_color; } else { g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Invalid value '%s' for option '%s'", optarg, option_name); return FALSE; } return TRUE; } static gboolean section_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-o", "--scope", NULL)) { options.section_type = cibadmin_section_scope; } else if (pcmk__str_any_of(option_name, "-A", "--xpath", NULL)) { options.section_type = cibadmin_section_xpath; } else { // Should be impossible return FALSE; } pcmk__str_update(&options.cib_section, optarg); return TRUE; } static GOptionEntry command_entries[] = { { "upgrade", 'u', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Upgrade the configuration to the latest syntax", NULL }, { "query", 'Q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Query the contents of the CIB", NULL }, { "erase", 'E', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Erase the 
contents of the whole CIB", NULL }, { "bump", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Increase the CIB's epoch value by 1", NULL }, { "create", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Create an object in the CIB (will fail if object already exists)", NULL }, { "modify", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Find object somewhere in CIB's XML tree and update it (fails if object " "does not exist unless -c is also specified)", NULL }, { "patch", 'P', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Supply an update in the form of an XML diff (see crm_diff(8))", NULL }, { "replace", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Recursively replace an object in the CIB", NULL }, { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Delete first object matching supplied criteria (for example, " "<" PCMK_XE_OP " " PCMK_XA_ID "=\"rsc1_op1\" " PCMK_XA_NAME "=\"monitor\"/>).\n" INDENT "The XML element name and all attributes must match in order for " "the element to be deleted.", NULL }, { "delete-all", 'd', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "When used with --xpath, remove all matching objects in the " "configuration instead of just the first one", NULL }, { "empty", 'a', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Output an empty CIB. Accepts an optional schema name argument to use as " "the " PCMK_XA_VALIDATE_WITH " value.\n" INDENT "If no schema is given, the latest will be used.", "[schema]" }, { "md5-sum", '5', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Calculate the on-disk CIB digest", NULL }, { "md5-sum-versioned", '6', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Calculate an on-the-wire versioned CIB digest", NULL }, { NULL } }; static GOptionEntry data_entries[] = { /* @COMPAT: These arguments should be last-wins. We can have an enum option * that stores the input type, along with a single string option that stores * the XML string for --xml-text, filename for --xml-file, or NULL for * --xml-pipe. 
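 *
 * A rough sketch of that idea (the names here are hypothetical, not an
 * existing API):
 *
 *     enum cibadmin_input_source { cibadmin_input_none, cibadmin_input_text,
 *                                  cibadmin_input_file, cibadmin_input_pipe };
 *     // plus a single gchar *input_value holding the XML string or file
 *     // name (NULL for stdin), with the last option given taking precedence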
*/ { "xml-text", 'X', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.input_xml, "Retrieve XML from the supplied string", "value" }, { "xml-file", 'x', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME, &options.input_file, "Retrieve XML from the named file", "value" }, { "xml-pipe", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.input_stdin, "Retrieve XML from stdin", NULL }, { NULL } }; static GOptionEntry addl_entries[] = { { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "Force the action to be performed", NULL }, { "timeout", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_INT, &options.message_timeout_sec, "Time (in seconds) to wait before declaring the operation failed", "value" }, { "user", 'U', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.cib_user, "Run the command with permissions of the named user (valid only for the " "root and " CRM_DAEMON_USER " accounts)", "value" }, { "sync-call", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.sync_call, "Wait for call to complete before returning", NULL }, { "local", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.local, "Command takes effect locally (should be used only for queries)", NULL }, { "scope", 'o', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, section_cb, "Limit scope of operation to specific section of CIB\n" INDENT "Valid values: " PCMK_XE_CONFIGURATION ", " PCMK_XE_NODES ", " PCMK_XE_RESOURCES ", " PCMK_XE_CONSTRAINTS ", " PCMK_XE_CRM_CONFIG ", " PCMK_XE_RSC_DEFAULTS ",\n" INDENT " " PCMK_XE_OP_DEFAULTS ", " PCMK_XE_ACLS ", " PCMK_XE_FENCING_TOPOLOGY ", " PCMK_XE_TAGS ", " PCMK_XE_ALERTS ", " PCMK_XE_STATUS "\n" INDENT "If both --scope/-o and --xpath/-a are specified, the last one to " "appear takes effect", "value" }, { "xpath", 'A', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, section_cb, "A valid XPath to use instead of --scope/-o\n" INDENT "If both --scope/-o and --xpath/-a are specified, the last one to " "appear takes effect", "value" }, { "node-path", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.get_node_path, "When performing XPath queries, return paths of any matches found\n" INDENT "(for example, " "\"/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES "/" PCMK_XE_CLONE "[@" PCMK_XA_ID "='dummy-clone']" "/" PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "='dummy']\")", NULL }, { "show-access", 'S', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, show_access_cb, "Whether to use syntax highlighting for ACLs (with -Q/--query and " "-U/--user)\n" INDENT "Allowed values: 'color' (default for terminal), 'text' (plain text, " "default for non-terminal),\n" INDENT " 'namespace', or 'auto' (use default value)\n" INDENT "Default value: 'auto'", "[value]" }, { "score", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.score_update, "Treat new attribute values as atomic score updates where possible " "(with --modify/-M).\n" INDENT "This currently happens by default and cannot be disabled, but\n" INDENT "this default behavior is deprecated and will be removed in a\n" INDENT "future release. Set this flag if this behavior is desired.\n" INDENT "This option takes effect when updating XML attributes. 
For an\n" INDENT "attribute named \"name\", if the new value is \"name++\" or\n" INDENT "\"name+=X\" for some score X, the new value is set as follows:\n" INDENT "If attribute \"name\" is not already set to some value in\n" INDENT "the element being updated, the new value is set as a literal\n" INDENT "string.\n" INDENT "If the new value is \"name++\", then the attribute is set to \n" INDENT "its existing value (parsed as a score) plus 1.\n" INDENT "If the new value is \"name+=X\" for some score X, then the\n" INDENT "attribute is set to its existing value plus X, where the\n" INDENT "existing value and X are parsed and added as scores.\n" INDENT "Scores are integer values capped at INFINITY and -INFINITY.\n" INDENT "Refer to Pacemaker Explained and to the char2score() function\n" INDENT "for more details on scores, including how they're parsed and\n" INDENT "added.", NULL }, { "allow-create", 'c', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.allow_create, "(Advanced) Allow target of --modify/-M to be created if it does not " "exist", NULL }, { "no-children", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.no_children, "(Advanced) When querying an object, do not include its children in the " "result", NULL }, { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.dest_node, "(Advanced) Send command to the specified host", "value" }, // @COMPAT: Deprecated { "no-bcast", 'b', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.no_bcast, "deprecated", NULL }, // @COMPAT: Deprecated { "host", 'h', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.dest_node, "deprecated", NULL }, { NULL } }; static GOptionContext * build_arg_context(pcmk__common_args_t *args) { const char *desc = NULL; GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { // @COMPAT: Deprecated { "extended-version", '!', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.extended_version, "deprecated", NULL }, { NULL } }; desc = "Examples:\n\n" "Query the configuration from the local node:\n\n" "\t# cibadmin --query --local\n\n" "Query just the cluster options configuration:\n\n" "\t# cibadmin --query --scope " PCMK_XE_CRM_CONFIG "\n\n" "Query all '" PCMK_META_TARGET_ROLE "' settings:\n\n" "\t# cibadmin --query --xpath " "\"//" PCMK_XE_NVPAIR "[@" PCMK_XA_NAME "='" PCMK_META_TARGET_ROLE"']\"\n\n" "Remove all '" PCMK_META_IS_MANAGED "' settings:\n\n" "\t# cibadmin --delete-all --xpath " "\"//" PCMK_XE_NVPAIR "[@" PCMK_XA_NAME "='" PCMK_META_IS_MANAGED "']\"\n\n" "Remove the resource named 'old':\n\n" "\t# cibadmin --delete --xml-text " "'<" PCMK_XE_PRIMITIVE " " PCMK_XA_ID "=\"old\"/>'\n\n" "Remove all resources from the configuration:\n\n" "\t# cibadmin --replace --scope " PCMK_XE_RESOURCES " --xml-text '<" PCMK_XE_RESOURCES "/>'\n\n" "Replace complete configuration with contents of " "$HOME/pacemaker.xml:\n\n" "\t# cibadmin --replace --xml-file $HOME/pacemaker.xml\n\n" "Replace " PCMK_XE_CONSTRAINTS " section of configuration with " "contents of $HOME/constraints.xml:\n\n" "\t# cibadmin --replace --scope " PCMK_XE_CONSTRAINTS " --xml-file $HOME/constraints.xml\n\n" "Increase configuration version to prevent old configurations from " "being loaded accidentally:\n\n" "\t# cibadmin --modify --score --xml-text " "'<" PCMK_XE_CIB " " PCMK_XA_ADMIN_EPOCH "=\"" PCMK_XA_ADMIN_EPOCH "++\"/>'\n\n" "Edit the configuration with your favorite $EDITOR:\n\n" "\t# cibadmin --query > $HOME/local.xml\n\n" "\t# $EDITOR $HOME/local.xml\n\n" "\t# cibadmin --replace --xml-file $HOME/local.xml\n\n" "Assuming 
terminal, render configuration in color (green for " "writable, blue for readable, red for\n" "denied) to visualize permissions for user tony:\n\n" "\t# cibadmin --show-access=color --query --user tony | less -r\n\n" "SEE ALSO:\n" " crm(8), pcs(8), crm_shadow(8), crm_diff(8)\n"; context = pcmk__build_arg_context(args, NULL, NULL, ""); g_option_context_set_description(context, desc); pcmk__add_main_args(context, extra_prog_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command help", command_entries); pcmk__add_arg_group(context, "data", "Data:", "Show data help", data_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { int rc = pcmk_rc_ok; const char *source = NULL; xmlNode *output = NULL; xmlNode *input = NULL; gchar *acl_cred = NULL; GError *error = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "ANSUXhotx"); GOptionContext *context = build_arg_context(args); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } if (g_strv_length(processed_args) > 1) { gchar *help = g_option_context_get_help(context, TRUE, NULL); GString *extra = g_string_sized_new(128); for (int lpc = 1; processed_args[lpc] != NULL; lpc++) { if (extra->len > 0) { g_string_append_c(extra, ' '); } g_string_append(extra, processed_args[lpc]); } exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "non-option ARGV-elements: %s\n\n%s", extra->str, help); g_free(help); g_string_free(extra, TRUE); goto done; } if (args->version || options.extended_version) { g_strfreev(processed_args); pcmk__free_arg_context(context); /* FIXME: When cibadmin is converted to use formatted output, this can * be replaced by out->version with the appropriate boolean flag. * * options.extended_version is deprecated and will be removed in a * future release. */ pcmk__cli_help(options.extended_version? '!' : 'v'); } /* At LOG_ERR, stderr for CIB calls is rather verbose. Several lines like * * (func@file:line) error: CIB failures * * In cibadmin we explicitly output the XML portion without the prefixes. So * we default to LOG_CRIT. */ pcmk__cli_init_logging("cibadmin", 0); set_crm_log_level(LOG_CRIT); if (args->verbosity > 0) { cib__set_call_options(options.cmd_options, crm_system_name, cib_verbose); for (int i = 0; i < args->verbosity; i++) { crm_bump_log_level(argc, argv); } } if (options.cib_action == NULL) { // @COMPAT: Create a default command if other tools have one gchar *help = g_option_context_get_help(context, TRUE, NULL); exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must specify a command option\n\n%s", help); g_free(help); goto done; } if (strcmp(options.cib_action, "empty") == 0) { // Output an empty CIB GString *buf = g_string_sized_new(1024); output = createEmptyCib(1); crm_xml_add(output, PCMK_XA_VALIDATE_WITH, options.validate_with); pcmk__xml_string(output, pcmk__xml_fmt_pretty, buf, 0); fprintf(stdout, "%s", buf->str); g_string_free(buf, TRUE); goto done; } if (cib_action_is_dangerous() && !options.force) { exit_code = CRM_EX_UNSAFE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "The supplied command is considered dangerous. 
To prevent " "accidental destruction of the cluster, the --force flag " "is required in order to proceed."); goto done; } if (options.message_timeout_sec < 1) { // Set default timeout options.message_timeout_sec = 30; } if (options.section_type == cibadmin_section_xpath) { // Enable getting section by XPath cib__set_call_options(options.cmd_options, crm_system_name, cib_xpath); } else if (options.section_type == cibadmin_section_scope) { if (!scope_is_valid(options.cib_section)) { // @COMPAT: Consider requiring --force to proceed fprintf(stderr, "Invalid value '%s' for '--scope'. Operation will apply " "to the entire CIB.\n", options.cib_section); } } if (options.allow_create) { // Allow target of --modify/-M to be created if it does not exist cib__set_call_options(options.cmd_options, crm_system_name, cib_can_create); } if (options.delete_all) { // With cibadmin_section_xpath, remove all matching objects cib__set_call_options(options.cmd_options, crm_system_name, cib_multiple); } if (options.get_node_path) { /* Enable getting node path of XPath query matches. * Meaningful only if options.section_type == cibadmin_section_xpath. */ cib__set_call_options(options.cmd_options, crm_system_name, cib_xpath_address); } if (options.local) { // Configure command to take effect only locally cib__set_call_options(options.cmd_options, crm_system_name, cib_scope_local); } // @COMPAT: Deprecated option if (options.no_bcast) { // Configure command to take effect only locally and not to broadcast cib__set_call_options(options.cmd_options, crm_system_name, cib_inhibit_bcast|cib_scope_local); } if (options.no_children) { // When querying an object, don't include its children in the result cib__set_call_options(options.cmd_options, crm_system_name, cib_no_children); } if (options.sync_call || (options.acl_render_mode != pcmk__acl_render_none)) { /* Wait for call to complete before returning. * * The ACL render modes work only with sync calls due to differences in * output handling between sync/async. It shouldn't matter to the user * whether the call is synchronous; for a CIB query, we have to wait for * the result in order to display it in any case. */ cib__set_call_options(options.cmd_options, crm_system_name, cib_sync_call); } if (options.input_file != NULL) { input = pcmk__xml_read(options.input_file); source = options.input_file; } else if (options.input_xml != NULL) { input = pcmk__xml_parse(options.input_xml); source = "input string"; } else if (options.input_stdin) { input = pcmk__xml_read(NULL); source = "STDIN"; } else if (options.acl_render_mode != pcmk__acl_render_none) { char *username = pcmk__uid2username(geteuid()); bool required = pcmk_acl_required(username); free(username); if (required) { if (options.force) { fprintf(stderr, "The supplied command can provide skewed" " result since it is run under user that also" " gets guarded per ACLs on their own right." " Continuing since --force flag was" " provided.\n"); } else { exit_code = CRM_EX_UNSAFE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "The supplied command can provide skewed result " "since it is run under user that also gets guarded " "per ACLs in their own right. 
To accept the risk " "of such a possible distortion (without even " "knowing it at this time), use the --force flag."); goto done; } } if (options.cib_user == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "The supplied command requires -U user specified."); goto done; } /* We already stopped/warned ACL-controlled users about consequences. * * Note: acl_cred takes ownership of options.cib_user here. * options.cib_user is set to NULL so that the CIB is obtained as the * user running the cibadmin command. The CIB must be obtained as a user * with full permissions in order to show the CIB correctly annotated * for the options.cib_user's permissions. */ acl_cred = options.cib_user; options.cib_user = NULL; } if (input != NULL) { crm_log_xml_debug(input, "[admin input]"); } else if (source != NULL) { exit_code = CRM_EX_CONFIG; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Couldn't parse input from %s.", source); goto done; } if (pcmk__str_eq(options.cib_action, "md5-sum", pcmk__str_casei)) { char *digest = NULL; if (input == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Please supply XML to process with -X, -x, or -p"); goto done; } digest = calculate_on_disk_digest(input); fprintf(stderr, "Digest: "); fprintf(stdout, "%s\n", pcmk__s(digest, "")); free(digest); goto done; } else if (strcmp(options.cib_action, "md5-sum-versioned") == 0) { char *digest = NULL; const char *version = NULL; if (input == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Please supply XML to process with -X, -x, or -p"); goto done; } version = crm_element_value(input, PCMK_XA_CRM_FEATURE_SET); digest = calculate_xml_versioned_digest(input, FALSE, TRUE, version); fprintf(stderr, "Versioned (%s) digest: ", version); fprintf(stdout, "%s\n", pcmk__s(digest, "")); free(digest); goto done; } else if (pcmk__str_eq(options.cib_action, PCMK__CIB_REQUEST_MODIFY, pcmk__str_none)) { /* @COMPAT When we drop default support for expansion in cibadmin, guard * with `if (options.score_update)` */ cib__set_call_options(options.cmd_options, crm_system_name, cib_score_update); } rc = do_init(); if (rc != pcmk_ok) { rc = pcmk_legacy2rc(rc); exit_code = pcmk_rc2exitc(rc); crm_err("Init failed, could not perform requested operations: %s", pcmk_rc_str(rc)); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Init failed, could not perform requested operations: %s", pcmk_rc_str(rc)); goto done; } rc = do_work(input, &output); if (!pcmk_is_set(options.cmd_options, cib_sync_call) && (the_cib->variant != cib_file) && (rc >= 0)) { /* For async call, positive rc is the call ID (file always synchronous). * * Wait for the reply by creating a mainloop and running it until the * callbacks are invoked. 
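 *
 * (cibadmin_op_callback() below quits this mainloop once the reply whose
 * call ID matches request_id has been processed.)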
*/ request_id = rc; the_cib->cmds->register_callback(the_cib, request_id, options.message_timeout_sec, FALSE, NULL, "cibadmin_op_callback", cibadmin_op_callback); mainloop = g_main_loop_new(NULL, FALSE); crm_trace("%s waiting for reply from the local CIB", crm_system_name); crm_info("Starting mainloop"); g_main_loop_run(mainloop); } else { rc = pcmk_legacy2rc(rc); if ((rc == pcmk_rc_schema_unchanged) && (strcmp(options.cib_action, PCMK__CIB_REQUEST_UPGRADE) == 0)) { report_schema_unchanged(); } else if (rc != pcmk_rc_ok) { crm_err("Call failed: %s", pcmk_rc_str(rc)); fprintf(stderr, "Call failed: %s\n", pcmk_rc_str(rc)); exit_code = pcmk_rc2exitc(rc); if (rc == pcmk_rc_schema_validation) { if (strcmp(options.cib_action, PCMK__CIB_REQUEST_UPGRADE) == 0) { xmlNode *obj = NULL; if (the_cib->cmds->query(the_cib, NULL, &obj, options.cmd_options) == pcmk_ok) { pcmk__update_schema(&obj, NULL, true, false); } free_xml(obj); } else if (output != NULL) { // Show validation errors to stderr pcmk__validate_xml(output, NULL, NULL, NULL); } } } } if ((output != NULL) && (options.acl_render_mode != pcmk__acl_render_none)) { xmlDoc *acl_evaled_doc; rc = pcmk__acl_annotate_permissions(acl_cred, output->doc, &acl_evaled_doc); if (rc == pcmk_rc_ok) { xmlChar *rendered = NULL; rc = pcmk__acl_evaled_render(acl_evaled_doc, options.acl_render_mode, &rendered); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_CONFIG; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not render evaluated access: %s", pcmk_rc_str(rc)); goto done; } printf("%s\n", (char *) rendered); free(rendered); } else { exit_code = CRM_EX_CONFIG; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not evaluate access per request (%s, error: %s)", acl_cred, pcmk_rc_str(rc)); goto done; } } else if (output != NULL) { print_xml_output(output); } crm_trace("%s exiting normally", crm_system_name); done: g_strfreev(processed_args); pcmk__free_arg_context(context); g_free(options.cib_user); g_free(options.dest_node); g_free(options.input_file); g_free(options.input_xml); free(options.cib_section); free(options.validate_with); g_free(acl_cred); free_xml(input); free_xml(output); rc = cib__clean_up_connection(&the_cib); if (exit_code == CRM_EX_OK) { exit_code = pcmk_rc2exitc(rc); } pcmk__output_and_clear_error(&error, NULL); crm_exit(exit_code); } static int do_work(xmlNode *input, xmlNode **output) { /* construct the request */ the_cib->call_timeout = options.message_timeout_sec; if ((strcmp(options.cib_action, PCMK__CIB_REQUEST_REPLACE) == 0) && pcmk__xe_is(input, PCMK_XE_CIB)) { xmlNode *status = pcmk_find_cib_element(input, PCMK_XE_STATUS); if (status == NULL) { pcmk__xe_create(input, PCMK_XE_STATUS); } } crm_trace("Passing \"%s\" to variant_op...", options.cib_action); return cib_internal_op(the_cib, options.cib_action, options.dest_node, options.cib_section, input, output, options.cmd_options, options.cib_user); } int do_init(void) { int rc = pcmk_ok; the_cib = cib_new(); - rc = cib__signon_attempts(the_cib, crm_system_name, cib_command, 5); + + if (the_cib == NULL) { + rc = ENOTCONN; + } else { + rc = cib__signon_attempts(the_cib, crm_system_name, cib_command, 5); + } + if (rc != pcmk_ok) { crm_err("Could not connect to the CIB: %s", pcmk_strerror(rc)); fprintf(stderr, "Could not connect to the CIB: %s\n", pcmk_strerror(rc)); } return rc; } void cibadmin_op_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { rc = pcmk_legacy2rc(rc); exit_code = pcmk_rc2exitc(rc); if (rc == pcmk_rc_schema_unchanged) { 
report_schema_unchanged(); } else if (rc != pcmk_rc_ok) { crm_warn("Call %s failed: %s " CRM_XS " rc=%d", options.cib_action, pcmk_rc_str(rc), rc); fprintf(stderr, "Call %s failed: %s\n", options.cib_action, pcmk_rc_str(rc)); print_xml_output(output); } else if ((strcmp(options.cib_action, PCMK__CIB_REQUEST_QUERY) == 0) && (output == NULL)) { crm_err("Query returned no output"); crm_log_xml_err(msg, "no output"); } else if (output == NULL) { crm_info("Call passed"); } else { crm_info("Call passed"); print_xml_output(output); } if (call_id == request_id) { g_main_loop_quit(mainloop); } else { crm_info("Message was not the response we were looking for (%d vs. %d)", call_id, request_id); } }