diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c index 500f64e575..866d61c7f0 100644 --- a/daemons/controld/controld_fencing.c +++ b/daemons/controld/controld_fencing.c @@ -1,1115 +1,1115 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include static void tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event); /* * stonith failure counting * * We don't want to get stuck in a permanent fencing loop. Keep track of the * number of fencing failures for each target node, and the most we'll restart a * transition for. */ struct st_fail_rec { int count; }; static bool fence_reaction_panic = false; static unsigned long int stonith_max_attempts = 10; static GHashTable *stonith_failures = NULL; /*! * \internal * \brief Update max fencing attempts before giving up * * \param[in] value New max fencing attempts */ static void update_stonith_max_attempts(const char *value) { stonith_max_attempts = char2score(value); if (stonith_max_attempts < 1UL) { stonith_max_attempts = 10UL; } } /*! * \internal * \brief Configure reaction to notification of local node being fenced * * \param[in] reaction_s Reaction type */ static void set_fence_reaction(const char *reaction_s) { if (pcmk__str_eq(reaction_s, "panic", pcmk__str_casei)) { fence_reaction_panic = true; } else { - if (!pcmk__str_eq(reaction_s, "stop", pcmk__str_casei)) { + if (!pcmk__str_eq(reaction_s, PCMK_VALUE_STOP, pcmk__str_casei)) { crm_warn("Invalid value '%s' for %s, using 'stop'", reaction_s, PCMK_OPT_FENCE_REACTION); } fence_reaction_panic = false; } } /*! * \internal * \brief Configure fencing options based on the CIB * * \param[in,out] options Name/value pairs for configured options */ void controld_configure_fencing(GHashTable *options) { const char *value = NULL; value = g_hash_table_lookup(options, PCMK_OPT_FENCE_REACTION); set_fence_reaction(value); value = g_hash_table_lookup(options, PCMK_OPT_STONITH_MAX_ATTEMPTS); update_stonith_max_attempts(value); } static gboolean too_many_st_failures(const char *target) { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *value = NULL; if (stonith_failures == NULL) { return FALSE; } if (target == NULL) { g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { if (value->count >= stonith_max_attempts) { target = (const char*)key; goto too_many; } } } else { value = g_hash_table_lookup(stonith_failures, target); if ((value != NULL) && (value->count >= stonith_max_attempts)) { goto too_many; } } return FALSE; too_many: crm_warn("Too many failures (%d) to fence %s, giving up", value->count, target); return TRUE; } /*! 
* \internal * \brief Reset a stonith fail count * * \param[in] target Name of node to reset, or NULL for all */ void st_fail_count_reset(const char *target) { if (stonith_failures == NULL) { return; } if (target) { struct st_fail_rec *rec = NULL; rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count = 0; } } else { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *rec = NULL; g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rec)) { rec->count = 0; } } } static void st_fail_count_increment(const char *target) { struct st_fail_rec *rec = NULL; if (stonith_failures == NULL) { stonith_failures = pcmk__strkey_table(free, free); } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count++; } else { rec = malloc(sizeof(struct st_fail_rec)); if(rec == NULL) { return; } rec->count = 1; g_hash_table_insert(stonith_failures, strdup(target), rec); } } /* end stonith fail count functions */ static void cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if (rc < pcmk_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, pcmk__graph_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); } } static void send_stonith_update(pcmk__graph_action_t *action, const char *target, const char *uuid) { int rc = pcmk_ok; crm_node_t *peer = NULL; /* We (usually) rely on the membership layer to do node_update_cluster, * and the peer status callback to do node_update_peer, because the node * might have already rejoined before we get the stonith result here. */ int flags = node_update_join | node_update_expected; /* zero out the node-status & remove all LRM status info */ xmlNode *node_state = NULL; CRM_CHECK(target != NULL, return); CRM_CHECK(uuid != NULL, return); /* Make sure the membership and join caches are accurate. * Try getting any existing node cache entry also by node uuid in case it * doesn't have an uname yet. */ peer = pcmk__get_node(0, target, uuid, pcmk__node_search_any); CRM_CHECK(peer != NULL, return); if (peer->state == NULL) { /* Usually, we rely on the membership layer to update the cluster state * in the CIB. However, if the node has never been seen, do it here, so * the node is not considered unclean. 
*/ flags |= node_update_cluster; } if (peer->uuid == NULL) { crm_info("Recording uuid '%s' for node '%s'", uuid, target); peer->uuid = strdup(uuid); } crmd_peer_down(peer, TRUE); /* Generate a node state update for the CIB */ node_state = create_node_state_update(peer, flags, NULL, __func__); /* we have to mark whether or not remote nodes have already been fenced */ if (peer->flags & crm_remote_node) { char *now_s = pcmk__ttoa(time(NULL)); crm_xml_add(node_state, PCMK__XA_NODE_FENCED, now_s); free(now_s); } /* Force our known ID */ crm_xml_add(node_state, PCMK_XA_ID, uuid); rc = controld_globals.cib_conn->cmds->modify(controld_globals.cib_conn, PCMK_XE_STATUS, node_state, cib_scope_local |cib_can_create); /* Delay processing the trigger until the update completes */ crm_debug("Sending fencing update %d for %s", rc, target); fsa_register_cib_callback(rc, strdup(target), cib_fencing_updated); // Make sure it sticks /* controld_globals.cib_conn->cmds->bump_epoch(controld_globals.cib_conn, * cib_scope_local); */ controld_delete_node_state(peer->uname, controld_section_all, cib_scope_local); free_xml(node_state); return; } /*! * \internal * \brief Abort transition due to stonith failure * * \param[in] abort_action Whether to restart or stop transition * \param[in] target Don't restart if this (NULL for any) has too many failures * \param[in] reason Log this stonith action XML as abort reason (or NULL) */ static void abort_for_stonith_failure(enum pcmk__graph_next abort_action, const char *target, const xmlNode *reason) { /* If stonith repeatedly fails, we eventually give up on starting a new * transition for that reason. */ if ((abort_action != pcmk__graph_wait) && too_many_st_failures(target)) { abort_action = pcmk__graph_wait; } abort_transition(INFINITY, abort_action, "Stonith failed", reason); } /* * stonith cleanup list * * If the DC is shot, proper notifications might not go out. * The stonith cleanup list allows the cluster to (re-)send * notifications once a new DC is elected. */ static GList *stonith_cleanup_list = NULL; /*! * \internal * \brief Add a node to the stonith cleanup list * * \param[in] target Name of node to add */ void add_stonith_cleanup(const char *target) { stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target)); } /*! * \internal * \brief Remove a node from the stonith cleanup list * * \param[in] Name of node to remove */ void remove_stonith_cleanup(const char *target) { GList *iter = stonith_cleanup_list; while (iter != NULL) { GList *tmp = iter; char *iter_name = tmp->data; iter = iter->next; if (pcmk__str_eq(target, iter_name, pcmk__str_casei)) { crm_trace("Removing %s from the cleanup list", iter_name); stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp); free(iter_name); } } } /*! * \internal * \brief Purge all entries from the stonith cleanup list */ void purge_stonith_cleanup(void) { if (stonith_cleanup_list) { GList *iter = NULL; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_info("Purging %s from stonith cleanup list", target); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } } /*! 
* \internal * \brief Send stonith updates for all entries in cleanup list, then purge it */ void execute_stonith_cleanup(void) { GList *iter; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_node_t *target_node = pcmk__get_node(0, target, NULL, pcmk__node_search_cluster); const char *uuid = crm_peer_uuid(target_node); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } /* end stonith cleanup list functions */ /* stonith API client * * Functions that need to interact directly with the fencer via its API */ static stonith_t *stonith_api = NULL; static mainloop_timer_t *controld_fencer_connect_timer = NULL; static char *te_client_id = NULL; static gboolean fail_incompletable_stonith(pcmk__graph_t *graph) { GList *lpc = NULL; const char *task = NULL; xmlNode *last_action = NULL; if (graph == NULL) { return FALSE; } for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { GList *lpc2 = NULL; pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) { continue; } for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc2->data; if ((action->type != pcmk__cluster_graph_action) || pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { continue; } task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); last_action = action->xml; pcmk__update_graph(graph, action); crm_notice("Failing action %d (%s): fencer terminated", action->id, ID(action->xml)); } } } if (last_action != NULL) { crm_warn("Fencer failure resulted in unrunnable actions"); abort_for_stonith_failure(pcmk__graph_restart, NULL, last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e) { te_cleanup_stonith_history_sync(st, FALSE); if (pcmk_is_set(controld_globals.fsa_input_register, R_ST_REQUIRED)) { crm_err("Lost fencer connection (will attempt to reconnect)"); if (!mainloop_timer_running(controld_fencer_connect_timer)) { mainloop_timer_start(controld_fencer_connect_timer); } } else { crm_info("Disconnected from fencer"); } if (stonith_api) { /* the client API won't properly reconnect notifications * if they are still in the table - so remove them */ if (stonith_api->state != stonith_disconnected) { stonith_api->cmds->disconnect(st); } stonith_api->cmds->remove_notification(stonith_api, NULL); } if (AM_I_DC) { fail_incompletable_stonith(controld_globals.transition_graph); trigger_graph(); } } /*! 
* \internal * \brief Handle an event notification from the fencing API * * \param[in] st Fencing API connection (ignored) * \param[in] event Fencing API event notification */ static void handle_fence_notification(stonith_t *st, stonith_event_t *event) { bool succeeded = true; const char *executioner = "the cluster"; const char *client = "a client"; const char *reason = NULL; int exec_status; if (te_client_id == NULL) { te_client_id = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } if (event == NULL) { crm_err("Notify data not found"); return; } if (event->executioner != NULL) { executioner = event->executioner; } if (event->client_origin != NULL) { client = event->client_origin; } exec_status = stonith__event_execution_status(event); if ((stonith__event_exit_status(event) != CRM_EX_OK) || (exec_status != PCMK_EXEC_DONE)) { succeeded = false; if (exec_status == PCMK_EXEC_DONE) { exec_status = PCMK_EXEC_ERROR; } } reason = stonith__event_exit_reason(event); crmd_alert_fencing_op(event); if (pcmk__str_eq(PCMK_ACTION_ON, event->action, pcmk__str_none)) { // Unfencing doesn't need special handling, just a log message if (succeeded) { crm_notice("%s was unfenced by %s at the request of %s@%s", event->target, executioner, client, event->origin); } else { crm_err("Unfencing of %s by %s failed (%s%s%s) with exit status %d", event->target, executioner, pcmk_exec_status_str(exec_status), ((reason == NULL)? "" : ": "), ((reason == NULL)? "" : reason), stonith__event_exit_status(event)); } return; } if (succeeded && pcmk__str_eq(event->target, controld_globals.our_nodename, pcmk__str_casei)) { /* We were notified of our own fencing. Most likely, either fencing was * misconfigured, or fabric fencing that doesn't cut cluster * communication is in use. * * Either way, shutting down the local host is a good idea, to require * administrator intervention. Also, other nodes would otherwise likely * set our status to lost because of the fencing callback and discard * our subsequent election votes as "not part of our cluster". */ crm_crit("We were allegedly just fenced by %s for %s!", executioner, event->origin); // Dumps blackbox if enabled if (fence_reaction_panic) { pcmk__panic(__func__); } else { crm_exit(CRM_EX_FATAL); } return; // Should never get here } /* Update the count of fencing failures for this target, in case we become * DC later. The current DC has already updated its fail count in * tengine_stonith_callback(). */ if (!AM_I_DC) { if (succeeded) { st_fail_count_reset(event->target); } else { st_fail_count_increment(event->target); } } crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s@%s: " "%s%s%s%s " CRM_XS " event=%s", event->target, (succeeded? "" : " not"), event->action, executioner, client, event->origin, (succeeded? "OK" : pcmk_exec_status_str(exec_status)), ((reason == NULL)? "" : " ("), ((reason == NULL)? "" : reason), ((reason == NULL)? "" : ")"), event->id); if (succeeded) { crm_node_t *peer = pcmk__search_node_caches(0, event->target, pcmk__node_search_any |pcmk__node_search_known); const char *uuid = NULL; if (peer == NULL) { return; } uuid = crm_peer_uuid(peer); if (AM_I_DC) { /* The DC always sends updates */ send_stonith_update(NULL, event->target, uuid); /* @TODO Ideally, at this point, we'd check whether the fenced node * hosted any guest nodes, and call remote_node_down() for them. * Unfortunately, the controller doesn't have a simple, reliable way * to map hosts to guests. 
It might be possible to track this in the * peer cache via crm_remote_peer_cache_refresh(). For now, we rely * on the scheduler creating fence pseudo-events for the guests. */ if (!pcmk__str_eq(client, te_client_id, pcmk__str_casei)) { /* Abort the current transition if it wasn't the cluster that * initiated fencing. */ crm_info("External fencing operation from %s fenced %s", client, event->target); abort_transition(INFINITY, pcmk__graph_restart, "External Fencing Operation", NULL); } } else if (pcmk__str_eq(controld_globals.dc_name, event->target, pcmk__str_null_matches|pcmk__str_casei) && !pcmk_is_set(peer->flags, crm_remote_node)) { // Assume the target was our DC if we don't currently have one if (controld_globals.dc_name != NULL) { crm_notice("Fencing target %s was our DC", event->target); } else { crm_notice("Fencing target %s may have been our DC", event->target); } /* Given the CIB resyncing that occurs around elections, * have one node update the CIB now and, if the new DC is different, * have them do so too after the election */ if (pcmk__str_eq(event->executioner, controld_globals.our_nodename, pcmk__str_casei)) { send_stonith_update(NULL, event->target, uuid); } add_stonith_cleanup(event->target); } /* If the target is a remote node, and we host its connection, * immediately fail all monitors so it can be recovered quickly. * The connection won't necessarily drop when a remote node is fenced, * so the failure might not otherwise be detected until the next poke. */ if (pcmk_is_set(peer->flags, crm_remote_node)) { remote_ra_fail(event->target); } crmd_peer_down(peer, TRUE); } } /*! * \brief Connect to fencer * * \param[in] user_data If NULL, retry failures now, otherwise retry in mainloop timer * * \return G_SOURCE_REMOVE on success, G_SOURCE_CONTINUE to retry * \note If user_data is NULL, this will wait 2s between attempts, for up to * 30 attempts, meaning the controller could be blocked as long as 58s. 
*/ gboolean controld_timer_fencer_connect(gpointer user_data) { int rc = pcmk_ok; if (stonith_api == NULL) { stonith_api = stonith_api_new(); if (stonith_api == NULL) { crm_err("Could not connect to fencer: API memory allocation failed"); return G_SOURCE_REMOVE; } } if (stonith_api->state != stonith_disconnected) { crm_trace("Already connected to fencer, no need to retry"); return G_SOURCE_REMOVE; } if (user_data == NULL) { // Blocking (retry failures now until successful) rc = stonith_api_connect_retry(stonith_api, crm_system_name, 30); if (rc != pcmk_ok) { crm_err("Could not connect to fencer in 30 attempts: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } } else { // Non-blocking (retry failures later in main loop) rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); if (controld_fencer_connect_timer == NULL) { controld_fencer_connect_timer = mainloop_timer_add("controld_fencer_connect", 1000, TRUE, controld_timer_fencer_connect, GINT_TO_POINTER(TRUE)); } if (rc != pcmk_ok) { if (pcmk_is_set(controld_globals.fsa_input_register, R_ST_REQUIRED)) { crm_notice("Fencer connection failed (will retry): %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); if (!mainloop_timer_running(controld_fencer_connect_timer)) { mainloop_timer_start(controld_fencer_connect_timer); } return G_SOURCE_CONTINUE; } else { crm_info("Fencer connection failed (ignoring because no longer required): %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } return G_SOURCE_REMOVE; } } if (rc == pcmk_ok) { stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE, handle_fence_notification); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED, tengine_stonith_history_synced); te_trigger_stonith_history_sync(TRUE); crm_notice("Fencer successfully connected"); } return G_SOURCE_REMOVE; } void controld_disconnect_fencer(bool destroy) { if (stonith_api) { // Prevent fencer connection from coming up again controld_clear_fsa_input_flags(R_ST_REQUIRED); if (stonith_api->state != stonith_disconnected) { stonith_api->cmds->disconnect(stonith_api); } stonith_api->cmds->remove_notification(stonith_api, NULL); } if (destroy) { if (stonith_api) { stonith_api->cmds->free(stonith_api); stonith_api = NULL; } if (controld_fencer_connect_timer) { mainloop_timer_del(controld_fencer_connect_timer); controld_fencer_connect_timer = NULL; } if (te_client_id) { free(te_client_id); te_client_id = NULL; } } } static gboolean do_stonith_history_sync(gpointer user_data) { if (stonith_api && (stonith_api->state != stonith_disconnected)) { stonith_history_t *history = NULL; te_cleanup_stonith_history_sync(stonith_api, FALSE); stonith_api->cmds->history(stonith_api, st_opt_sync_call | st_opt_broadcast, NULL, &history, 5); stonith_history_free(history); return TRUE; } else { crm_info("Skip triggering stonith history-sync as stonith is disconnected"); return FALSE; } } static void tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) { char *uuid = NULL; int stonith_id = -1; int transition_id = -1; pcmk__graph_action_t *action = NULL; const char *target = NULL; if ((data == NULL) || (data->userdata == NULL)) { crm_err("Ignoring fence operation %d result: " "No transition key given (bug?)", ((data == NULL)? 
-1 : data->call_id)); return; } if (!AM_I_DC) { const char *reason = stonith__exit_reason(data); if (reason == NULL) { reason = pcmk_exec_status_str(stonith__execution_status(data)); } crm_notice("Result of fence operation %d: %d (%s) " CRM_XS " key=%s", data->call_id, stonith__exit_status(data), reason, (const char *) data->userdata); return; } CRM_CHECK(decode_transition_key(data->userdata, &uuid, &transition_id, &stonith_id, NULL), goto bail); if (controld_globals.transition_graph->complete || (stonith_id < 0) || !pcmk__str_eq(uuid, controld_globals.te_uuid, pcmk__str_none) || (controld_globals.transition_graph->id != transition_id)) { crm_info("Ignoring fence operation %d result: " "Not from current transition " CRM_XS " complete=%s action=%d uuid=%s (vs %s) transition=%d (vs %d)", data->call_id, pcmk__btoa(controld_globals.transition_graph->complete), stonith_id, uuid, controld_globals.te_uuid, transition_id, controld_globals.transition_graph->id); goto bail; } action = controld_get_action(stonith_id); if (action == NULL) { crm_err("Ignoring fence operation %d result: " "Action %d not found in transition graph (bug?) " CRM_XS " uuid=%s transition=%d", data->call_id, stonith_id, uuid, transition_id); goto bail; } target = crm_element_value(action->xml, PCMK__META_ON_NODE); if (target == NULL) { crm_err("Ignoring fence operation %d result: No target given (bug?)", data->call_id); goto bail; } stop_te_timer(action); if (stonith__exit_status(data) == CRM_EX_OK) { const char *uuid = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); const char *op = crm_meta_value(action->params, "stonith_action"); crm_info("Fence operation %d for %s succeeded", data->call_id, target); if (!(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) { te_action_confirmed(action, NULL); if (pcmk__str_eq(PCMK_ACTION_ON, op, pcmk__str_casei)) { const char *value = NULL; char *now = pcmk__ttoa(time(NULL)); gboolean is_remote_node = FALSE; /* This check is not 100% reliable, since this node is not * guaranteed to have the remote node cached. However, it * doesn't have to be reliable, since the attribute manager can * learn a node's "remoteness" by other means sooner or later. * This allows it to learn more quickly if this node does have * the information. */ if (g_hash_table_lookup(crm_remote_peer_cache, uuid) != NULL) { is_remote_node = TRUE; } update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, is_remote_node); free(now); value = crm_meta_value(action->params, PCMK__META_DIGESTS_ALL); update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, is_remote_node); value = crm_meta_value(action->params, PCMK__META_DIGESTS_SECURE); update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, is_remote_node); } else if (!(pcmk_is_set(action->flags, pcmk__graph_action_sent_update))) { send_stonith_update(action, target, uuid); pcmk__set_graph_action_flags(action, pcmk__graph_action_sent_update); } } st_fail_count_reset(target); } else { enum pcmk__graph_next abort_action = pcmk__graph_restart; int status = stonith__execution_status(data); const char *reason = stonith__exit_reason(data); if (reason == NULL) { if (status == PCMK_EXEC_DONE) { reason = "Agent returned error"; } else { reason = pcmk_exec_status_str(status); } } pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); /* If no fence devices were available, there's no use in immediately * checking again, so don't start a new transition in that case. 
*/ if (status == PCMK_EXEC_NO_FENCE_DEVICE) { crm_warn("Fence operation %d for %s failed: %s " "(aborting transition and giving up for now)", data->call_id, target, reason); abort_action = pcmk__graph_wait; } else { crm_notice("Fence operation %d for %s failed: %s " "(aborting transition)", data->call_id, target, reason); } /* Increment the fail count now, so abort_for_stonith_failure() can * check it. Non-DC nodes will increment it in * handle_fence_notification(). */ st_fail_count_increment(target); abort_for_stonith_failure(abort_action, target, NULL); } pcmk__update_graph(controld_globals.transition_graph, action); trigger_graph(); bail: free(data->userdata); free(uuid); return; } static int fence_with_delay(const char *target, const char *type, int delay) { uint32_t options = st_opt_none; // Group of enum stonith_call_options int timeout_sec = (int) (controld_globals.transition_graph->stonith_timeout / 1000); if (crmd_join_phase_count(crm_join_confirmed) == 1) { stonith__set_call_options(options, target, st_opt_allow_suicide); } return stonith_api->cmds->fence_with_delay(stonith_api, options, target, type, timeout_sec, 0, delay); } /*! * \internal * \brief Execute a fencing action from a transition graph * * \param[in] graph Transition graph being executed (ignored) * \param[in] action Fencing action to execute * * \return Standard Pacemaker return code */ int controld_execute_fence_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { int rc = 0; const char *id = ID(action->xml); const char *uuid = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); const char *type = crm_meta_value(action->params, "stonith_action"); char *transition_key = NULL; const char *priority_delay = NULL; int delay_i = 0; gboolean invalid_action = FALSE; int stonith_timeout = (int) (controld_globals.transition_graph->stonith_timeout / 1000); CRM_CHECK(id != NULL, invalid_action = TRUE); CRM_CHECK(uuid != NULL, invalid_action = TRUE); CRM_CHECK(type != NULL, invalid_action = TRUE); CRM_CHECK(target != NULL, invalid_action = TRUE); if (invalid_action) { crm_log_xml_warn(action->xml, "BadAction"); return EPROTO; } priority_delay = crm_meta_value(action->params, PCMK_OPT_PRIORITY_FENCING_DELAY); crm_notice("Requesting fencing (%s) targeting node %s " CRM_XS " action=%s timeout=%i%s%s", type, target, id, stonith_timeout, priority_delay ? " priority_delay=" : "", priority_delay ? priority_delay : ""); /* Passing NULL means block until we can connect... */ controld_timer_fencer_connect(NULL); pcmk__scan_min_int(priority_delay, &delay_i, 0); rc = fence_with_delay(target, type, delay_i); transition_key = pcmk__transition_key(controld_globals.transition_graph->id, action->id, 0, controld_globals.te_uuid), stonith_api->cmds->register_callback(stonith_api, rc, (stonith_timeout + (delay_i > 0 ? delay_i : 0)), st_opt_timeout_updates, transition_key, "tengine_stonith_callback", tengine_stonith_callback); return pcmk_rc_ok; } bool controld_verify_stonith_watchdog_timeout(const char *value) { long st_timeout = value? 
crm_get_msec(value) : 0; const char *our_nodename = controld_globals.our_nodename; if (st_timeout == 0 || (stonith_api && (stonith_api->state != stonith_disconnected) && stonith__watchdog_fencing_enabled_for_node_api(stonith_api, our_nodename))) { return pcmk__valid_stonith_watchdog_timeout(value); } return true; } /* end stonith API client functions */ /* * stonith history synchronization * * Each node's fencer keeps track of a cluster-wide fencing history. When a node * joins or leaves, we need to synchronize the history across all nodes. */ static crm_trigger_t *stonith_history_sync_trigger = NULL; static mainloop_timer_t *stonith_history_sync_timer_short = NULL; static mainloop_timer_t *stonith_history_sync_timer_long = NULL; void te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers) { if (free_timers) { mainloop_timer_del(stonith_history_sync_timer_short); stonith_history_sync_timer_short = NULL; mainloop_timer_del(stonith_history_sync_timer_long); stonith_history_sync_timer_long = NULL; } else { mainloop_timer_stop(stonith_history_sync_timer_short); mainloop_timer_stop(stonith_history_sync_timer_long); } if (st) { st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED); } } static void tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event) { te_cleanup_stonith_history_sync(st, FALSE); crm_debug("Fence-history synced - cancel all timers"); } static gboolean stonith_history_sync_set_trigger(gpointer user_data) { mainloop_set_trigger(stonith_history_sync_trigger); return FALSE; } void te_trigger_stonith_history_sync(bool long_timeout) { /* trigger a sync in 5s to give more nodes the * chance to show up so that we don't create * unnecessary stonith-history-sync traffic * * the long timeout of 30s is there as a fallback * so that after a successful connection to fenced * we will wait for 30s for the DC to trigger a * history-sync * if this doesn't happen we trigger a sync locally * (e.g. fenced segfaults and is restarted by pacemakerd) */ /* as we are finally checking the stonith-connection * in do_stonith_history_sync we should be fine * leaving stonith_history_sync_time & stonith_history_sync_trigger * around */ if (stonith_history_sync_trigger == NULL) { stonith_history_sync_trigger = mainloop_add_trigger(G_PRIORITY_LOW, do_stonith_history_sync, NULL); } if (long_timeout) { if(stonith_history_sync_timer_long == NULL) { stonith_history_sync_timer_long = mainloop_timer_add("history_sync_long", 30000, FALSE, stonith_history_sync_set_trigger, NULL); } crm_info("Fence history will be synchronized cluster-wide within 30 seconds"); mainloop_timer_start(stonith_history_sync_timer_long); } else { if(stonith_history_sync_timer_short == NULL) { stonith_history_sync_timer_short = mainloop_timer_add("history_sync_short", 5000, FALSE, stonith_history_sync_set_trigger, NULL); } crm_info("Fence history will be synchronized cluster-wide within 5 seconds"); mainloop_timer_start(stonith_history_sync_timer_short); } } /* end stonith history synchronization functions */ diff --git a/include/crm/common/options.h b/include/crm/common/options.h index 1d8b28ceeb..bd047e18a6 100644 --- a/include/crm/common/options.h +++ b/include/crm/common/options.h @@ -1,155 +1,156 @@ /* * Copyright 2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #ifndef PCMK__CRM_COMMON_OPTIONS__H # define PCMK__CRM_COMMON_OPTIONS__H #ifdef __cplusplus extern "C" { #endif /** * \file * \brief API related to options * \ingroup core */ /* * Cluster options */ #define PCMK_OPT_BATCH_LIMIT "batch-limit" #define PCMK_OPT_CLUSTER_DELAY "cluster-delay" #define PCMK_OPT_CLUSTER_INFRASTRUCTURE "cluster-infrastructure" #define PCMK_OPT_CLUSTER_IPC_LIMIT "cluster-ipc-limit" #define PCMK_OPT_CLUSTER_NAME "cluster-name" #define PCMK_OPT_CLUSTER_RECHECK_INTERVAL "cluster-recheck-interval" #define PCMK_OPT_CONCURRENT_FENCING "concurrent-fencing" #define PCMK_OPT_DC_DEADTIME "dc-deadtime" #define PCMK_OPT_DC_VERSION "dc-version" #define PCMK_OPT_ELECTION_TIMEOUT "election-timeout" #define PCMK_OPT_ENABLE_ACL "enable-acl" #define PCMK_OPT_ENABLE_STARTUP_PROBES "enable-startup-probes" #define PCMK_OPT_FENCE_REACTION "fence-reaction" #define PCMK_OPT_HAVE_WATCHDOG "have-watchdog" #define PCMK_OPT_JOIN_FINALIZATION_TIMEOUT "join-finalization-timeout" #define PCMK_OPT_JOIN_INTEGRATION_TIMEOUT "join-integration-timeout" #define PCMK_OPT_LOAD_THRESHOLD "load-threshold" #define PCMK_OPT_MAINTENANCE_MODE "maintenance-mode" #define PCMK_OPT_MIGRATION_LIMIT "migration-limit" #define PCMK_OPT_NO_QUORUM_POLICY "no-quorum-policy" #define PCMK_OPT_NODE_ACTION_LIMIT "node-action-limit" #define PCMK_OPT_NODE_HEALTH_BASE "node-health-base" #define PCMK_OPT_NODE_HEALTH_GREEN "node-health-green" #define PCMK_OPT_NODE_HEALTH_RED "node-health-red" #define PCMK_OPT_NODE_HEALTH_STRATEGY "node-health-strategy" #define PCMK_OPT_NODE_HEALTH_YELLOW "node-health-yellow" #define PCMK_OPT_NODE_PENDING_TIMEOUT "node-pending-timeout" #define PCMK_OPT_PE_ERROR_SERIES_MAX "pe-error-series-max" #define PCMK_OPT_PE_INPUT_SERIES_MAX "pe-input-series-max" #define PCMK_OPT_PE_WARN_SERIES_MAX "pe-warn-series-max" #define PCMK_OPT_PLACEMENT_STRATEGY "placement-strategy" #define PCMK_OPT_PRIORITY_FENCING_DELAY "priority-fencing-delay" #define PCMK_OPT_SHUTDOWN_ESCALATION "shutdown-escalation" #define PCMK_OPT_SHUTDOWN_LOCK "shutdown-lock" #define PCMK_OPT_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit" #define PCMK_OPT_START_FAILURE_IS_FATAL "start-failure-is-fatal" #define PCMK_OPT_STARTUP_FENCING "startup-fencing" #define PCMK_OPT_STONITH_ACTION "stonith-action" #define PCMK_OPT_STONITH_ENABLED "stonith-enabled" #define PCMK_OPT_STONITH_MAX_ATTEMPTS "stonith-max-attempts" #define PCMK_OPT_STONITH_TIMEOUT "stonith-timeout" #define PCMK_OPT_STONITH_WATCHDOG_TIMEOUT "stonith-watchdog-timeout" #define PCMK_OPT_STOP_ALL_RESOURCES "stop-all-resources" #define PCMK_OPT_STOP_ORPHAN_ACTIONS "stop-orphan-actions" #define PCMK_OPT_STOP_ORPHAN_RESOURCES "stop-orphan-resources" #define PCMK_OPT_SYMMETRIC_CLUSTER "symmetric-cluster" #define PCMK_OPT_TRANSITION_DELAY "transition-delay" /* * Meta-attributes */ #define PCMK_META_ALLOW_MIGRATE "allow-migrate" #define PCMK_META_ALLOW_UNHEALTHY_NODES "allow-unhealthy-nodes" #define PCMK_META_CLONE_MAX "clone-max" #define PCMK_META_CLONE_MIN "clone-min" #define PCMK_META_CLONE_NODE_MAX "clone-node-max" #define PCMK_META_CONTAINER_ATTR_TARGET "container-attribute-target" #define PCMK_META_CRITICAL "critical" #define PCMK_META_ENABLED "enabled" #define PCMK_META_FAILURE_TIMEOUT "failure-timeout" #define PCMK_META_GLOBALLY_UNIQUE "globally-unique" #define PCMK_META_INTERLEAVE "interleave" #define PCMK_META_INTERVAL "interval" #define PCMK_META_IS_MANAGED "is-managed" #define PCMK_META_INTERVAL_ORIGIN "interval-origin" #define PCMK_META_MAINTENANCE "maintenance" #define 
PCMK_META_MIGRATION_THRESHOLD "migration-threshold" #define PCMK_META_MULTIPLE_ACTIVE "multiple-active" #define PCMK_META_NOTIFY "notify" #define PCMK_META_ON_FAIL "on-fail" #define PCMK_META_ORDERED "ordered" #define PCMK_META_PRIORITY "priority" #define PCMK_META_PROMOTABLE "promotable" #define PCMK_META_PROMOTED_MAX "promoted-max" #define PCMK_META_PROMOTED_NODE_MAX "promoted-node-max" #define PCMK_META_RECORD_PENDING "record-pending" #define PCMK_META_REMOTE_ADDR "remote-addr" #define PCMK_META_REMOTE_ALLOW_MIGRATE "remote-allow-migrate" #define PCMK_META_REMOTE_CONNECT_TIMEOUT "remote-connect-timeout" #define PCMK_META_REMOTE_NODE "remote-node" #define PCMK_META_REMOTE_PORT "remote-port" #define PCMK_META_REQUIRES "requires" #define PCMK_META_RESOURCE_STICKINESS "resource-stickiness" #define PCMK_META_START_DELAY "start-delay" #define PCMK_META_TARGET_ROLE "target-role" #define PCMK_META_TIMEOUT "timeout" #define PCMK_META_TIMESTAMP_FORMAT "timestamp-format" /* * Remote resource instance attributes */ #define PCMK_REMOTE_RA_ADDR "addr" #define PCMK_REMOTE_RA_PORT "port" #define PCMK_REMOTE_RA_RECONNECT_INTERVAL "reconnect_interval" #define PCMK_REMOTE_RA_SERVER "server" /* * Enumerated values */ #define PCMK_VALUE_ALWAYS "always" #define PCMK_VALUE_AND "and" #define PCMK_VALUE_BLOCK "block" #define PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS "cib-bootstrap-options" #define PCMK_VALUE_DEMOTE "demote" #define PCMK_VALUE_DENY "deny" #define PCMK_VALUE_EXCLUSIVE "exclusive" #define PCMK_VALUE_FALSE "false" #define PCMK_VALUE_IGNORE "ignore" #define PCMK_VALUE_LITERAL "literal" #define PCMK_VALUE_META "meta" #define PCMK_VALUE_NEVER "never" #define PCMK_VALUE_OR "or" #define PCMK_VALUE_PARAM "param" #define PCMK_VALUE_READ "read" +#define PCMK_VALUE_STOP "stop" #define PCMK_VALUE_TRUE "true" #define PCMK_VALUE_WRITE "write" #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_OPTIONS__H diff --git a/lib/common/options.c b/lib/common/options.c index 46eee377ec..afecd4de25 100644 --- a/lib/common/options.c +++ b/lib/common/options.c @@ -1,1153 +1,1153 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include void pcmk__cli_help(char cmd) { if (cmd == 'v' || cmd == '$') { printf("Pacemaker %s\n", PACEMAKER_VERSION); printf("Written by Andrew Beekhof and " "the Pacemaker project contributors\n"); } else if (cmd == '!') { printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); } crm_exit(CRM_EX_OK); while(1); // above does not return } /* * Option metadata */ static pcmk__cluster_option_t cluster_options[] = { /* name, old name, type, allowed values, * default value, validator, * context, * short description, * long description */ { PCMK_OPT_DC_VERSION, NULL, "string", NULL, PCMK__VALUE_NONE, NULL, pcmk__opt_context_controld, N_("Pacemaker version on cluster node elected Designated Controller " "(DC)"), N_("Includes a hash which identifies the exact changeset the code was " "built from. 
Used for diagnostic purposes."), }, { PCMK_OPT_CLUSTER_INFRASTRUCTURE, NULL, "string", NULL, "corosync", NULL, pcmk__opt_context_controld, N_("The messaging stack on which Pacemaker is currently running"), N_("Used for informational and diagnostic purposes."), }, { PCMK_OPT_CLUSTER_NAME, NULL, "string", NULL, NULL, NULL, pcmk__opt_context_controld, N_("An arbitrary name for the cluster"), N_("This optional value is mostly for users' convenience as desired " "in administration, but may also be used in Pacemaker " "configuration rules via the #cluster-name node attribute, and " "by higher-level tools and resource agents."), }, { PCMK_OPT_DC_DEADTIME, NULL, "time", NULL, "20s", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("How long to wait for a response from other nodes during start-up"), N_("The optimal value will depend on the speed and load of your " "network and the type of switches used."), }, { PCMK_OPT_CLUSTER_RECHECK_INTERVAL, NULL, "time", N_("Zero disables polling, while positive values are an interval in " "seconds (unless other units are specified, for example \"5min\")"), "15min", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("Polling interval to recheck cluster state and evaluate rules " "with date specifications"), N_("Pacemaker is primarily event-driven, and looks ahead to know when " "to recheck cluster state for failure timeouts and most time-based " "rules. However, it will also recheck the cluster after this " "amount of inactivity, to evaluate rules with date specifications " "and serve as a fail-safe for certain types of scheduler bugs."), }, { - PCMK_OPT_FENCE_REACTION, NULL, "select", "stop, panic", - "stop", NULL, + PCMK_OPT_FENCE_REACTION, NULL, "select", PCMK_VALUE_STOP ", panic", + PCMK_VALUE_STOP, NULL, pcmk__opt_context_controld, N_("How a cluster node should react if notified of its own fencing"), N_("A cluster node may receive notification of its own fencing if " "fencing is misconfigured, or if fabric fencing is in use that " "doesn't cut cluster communication. Use \"stop\" to attempt to " "immediately stop Pacemaker and stay stopped, or \"panic\" to " "attempt to immediately reboot the local node, falling back to " "stop on failure."), }, { PCMK_OPT_ELECTION_TIMEOUT, NULL, "time", NULL, "2min", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("*** Advanced Use Only ***"), N_("Declare an election failed if it is not decided within this much " "time. If you need to adjust this value, it probably indicates " "the presence of a bug."), }, { PCMK_OPT_SHUTDOWN_ESCALATION, NULL, "time", NULL, "20min", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("*** Advanced Use Only ***"), N_("Exit immediately if shutdown does not complete within this much " "time. 
If you need to adjust this value, it probably indicates " "the presence of a bug."), }, { PCMK_OPT_JOIN_INTEGRATION_TIMEOUT, "crmd-integration-timeout", "time", NULL, "3min", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("*** Advanced Use Only ***"), N_("If you need to adjust this value, it probably indicates " "the presence of a bug."), }, { PCMK_OPT_JOIN_FINALIZATION_TIMEOUT, "crmd-finalization-timeout", "time", NULL, "30min", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("*** Advanced Use Only ***"), N_("If you need to adjust this value, it probably indicates " "the presence of a bug."), }, { PCMK_OPT_TRANSITION_DELAY, "crmd-transition-delay", "time", NULL, "0s", pcmk__valid_interval_spec, pcmk__opt_context_controld, N_("*** Advanced Use Only *** " "Enabling this option will slow down cluster recovery under all " "conditions"), N_("Delay cluster recovery for this much time to allow for additional " "events to occur. Useful if your configuration is sensitive to " "the order in which ping updates arrive."), }, { PCMK_OPT_NO_QUORUM_POLICY, NULL, "select", - "stop, freeze, " PCMK_VALUE_IGNORE ", " PCMK_VALUE_DEMOTE - ", suicide", - "stop", pcmk__valid_no_quorum_policy, + PCMK_VALUE_STOP ", freeze, " PCMK_VALUE_IGNORE + ", " PCMK_VALUE_DEMOTE ", suicide", + PCMK_VALUE_STOP, pcmk__valid_no_quorum_policy, pcmk__opt_context_schedulerd, N_("What to do when the cluster does not have quorum"), NULL, }, { PCMK_OPT_SHUTDOWN_LOCK, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether to lock resources to a cleanly shut down node"), N_("When true, resources active on a node when it is cleanly shut down " "are kept \"locked\" to that node (not allowed to run elsewhere) " "until they start again on that node after it rejoins (or for at " "most shutdown-lock-limit, if set). Stonith resources and " "Pacemaker Remote connections are never locked. Clone and bundle " "instances and the promoted role of promotable clones are " "currently never locked, though support could be added in a future " "release."), }, { PCMK_OPT_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL, "0", pcmk__valid_interval_spec, pcmk__opt_context_schedulerd, N_("Do not lock resources to a cleanly shut down node longer than " "this"), N_("If shutdown-lock is true and this is set to a nonzero time " "duration, shutdown locks will expire after this much time has " "passed since the shutdown was initiated, even if the node has not " "rejoined."), }, { PCMK_OPT_ENABLE_ACL, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_based, N_("Enable Access Control Lists (ACLs) for the CIB"), NULL, }, { PCMK_OPT_SYMMETRIC_CLUSTER, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether resources can run on any node by default"), NULL, }, { PCMK_OPT_MAINTENANCE_MODE, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether the cluster should refrain from monitoring, starting, and " "stopping resources"), NULL, }, { PCMK_OPT_START_FAILURE_IS_FATAL, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether a start failure should prevent a resource from being " "recovered on the same node"), N_("When true, the cluster will immediately ban a resource from a node " "if it fails to start there. 
When false, the cluster will instead " "check the resource's fail count against its migration-threshold.") }, { PCMK_OPT_ENABLE_STARTUP_PROBES, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether the cluster should check for active resources during " "start-up"), NULL, }, // Fencing-related options { PCMK_OPT_STONITH_ENABLED, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("*** Advanced Use Only *** " "Whether nodes may be fenced as part of recovery"), N_("If false, unresponsive nodes are immediately assumed to be " "harmless, and resources that were active on them may be recovered " "elsewhere. This can result in a \"split-brain\" situation, " "potentially leading to data loss and/or service unavailability."), }, { PCMK_OPT_STONITH_ACTION, NULL, "select", "reboot, off, poweroff", PCMK_ACTION_REBOOT, pcmk__is_fencing_action, pcmk__opt_context_schedulerd, N_("Action to send to fence device when a node needs to be fenced " "(\"poweroff\" is a deprecated alias for \"off\")"), NULL, }, { PCMK_OPT_STONITH_TIMEOUT, NULL, "time", NULL, "60s", pcmk__valid_interval_spec, pcmk__opt_context_schedulerd, N_("How long to wait for on, off, and reboot fence actions to complete " "by default"), NULL, }, { PCMK_OPT_HAVE_WATCHDOG, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether watchdog integration is enabled"), N_("This is set automatically by the cluster according to whether SBD " "is detected to be in use. User-configured values are ignored. " "The value `true` is meaningful if diskless SBD is used and " "`stonith-watchdog-timeout` is nonzero. In that case, if fencing " "is required, watchdog-based self-fencing will be performed via " "SBD without requiring a fencing resource explicitly configured."), }, { /* @COMPAT Currently, unparsable values default to -1 (auto-calculate), * while missing values default to 0 (disable). All values are accepted * (unless the controller finds that the value conflicts with the * SBD_WATCHDOG_TIMEOUT). * * At a compatibility break: properly validate as a timeout, let * either negative values or a particular string like "auto" mean auto- * calculate, and use 0 as the single default for when the option either * is unset or fails to validate. */ PCMK_OPT_STONITH_WATCHDOG_TIMEOUT, NULL, "time", NULL, "0", NULL, pcmk__opt_context_controld, N_("How long before nodes can be assumed to be safely down when " "watchdog-based self-fencing via SBD is in use"), N_("If this is set to a positive value, lost nodes are assumed to " "self-fence using watchdog-based SBD within this much time. This " "does not require a fencing resource to be explicitly configured, " "though a fence_watchdog resource can be configured, to limit use " "to specific nodes. If this is set to 0 (the default), the cluster " "will never assume watchdog-based self-fencing. If this is set to a " "negative value, the cluster will use twice the local value of the " "`SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, " "or otherwise treat this as 0. WARNING: When used, this timeout " "must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use " "watchdog-based SBD, and Pacemaker will refuse to start on any of " "those nodes where this is not true for the local value or SBD is " "not active. 
When this is set to a negative value, " "`SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes " "that use SBD, otherwise data corruption or loss could occur."), }, { PCMK_OPT_STONITH_MAX_ATTEMPTS, NULL, "integer", NULL, "10", pcmk__valid_positive_int, pcmk__opt_context_controld, N_("How many times fencing can fail before it will no longer be " "immediately re-attempted on a target"), NULL, }, { PCMK_OPT_CONCURRENT_FENCING, NULL, "boolean", NULL, PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Allow performing fencing operations in parallel"), NULL, }, { PCMK_OPT_STARTUP_FENCING, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("*** Advanced Use Only *** " "Whether to fence unseen nodes at start-up"), N_("Setting this to false may lead to a \"split-brain\" situation, " "potentially leading to data loss and/or service unavailability."), }, { PCMK_OPT_PRIORITY_FENCING_DELAY, NULL, "time", NULL, "0", pcmk__valid_interval_spec, pcmk__opt_context_schedulerd, N_("Apply fencing delay targeting the lost nodes with the highest " "total resource priority"), N_("Apply specified delay for the fencings that are targeting the lost " "nodes with the highest total resource priority in case we don't " "have the majority of the nodes in our cluster partition, so that " "the more significant nodes potentially win any fencing match, " "which is especially meaningful under split-brain of 2-node " "cluster. A promoted resource instance takes the base priority + 1 " "on calculation if the base priority is not 0. Any static/random " "delays that are introduced by `pcmk_delay_base/max` configured " "for the corresponding fencing resources will be added to this " "delay. This delay should be significantly greater than, safely " "twice, the maximum `pcmk_delay_base/max`. By default, priority " "fencing delay is disabled."), }, { PCMK_OPT_NODE_PENDING_TIMEOUT, NULL, "time", NULL, "0", pcmk__valid_interval_spec, pcmk__opt_context_schedulerd, N_("How long to wait for a node that has joined the cluster to join " "the controller process group"), N_("Fence nodes that do not join the controller process group within " "this much time after joining the cluster, to allow the cluster " "to continue managing resources. A value of 0 means never fence " "pending nodes. Setting the value to 2h means fence nodes after " "2 hours."), }, { PCMK_OPT_CLUSTER_DELAY, NULL, "time", NULL, "60s", pcmk__valid_interval_spec, pcmk__opt_context_schedulerd, N_("Maximum time for node-to-node communication"), N_("The node elected Designated Controller (DC) will consider an action " "failed if it does not get a response from the node executing the " "action within this time (after considering the action's own " "timeout). 
The \"correct\" value will depend on the speed and " "load of your network and cluster nodes.") }, // Limits { PCMK_OPT_LOAD_THRESHOLD, NULL, "percentage", NULL, "80%", pcmk__valid_percentage, pcmk__opt_context_controld, N_("Maximum amount of system load that should be used by cluster " "nodes"), N_("The cluster will slow down its recovery process when the amount of " "system resources used (currently CPU) approaches this limit"), }, { PCMK_OPT_NODE_ACTION_LIMIT, NULL, "integer", NULL, "0", pcmk__valid_int, pcmk__opt_context_controld, N_("Maximum number of jobs that can be scheduled per node (defaults to " "2x cores)"), NULL, }, { PCMK_OPT_BATCH_LIMIT, NULL, "integer", NULL, "0", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("Maximum number of jobs that the cluster may execute in parallel " "across all nodes"), N_("The \"correct\" value will depend on the speed and load of your " "network and cluster nodes. If set to 0, the cluster will " "impose a dynamically calculated limit when any node has a " "high load."), }, { PCMK_OPT_MIGRATION_LIMIT, NULL, "integer", NULL, "-1", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The number of live migration actions that the cluster is allowed " "to execute in parallel on a node (-1 means no limit)"), NULL, }, { PCMK_OPT_CLUSTER_IPC_LIMIT, NULL, "integer", NULL, "500", pcmk__valid_positive_int, pcmk__opt_context_based, N_("Maximum IPC message backlog before disconnecting a cluster daemon"), N_("Raise this if log has \"Evicting client\" messages for cluster " "daemon PIDs (a good value is the number of resources in the " "cluster multiplied by the number of nodes)."), }, // Orphans and stopping { PCMK_OPT_STOP_ALL_RESOURCES, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether the cluster should stop all active resources"), NULL, }, { PCMK_OPT_STOP_ORPHAN_RESOURCES, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether to stop resources that were removed from the " "configuration"), NULL, }, { PCMK_OPT_STOP_ORPHAN_ACTIONS, NULL, "boolean", NULL, PCMK_VALUE_TRUE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("Whether to cancel recurring actions removed from the " "configuration"), NULL, }, { PCMK__OPT_REMOVE_AFTER_STOP, NULL, "boolean", NULL, PCMK_VALUE_FALSE, pcmk__valid_boolean, pcmk__opt_context_schedulerd, N_("*** Deprecated *** " "Whether to remove stopped resources from the executor"), N_("Values other than default are poorly tested and potentially " "dangerous. 
This option will be removed in a future release."), }, // Storing inputs { PCMK_OPT_PE_ERROR_SERIES_MAX, NULL, "integer", NULL, "-1", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The number of scheduler inputs resulting in errors to save"), N_("Zero to disable, -1 to store unlimited."), }, { PCMK_OPT_PE_WARN_SERIES_MAX, NULL, "integer", NULL, "5000", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The number of scheduler inputs resulting in warnings to save"), N_("Zero to disable, -1 to store unlimited."), }, { PCMK_OPT_PE_INPUT_SERIES_MAX, NULL, "integer", NULL, "4000", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The number of scheduler inputs without errors or warnings to save"), N_("Zero to disable, -1 to store unlimited."), }, // Node health { PCMK_OPT_NODE_HEALTH_STRATEGY, NULL, "select", PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", " PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", " PCMK__VALUE_CUSTOM, PCMK__VALUE_NONE, pcmk__validate_health_strategy, pcmk__opt_context_schedulerd, N_("How cluster should react to node health attributes"), N_("Requires external entities to create node attributes (named with " "the prefix \"#health\") with values \"red\", \"yellow\", or " "\"green\".") }, { PCMK_OPT_NODE_HEALTH_BASE, NULL, "integer", NULL, "0", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("Base health score assigned to a node"), N_("Only used when \"node-health-strategy\" is set to " "\"progressive\"."), }, { PCMK_OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL, "0", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The score to use for a node health attribute whose value is " "\"green\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or " "\"progressive\"."), }, { PCMK_OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL, "0", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The score to use for a node health attribute whose value is " "\"yellow\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or " "\"progressive\"."), }, { PCMK_OPT_NODE_HEALTH_RED, NULL, "integer", NULL, "-INFINITY", pcmk__valid_int, pcmk__opt_context_schedulerd, N_("The score to use for a node health attribute whose value is " "\"red\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or " "\"progressive\".") }, // Placement strategy { PCMK_OPT_PLACEMENT_STRATEGY, NULL, "select", "default, utilization, minimal, balanced", "default", pcmk__valid_placement_strategy, pcmk__opt_context_schedulerd, N_("How the cluster should allocate resources to nodes"), NULL, }, }; /* * Environment variable option handling */ /*! * \internal * \brief Get the value of a Pacemaker environment variable option * * If an environment variable option is set, with either a PCMK_ or (for * backward compatibility) HA_ prefix, log and return the value. 
* * \param[in] option Environment variable name (without prefix) * * \return Value of environment variable option, or NULL in case of * option name too long or value not found */ const char * pcmk__env_option(const char *option) { const char *const prefixes[] = {"PCMK_", "HA_"}; char env_name[NAME_MAX]; const char *value = NULL; CRM_CHECK(!pcmk__str_empty(option), return NULL); for (int i = 0; i < PCMK__NELEM(prefixes); i++) { int rv = snprintf(env_name, NAME_MAX, "%s%s", prefixes[i], option); if (rv < 0) { crm_err("Failed to write %s%s to buffer: %s", prefixes[i], option, strerror(errno)); return NULL; } if (rv >= sizeof(env_name)) { crm_trace("\"%s%s\" is too long", prefixes[i], option); continue; } value = getenv(env_name); if (value != NULL) { crm_trace("Found %s = %s", env_name, value); return value; } } crm_trace("Nothing found for %s", option); return NULL; } /*! * \brief Set or unset a Pacemaker environment variable option * * Set an environment variable option with a \c "PCMK_" prefix and optionally * an \c "HA_" prefix for backward compatibility. * * \param[in] option Environment variable name (without prefix) * \param[in] value New value (or NULL to unset) * \param[in] compat If false and \p value is not \c NULL, set only * \c "PCMK_
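
The substantive change in this patch is small: the bare "stop" string literal used for the fence-reaction and no-quorum-policy options is replaced by a new PCMK_VALUE_STOP constant defined in options.h, so the value is spelled in exactly one place. The sketch below is a minimal, self-contained illustration of the parsing pattern that set_fence_reaction() follows; it is not Pacemaker code — strcasecmp() and the local VALUE_* macros stand in for pcmk__str_eq(..., pcmk__str_casei) and the real PCMK_VALUE_STOP header constant, which are assumptions for the example only.

```c
/* Standalone sketch of the fence-reaction parsing pattern this patch touches.
 * NOT Pacemaker code: strcasecmp() stands in for pcmk__str_eq() with
 * pcmk__str_casei, and the macros below mimic the idea of PCMK_VALUE_STOP
 * without pulling in the real headers. */
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

#define VALUE_STOP  "stop"   /* analogous to PCMK_VALUE_STOP in options.h */
#define VALUE_PANIC "panic"

/* Return true if the configured reaction is "panic"; any other value
 * (including an unrecognized one, which is logged) falls back to "stop",
 * mirroring the behavior of set_fence_reaction() in the patch above. */
static bool
parse_fence_reaction(const char *reaction_s)
{
    if ((reaction_s != NULL) && (strcasecmp(reaction_s, VALUE_PANIC) == 0)) {
        return true;
    }
    if ((reaction_s == NULL) || (strcasecmp(reaction_s, VALUE_STOP) != 0)) {
        fprintf(stderr, "Invalid value '%s' for fence-reaction, using 'stop'\n",
                (reaction_s == NULL)? "" : reaction_s);
    }
    return false;
}

int
main(void)
{
    printf("panic -> %d\n", parse_fence_reaction("PANIC")); /* 1: panic */
    printf("stop  -> %d\n", parse_fence_reaction("stop"));  /* 0: stop */
    printf("junk  -> %d\n", parse_fence_reaction("junk"));  /* 0: warns, stop */
    return 0;
}
```

Centralizing the literal in a PCMK_VALUE_* constant keeps the option's metadata string ("stop, panic" in cluster_options[]), its default value, and the comparison in the controller from drifting apart if the spelling ever changes.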