diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c index d692ef620f..d67533c56c 100644 --- a/daemons/controld/controld_remote_ra.c +++ b/daemons/controld/controld_remote_ra.c @@ -1,1473 +1,1474 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #define REMOTE_LRMD_RA "remote" /* The max start timeout before cmd retry */ #define MAX_START_TIMEOUT_MS 10000 #define cmd_set_flags(cmd, flags_to_set) do { \ (cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Remote command", (cmd)->rsc_id, (cmd)->status, \ (flags_to_set), #flags_to_set); \ } while (0) #define cmd_clear_flags(cmd, flags_to_clear) do { \ (cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Remote command", (cmd)->rsc_id, (cmd)->status, \ (flags_to_clear), #flags_to_clear); \ } while (0) enum remote_cmd_status { cmd_reported_success = (1 << 0), cmd_cancel = (1 << 1), }; typedef struct remote_ra_cmd_s { /*! the local node the cmd is issued from */ char *owner; /*! the remote node the cmd is executed on */ char *rsc_id; /*! the action to execute */ char *action; /*! some string the client wants us to give it back */ char *userdata; /*! start delay in ms */ int start_delay; /*! timer id used for start delay. */ int delay_id; /*! timeout in ms for cmd */ int timeout; int remaining_timeout; /*! recurring interval in ms */ guint interval_ms; /*! interval timer id */ int interval_id; int monitor_timeout_id; int takeover_timeout_id; /*! action parameters */ lrmd_key_value_t *params; pcmk__action_result_t result; int call_id; time_t start_time; uint32_t status; } remote_ra_cmd_t; #define lrm_remote_set_flags(lrm_state, flags_to_set) do { \ lrm_state_t *lrm = (lrm_state); \ remote_ra_data_t *ra = lrm->remote_ra_data; \ ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \ lrm->node_name, ra->status, \ (flags_to_set), #flags_to_set); \ } while (0) #define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \ lrm_state_t *lrm = (lrm_state); \ remote_ra_data_t *ra = lrm->remote_ra_data; \ ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \ lrm->node_name, ra->status, \ (flags_to_clear), #flags_to_clear); \ } while (0) enum remote_status { expect_takeover = (1 << 0), takeover_complete = (1 << 1), remote_active = (1 << 2), /* Maintenance mode is difficult to determine from the controller's context, * so we have it signalled back with the transition from the scheduler. */ remote_in_maint = (1 << 3), /* Similar for whether we are controlling a guest node or remote node. * Fortunately there is a meta-attribute in the transition already and * as the situation doesn't change over time we can use the * resource start for noting down the information for later use when * the attributes aren't at hand. 
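A note for readers on the flag helpers above: cmd_set_flags()/cmd_clear_flags() and the lrm_remote_* variants wrap pcmk__set_flags_as()/pcmk__clear_flags_as(), which are essentially traced bitwise updates of a status word. A minimal standalone sketch of the idea follows; demo_set_flags() is a hypothetical simplification, not the real libcrmcommon signature:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum demo_cmd_status {
    cmd_reported_success = (1 << 0),
    cmd_cancel           = (1 << 1),
};

/* Hypothetical stand-in for pcmk__set_flags_as(): OR the bits in and log
 * the before/after values so flag transitions can be traced.
 */
static uint32_t
demo_set_flags(const char *what, uint32_t status, uint32_t to_set,
               const char *names)
{
    uint32_t result = status | to_set;

    printf("%s: 0x%08" PRIx32 " -> 0x%08" PRIx32 " (%s)\n",
           what, status, result, names);
    return result;
}

int
main(void)
{
    uint32_t status = 0;

    status = demo_set_flags("Remote command", status, cmd_cancel,
                            "cmd_cancel");
    return 0;
}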
*/ controlling_guest = (1 << 4), }; typedef struct remote_ra_data_s { crm_trigger_t *work; remote_ra_cmd_t *cur_cmd; GList *cmds; GList *recurring_cmds; uint32_t status; } remote_ra_data_t; static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms); static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd); static GList *fail_all_monitor_cmds(GList * list); static void free_cmd(gpointer user_data) { remote_ra_cmd_t *cmd = user_data; if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->interval_id) { g_source_remove(cmd->interval_id); } if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); } if (cmd->takeover_timeout_id) { g_source_remove(cmd->takeover_timeout_id); } free(cmd->owner); free(cmd->rsc_id); free(cmd->action); free(cmd->userdata); pcmk__reset_result(&(cmd->result)); lrmd_key_value_freeall(cmd->params); free(cmd); } static int generate_callid(void) { static int remote_ra_callid = 0; remote_ra_callid++; if (remote_ra_callid <= 0) { remote_ra_callid = 1; } return remote_ra_callid; } static gboolean recurring_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->interval_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd); ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); } return FALSE; } static gboolean start_delay_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->delay_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; mainloop_set_trigger(ra_data->work); } return FALSE; } static bool should_purge_attributes(crm_node_t *node) { bool purge = true; crm_node_t *conn_node = NULL; lrm_state_t *connection_rsc = NULL; if (!node->conn_host) { return purge; } /* Get the node that was hosting the remote connection resource from the * peer cache. That's the one we really care about here. */ conn_node = crm_get_peer(0, node->conn_host); if (conn_node == NULL) { return purge; } /* Check the uptime of connection_rsc. If it hasn't been running long * enough, set purge=true. "Long enough" means it started running earlier * than the timestamp when we noticed it went away in the first place. */ connection_rsc = lrm_state_find(node->uname); if (connection_rsc != NULL) { lrmd_t *lrm = connection_rsc->conn; time_t uptime = lrmd__uptime(lrm); time_t now = time(NULL); /* Add 20s of fuzziness to give corosync a while to notice the remote * host is gone. On various error conditions (failure to get uptime, * peer_lost isn't set) we default to purging. 
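To make the uptime check above concrete, here is the same comparison as a standalone sketch with invented timestamps: the connection has been up for 50s and the peer loss was noticed 30s ago, so the connection predates the loss and the attributes survive:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
    time_t now = 1030;       /* hypothetical current time */
    time_t peer_lost = 1000; /* when membership noticed the host was gone */
    time_t uptime = 50;      /* connection started at t=980, before the loss */
    bool purge = true;

    /* Same shape as the check in should_purge_attributes(): keep the
     * attributes only if the connection started before (or within 20s
     * of) the moment the peer was seen to be lost.
     */
    if ((uptime > 0) && (peer_lost > 0)
        && (uptime + 20 >= now - peer_lost)) {
        purge = false;
    }
    printf("purge=%s\n", purge? "true" : "false");
    return 0;
}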
*/ if (uptime > 0 && conn_node->peer_lost > 0 && uptime + 20 >= now - conn_node->peer_lost) { purge = false; } } return purge; } static enum controld_section_e section_to_delete(bool purge) { if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { if (purge) { return controld_section_all_unlocked; } else { return controld_section_lrm_unlocked; } } else { if (purge) { return controld_section_all; } else { return controld_section_lrm; } } } static void purge_remote_node_attrs(int call_opt, crm_node_t *node) { bool purge = should_purge_attributes(node); enum controld_section_e section = section_to_delete(purge); /* Purge node from attrd's memory */ if (purge) { update_attrd_remote_node_removed(node->uname, NULL); } controld_delete_node_state(node->uname, section, call_opt); } /*! * \internal * \brief Handle cluster communication related to pacemaker_remote node joining * * \param[in] node_name Name of newly integrated pacemaker_remote node */ static void remote_node_up(const char *node_name) { int call_opt; xmlNode *update, *state; crm_node_t *node; lrm_state_t *connection_rsc = NULL; CRM_CHECK(node_name != NULL, return); crm_info("Announcing Pacemaker Remote node %s", node_name); call_opt = crmd_cib_smart_opt(); /* Delete node's probe_complete attribute. This serves two purposes: * * - @COMPAT DCs < 1.1.14 in a rolling upgrade might use it * - deleting it (or any attribute for that matter) here ensures the * attribute manager learns the node is remote */ update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); /* Ensure node is in the remote peer cache with member status */ node = crm_remote_peer_get(node_name); CRM_CHECK(node != NULL, return); purge_remote_node_attrs(call_opt, node); pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); /* Apply any start state that we were given from the environment on the * remote node. */ connection_rsc = lrm_state_find(node->uname); if (connection_rsc != NULL) { lrmd_t *lrm = connection_rsc->conn; const char *start_state = lrmd__node_start_state(lrm); if (start_state) { set_join_state(start_state, node->uname, node->uuid, true); } } /* pacemaker_remote nodes don't participate in the membership layer, * so cluster nodes don't automatically get notified when they come and go. * We send a cluster message to the DC, and update the CIB node state entry, * so the DC will get it sooner (via message) or later (via CIB refresh), * and any other interested parties can query the CIB. */ broadcast_remote_state_message(node_name, true); update = create_xml_node(NULL, XML_CIB_TAG_STATUS); state = create_node_state_update(node, node_update_cluster, update, __func__); /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever * needs to be fenced, this flag will allow various actions to determine * whether the fencing has happened yet. */ crm_xml_add(state, XML_NODE_IS_FENCED, "0"); /* TODO: If the remote connection drops, and this (async) CIB update either * failed or has not yet completed, later actions could mistakenly think the * node has already been fenced (if the XML_NODE_IS_FENCED attribute was * previously set, because it won't have been cleared). This could prevent * actual fencing or allow recurring monitor failures to be cleared too * soon. Ideally, we wouldn't rely on the CIB for the fenced status. */ controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL); free_xml(update); } enum down_opts { DOWN_KEEP_LRM, DOWN_ERASE_LRM }; /*! 
* \internal * \brief Handle cluster communication related to pacemaker_remote node leaving * * \param[in] node_name Name of lost node * \param[in] opts Whether to keep or erase LRM history */ static void remote_node_down(const char *node_name, const enum down_opts opts) { xmlNode *update; int call_opt = crmd_cib_smart_opt(); crm_node_t *node; /* Purge node from attrd's memory */ update_attrd_remote_node_removed(node_name, NULL); /* Normally, only node attributes should be erased, and the resource history * should be kept until the node comes back up. However, after a successful * fence, we want to clear the history as well, so we don't think resources * are still running on the node. */ if (opts == DOWN_ERASE_LRM) { controld_delete_node_state(node_name, controld_section_all, call_opt); } else { controld_delete_node_state(node_name, controld_section_attrs, call_opt); } /* Ensure node is in the remote peer cache with lost state */ node = crm_remote_peer_get(node_name); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0); /* Notify DC */ broadcast_remote_state_message(node_name, false); /* Update CIB node state */ update = create_xml_node(NULL, XML_CIB_TAG_STATUS); create_node_state_update(node, node_update_cluster, update, __func__); controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL); free_xml(update); } /*! * \internal * \brief Handle effects of a remote RA command on node state * * \param[in] cmd Completed remote RA command */ static void check_remote_node_state(const remote_ra_cmd_t *cmd) { /* Only successful actions can change node state */ if (!pcmk__result_ok(&(cmd->result))) { return; } if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { remote_node_up(cmd->rsc_id); } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM, pcmk__str_casei)) { /* After a successful migration, we don't need to do remote_node_up() * because the DC already knows the node is up, and we don't want to * clear LRM history etc. We do need to add the remote node to this * host's remote peer cache, because (unless it happens to be DC) * it hasn't been tracking the remote node, and other code relies on * the cache to distinguish remote nodes from unseen cluster nodes. */ crm_node_t *node = crm_remote_peer_get(cmd->rsc_id); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id); remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL; if (ra_data) { if (!pcmk_is_set(ra_data->status, takeover_complete)) { /* Stop means down if we didn't successfully migrate elsewhere */ remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM); } else if (AM_I_DC == FALSE) { /* Only the connection host and DC track node state, * so if the connection migrated elsewhere and we aren't DC, * un-cache the node, so we don't have stale info */ crm_remote_peer_cache_remove(cmd->rsc_id); } } } /* We don't do anything for successful monitors, which is correct for * routine recurring monitors, and for monitors on nodes where the * connection isn't supposed to be (the cluster will stop the connection in * that case). However, if the initial probe finds the connection already * active on the node where we want it, we probably should do * remote_node_up(). Unfortunately, we can't distinguish that case here. 
* Given that connections have to be initiated by the cluster, the chance of * that should be close to zero. */ } static void report_remote_ra_result(remote_ra_cmd_t * cmd) { lrmd_event_data_t op = { 0, }; check_remote_node_state(cmd); op.type = lrmd_event_exec_complete; op.rsc_id = cmd->rsc_id; op.op_type = cmd->action; op.user_data = cmd->userdata; op.timeout = cmd->timeout; op.interval_ms = cmd->interval_ms; op.t_run = (unsigned int) cmd->start_time; op.t_rcchange = (unsigned int) cmd->start_time; lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status, cmd->result.exit_reason); if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) { op.t_rcchange = (unsigned int) time(NULL); /* This edge case should never occur, but if it did, a failure would not * be processed correctly: a connection resource's TCP connection can be * detected as failed at any moment after start completes, and the actual * recurring operation is just a connectivity ping, so the first successful * monitor op and a subsequent failed monitor op are not guaranteed to have * distinct timestamps. Make the operations appear to occur at separate * times. */ if (op.t_rcchange == op.t_run) { op.t_rcchange++; } } if (cmd->params) { lrmd_key_value_t *tmp; op.params = pcmk__strkey_table(free, free); for (tmp = cmd->params; tmp; tmp = tmp->next) { g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value)); } } op.call_id = cmd->call_id; op.remote_nodename = cmd->owner; lrm_op_callback(&op); if (op.params) { g_hash_table_destroy(op.params); } lrmd__reset_result(&op); } static void update_remaining_timeout(remote_ra_cmd_t * cmd) { cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000; } static gboolean retry_start_cmd_cb(gpointer data) { lrm_state_t *lrm_state = data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd = NULL; int rc = ETIME; if (!ra_data || !ra_data->cur_cmd) { return FALSE; } cmd = ra_data->cur_cmd; if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { return FALSE; } update_remaining_timeout(cmd); if (cmd->remaining_timeout > 0) { rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout); } else { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Not enough time remains to retry remote connection"); } if (rc != pcmk_rc_ok) { report_remote_ra_result(cmd); if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); } else { /* wait for connection event */ } return FALSE; } static gboolean connection_takeover_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; crm_info("takeover event timed out for node %s", cmd->rsc_id); cmd->takeover_timeout_id = 0; lrm_state = lrm_state_find(cmd->rsc_id); handle_remote_ra_stop(lrm_state, cmd); free_cmd(cmd); return FALSE; } static gboolean monitor_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; lrm_state = lrm_state_find(cmd->rsc_id); crm_info("Timed out waiting for remote poke response from %s%s", cmd->rsc_id, (lrm_state?
"" : " (no LRM state)")); cmd->monitor_timeout_id = 0; pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Remote executor did not respond"); if (lrm_state && lrm_state->remote_ra_data) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (ra_data->cur_cmd == cmd) { ra_data->cur_cmd = NULL; } if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } } report_remote_ra_result(cmd); free_cmd(cmd); if(lrm_state) { lrm_state_disconnect(lrm_state); } return FALSE; } static void synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type) { lrmd_event_data_t op = { 0, }; if (lrm_state == NULL) { /* if lrm_state not given assume local */ lrm_state = lrm_state_find(controld_globals.our_nodename); } CRM_ASSERT(lrm_state != NULL); op.type = lrmd_event_exec_complete; op.rsc_id = rsc_id; op.op_type = op_type; op.t_run = (unsigned int) time(NULL); op.t_rcchange = op.t_run; op.call_id = generate_callid(); lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); process_lrm_event(lrm_state, &op, NULL, NULL); } void remote_lrm_op_callback(lrmd_event_data_t * op) { gboolean cmd_handled = FALSE; lrm_state_t *lrm_state = NULL; remote_ra_data_t *ra_data = NULL; remote_ra_cmd_t *cmd = NULL; crm_debug("Processing '%s%s%s' event on remote connection to %s: %s " "(%d) status=%s (%d)", (op->op_type? op->op_type : ""), (op->op_type? " " : ""), lrmd_event_type2str(op->type), op->remote_nodename, services_ocf_exitcode_str(op->rc), op->rc, pcmk_exec_status_str(op->op_status), op->op_status); lrm_state = lrm_state_find(op->remote_nodename); if (!lrm_state || !lrm_state->remote_ra_data) { crm_debug("No state information found for remote connection event"); return; } ra_data = lrm_state->remote_ra_data; if (op->type == lrmd_event_new_client) { // Another client has connected to the remote daemon if (pcmk_is_set(ra_data->status, expect_takeover)) { // Great, we knew this was coming lrm_remote_clear_flags(lrm_state, expect_takeover); lrm_remote_set_flags(lrm_state, takeover_complete); } else { crm_err("Disconnecting from Pacemaker Remote node %s due to " "unexpected client takeover", op->remote_nodename); /* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */ /* Do not free lrm_state->conn yet. */ /* It'll be freed in the following stop action. 
*/ lrm_state_disconnect_only(lrm_state); } return; } /* filter all EXEC events up */ if (op->type == lrmd_event_exec_complete) { if (pcmk_is_set(ra_data->status, takeover_complete)) { crm_debug("ignoring event, this connection is taken over by another node"); } else { lrm_op_callback(op); } return; } if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) { if (!pcmk_is_set(ra_data->status, remote_active)) { crm_debug("Disconnection from Pacemaker Remote node %s complete", lrm_state->node_name); } else if (!remote_ra_is_in_maintenance(lrm_state)) { crm_err("Lost connection to Pacemaker Remote node %s", lrm_state->node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } else { crm_notice("Unmanaged Pacemaker Remote node %s disconnected", lrm_state->node_name); /* Do roughly what a 'stop' on the remote-resource would do */ handle_remote_ra_stop(lrm_state, NULL); remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM); /* now fake the reply of a successful 'stop' */ synthesize_lrmd_success(NULL, lrm_state->node_name, PCMK_ACTION_STOP); } return; } if (!ra_data->cur_cmd) { crm_debug("no event to match"); return; } cmd = ra_data->cur_cmd; /* Start actions and migrate from actions complete after connection * comes back to us. */ if ((op->type == lrmd_event_connect) && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { if (op->connection_rc < 0) { update_remaining_timeout(cmd); if ((op->connection_rc == -ENOKEY) || (op->connection_rc == -EKEYREJECTED)) { // Hard error, don't retry pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM, PCMK_EXEC_ERROR, pcmk_strerror(op->connection_rc)); } else if (cmd->remaining_timeout > 3000) { crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout); g_timeout_add(1000, retry_start_cmd_cb, lrm_state); return; } else { crm_trace("can't reschedule start, remaining timeout too small %d", cmd->remaining_timeout); pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "%s without enough time to retry", pcmk_strerror(op->connection_rc)); } } else { lrm_state_reset_tables(lrm_state, TRUE); pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); lrm_remote_set_flags(lrm_state, remote_active); } crm_debug("Remote connection event matched %s action", cmd->action); report_remote_ra_result(cmd); cmd_handled = TRUE; } else if ((op->type == lrmd_event_poke) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); cmd->monitor_timeout_id = 0; } /* Only report success the first time; after that, only worry about * failures. For this function, if we get the poke back, it is always a * success. Pokes only fail if the send fails, or the response times out. */ if (!pcmk_is_set(cmd->status, cmd_reported_success)) { pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); cmd_set_flags(cmd, cmd_reported_success); } crm_debug("Remote poke event matched %s action", cmd->action); /* success, keep rescheduling if interval is present.
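The rescheduling mentioned here relies on GLib one-shot timers: each completed monitor re-arms a fresh g_timeout_add() (via recurring_helper()) rather than keeping a repeating source, so cancellation only ever has a single source ID to remove. A self-contained sketch of that pattern, with invented interval and tick count:

#include <glib.h>

static GMainLoop *loop = NULL;

/* One-shot reschedule: the callback re-arms itself and returns
 * G_SOURCE_REMOVE so the old source is torn down each time.
 */
static gboolean
fire_once(gpointer data)
{
    guint *count = data;

    g_print("monitor tick %u\n", ++*count);
    if (*count < 3) {
        g_timeout_add(100, fire_once, data); /* re-arm for the next interval */
    } else {
        g_main_loop_quit(loop);
    }
    return G_SOURCE_REMOVE;
}

int
main(void)
{
    guint count = 0;

    loop = g_main_loop_new(NULL, FALSE);
    g_timeout_add(100, fire_once, &count);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);
    return 0;
}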
*/ if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) { ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd); cmd->interval_id = g_timeout_add(cmd->interval_ms, recurring_helper, cmd); cmd = NULL; /* prevent free */ } cmd_handled = TRUE; } else if ((op->type == lrmd_event_disconnect) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { if (pcmk_is_set(ra_data->status, remote_active) && !pcmk_is_set(cmd->status, cmd_cancel)) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Remote connection unexpectedly dropped " "during monitor"); report_remote_ra_result(cmd); crm_err("Remote connection to %s unexpectedly dropped during monitor", lrm_state->node_name); } cmd_handled = TRUE; } else if ((op->type == lrmd_event_new_client) && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { handle_remote_ra_stop(lrm_state, cmd); cmd_handled = TRUE; } else { crm_debug("Event did not match %s action", ra_data->cur_cmd->action); } if (cmd_handled) { ra_data->cur_cmd = NULL; if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } free_cmd(cmd); } } static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd) { remote_ra_data_t *ra_data = NULL; CRM_ASSERT(lrm_state); ra_data = lrm_state->remote_ra_data; if (!pcmk_is_set(ra_data->status, takeover_complete)) { /* delete pending ops whenever the remote connection is intentionally stopped */ g_hash_table_remove_all(lrm_state->active_ops); } else { /* we no longer hold the history if this connection has been migrated; * however, we keep the metadata cache for future use */ lrm_state_reset_tables(lrm_state, FALSE); } lrm_remote_clear_flags(lrm_state, remote_active); lrm_state_disconnect(lrm_state); if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } ra_data->cmds = NULL; ra_data->recurring_cmds = NULL; ra_data->cur_cmd = NULL; if (cmd) { pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } } // \return Standard Pacemaker return code static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms) { const char *server = NULL; lrmd_key_value_t *tmp = NULL; int port = 0; int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ?
MAX_START_TIMEOUT_MS : timeout_ms; int rc = pcmk_rc_ok; for (tmp = cmd->params; tmp; tmp = tmp->next) { if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR, XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) { server = tmp->value; } else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) { port = atoi(tmp->value); } else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) { lrm_remote_set_flags(lrm_state, controlling_guest); } } rc = controld_connect_remote_executor(lrm_state, server, port, timeout_used); if (rc != pcmk_rc_ok) { pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Could not connect to Pacemaker Remote node %s: %s", lrm_state->node_name, pcmk_rc_str(rc)); } return rc; } static gboolean handle_remote_ra_exec(gpointer user_data) { int rc = 0; lrm_state_t *lrm_state = user_data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd; GList *first = NULL; if (ra_data->cur_cmd) { /* still waiting on previous cmd */ return TRUE; } while (ra_data->cmds) { first = ra_data->cmds; cmd = first->data; if (cmd->delay_id) { /* still waiting for start delay timer to trip */ return TRUE; } ra_data->cmds = g_list_remove_link(ra_data->cmds, first); g_list_free_1(first); if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete); if (handle_remote_ra_start(lrm_state, cmd, cmd->timeout) == pcmk_rc_ok) { /* take care of this later when we get async connection result */ crm_debug("Initiated async remote connection, %s action will complete after connect event", cmd->action); ra_data->cur_cmd = cmd; return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) { if (lrm_state_is_connected(lrm_state) == TRUE) { rc = lrm_state_poke_connection(lrm_state); if (rc < 0) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, pcmk_strerror(rc)); } } else { rc = -1; pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING, PCMK_EXEC_DONE, "Remote connection inactive"); } if (rc == 0) { crm_debug("Poked Pacemaker Remote at node %s, waiting for async response", cmd->rsc_id); ra_data->cur_cmd = cmd; cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd); return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) { if (pcmk_is_set(ra_data->status, expect_takeover)) { /* Briefly wait on stop for the takeover event to occur. If the * takeover event does not occur during the wait period, that's fine; it * just means that the remote node's lrm_status section is going to get * cleared, which will require all the resources running on the remote node * to be explicitly re-detected via probe actions. If the takeover does occur * successfully, then we can leave the status section intact.
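The grace period scheduled just below is half the stop action's timeout. With a hypothetical 20s stop timeout, the controller waits up to 10s for the takeover event before carrying out the stop anyway:

#include <stdio.h>

int
main(void)
{
    int timeout_ms = 20000;                /* hypothetical stop timeout */
    int takeover_wait_ms = timeout_ms / 2; /* same split as cmd->timeout/2 */

    printf("wait up to %dms for takeover; %dms left to finish the stop\n",
           takeover_wait_ms, timeout_ms - takeover_wait_ms);
    return 0;
}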
*/ cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd); ra_data->cur_cmd = cmd; return TRUE; } handle_remote_ra_stop(lrm_state, cmd); } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) { lrm_remote_clear_flags(lrm_state, takeover_complete); lrm_remote_set_flags(lrm_state, expect_takeover); pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Currently the only reloadable parameter is reconnect_interval, * which is only used by the scheduler via the CIB, so reloads are a * no-op. * * @COMPAT DC <2.1.0: We only need to check for "reload" in case * we're in a rolling upgrade with a DC scheduling "reload" instead * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway, * so this would work for that purpose as well. */ pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); } free_cmd(cmd); } return TRUE; } static void remote_ra_data_init(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = NULL; if (lrm_state->remote_ra_data) { return; } ra_data = calloc(1, sizeof(remote_ra_data_t)); ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state); lrm_state->remote_ra_data = ra_data; } void remote_ra_cleanup(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (!ra_data) { return; } if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } mainloop_destroy_trigger(ra_data->work); free(ra_data); lrm_state->remote_ra_data = NULL; } gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id) { if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) { return TRUE; } if ((id != NULL) && (lrm_state_find(id) != NULL) && !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) { return TRUE; } return FALSE; } lrmd_rsc_info_t * remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id) { lrmd_rsc_info_t *info = NULL; if ((lrm_state_find(rsc_id))) { info = calloc(1, sizeof(lrmd_rsc_info_t)); info->id = strdup(rsc_id); info->type = strdup(REMOTE_LRMD_RA); info->standard = strdup(PCMK_RESOURCE_CLASS_OCF); info->provider = strdup("pacemaker"); } return info; } static gboolean is_remote_ra_supported_action(const char *action) { return pcmk__str_any_of(action, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, PCMK_ACTION_RELOAD_AGENT, PCMK_ACTION_RELOAD, NULL); } static GList * fail_all_monitor_cmds(GList * list) { GList *rm_list = NULL; remote_ra_cmd_t *cmd = NULL; GList *gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { rm_list = g_list_append(rm_list, cmd); } } for (gIter = rm_list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Lost connection to remote executor"); crm_trace("Pre-emptively failing %s %s (interval=%u, %s)", cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata); report_remote_ra_result(cmd); list = g_list_remove(list, cmd); free_cmd(cmd); } /* frees only the list data, not the cmds */ g_list_free(rm_list); return list; } static GList * 
remove_cmd(GList * list, const char *action, guint interval_ms) { remote_ra_cmd_t *cmd = NULL; GList *gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, action, pcmk__str_casei)) { break; } cmd = NULL; } if (cmd) { list = g_list_remove(list, cmd); free_cmd(cmd); } return list; } int remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action, guint interval_ms) { lrm_state_t *connection_rsc = NULL; remote_ra_data_t *ra_data = NULL; connection_rsc = lrm_state_find(rsc_id); if (!connection_rsc || !connection_rsc->remote_ra_data) { return -EINVAL; } ra_data = connection_rsc->remote_ra_data; ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms); ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action, interval_ms); if (ra_data->cur_cmd && (ra_data->cur_cmd->interval_ms == interval_ms) && (pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) { cmd_set_flags(ra_data->cur_cmd, cmd_cancel); } return 0; } static remote_ra_cmd_t * handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, const char *userdata) { GList *gIter = NULL; remote_ra_cmd_t *cmd = NULL; /* There are 3 places a potential duplicate monitor operation * could exist: * 1. the recurring_cmds list, where the op is waiting for its next interval * 2. the cmds list, where the op is queued to get executed immediately * 3. cur_cmd, which means the monitor op is in flight right now */ if (interval_ms == 0) { return NULL; } if (ra_data->cur_cmd && !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) && (ra_data->cur_cmd->interval_ms == interval_ms) && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { cmd = ra_data->cur_cmd; goto handle_dup; } for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { goto handle_dup; } } for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { goto handle_dup; } } return NULL; handle_dup: crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT, cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms); /* update the userdata */ if (userdata) { free(cmd->userdata); cmd->userdata = strdup(userdata); } /* if we've already reported success, generate a new call id */ if (pcmk_is_set(cmd->status, cmd_reported_success)) { cmd->start_time = time(NULL); cmd->call_id = generate_callid(); cmd_clear_flags(cmd, cmd_reported_success); } /* if we have an interval_id set, that means we are in the process of * waiting for this cmd's next interval. Instead of waiting, cancel * the timer and execute the action immediately */ if (cmd->interval_id) { g_source_remove(cmd->interval_id); cmd->interval_id = 0; recurring_helper(cmd); } return cmd; } /*!
* \internal * \brief Execute an action using the (internal) ocf:pacemaker:remote agent * * \param[in] lrm_state Executor state object for remote connection * \param[in] rsc_id Connection resource ID * \param[in] action Action to execute * \param[in] userdata String to copy and pass to execution callback * \param[in] interval_ms Action interval (in milliseconds) * \param[in] timeout_ms Action timeout (in milliseconds) * \param[in] start_delay_ms Delay (in milliseconds) before executing action * \param[in,out] params Connection resource parameters * \param[out] call_id Where to store call ID on success * * \return Standard Pacemaker return code * \note This takes ownership of \p params, which should not be used or freed * after calling this function. */ int controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id, const char *action, const char *userdata, guint interval_ms, int timeout_ms, int start_delay_ms, lrmd_key_value_t *params, int *call_id) { lrm_state_t *connection_rsc = NULL; remote_ra_cmd_t *cmd = NULL; remote_ra_data_t *ra_data = NULL; *call_id = 0; CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL) && (userdata != NULL) && (call_id != NULL), lrmd_key_value_freeall(params); return EINVAL); if (!is_remote_ra_supported_action(action)) { lrmd_key_value_freeall(params); return EOPNOTSUPP; } connection_rsc = lrm_state_find(rsc_id); if (connection_rsc == NULL) { lrmd_key_value_freeall(params); return ENOTCONN; } remote_ra_data_init(connection_rsc); ra_data = connection_rsc->remote_ra_data; cmd = handle_dup_monitor(ra_data, interval_ms, userdata); if (cmd) { *call_id = cmd->call_id; lrmd_key_value_freeall(params); return pcmk_rc_ok; } cmd = calloc(1, sizeof(remote_ra_cmd_t)); if (cmd == NULL) { lrmd_key_value_freeall(params); return ENOMEM; } cmd->owner = strdup(lrm_state->node_name); cmd->rsc_id = strdup(rsc_id); cmd->action = strdup(action); cmd->userdata = strdup(userdata); if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL) || (cmd->userdata == NULL)) { free_cmd(cmd); lrmd_key_value_freeall(params); return ENOMEM; } cmd->interval_ms = interval_ms; cmd->timeout = timeout_ms; cmd->start_delay = start_delay_ms; cmd->params = params; cmd->start_time = time(NULL); cmd->call_id = generate_callid(); if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); *call_id = cmd->call_id; return pcmk_rc_ok; } /*! * \internal * \brief Immediately fail all monitors of a remote node, if proxied here * * \param[in] node_name Name of pacemaker_remote node */ void remote_ra_fail(const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (lrm_state && lrm_state_is_connected(lrm_state)) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; crm_info("Failing monitors on Pacemaker Remote node %s", node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } } /* A guest node fencing implied by host fencing appears in the graph as a * "stonith" pseudo-event whose <downed> element lists the fenced guest as a * <node> entry; that is the structure matched by XPATH_PSEUDO_FENCE below. */ #define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \ "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \ "/" XML_CIB_TAG_NODE /*!
* \internal * \brief Check a pseudo-action for Pacemaker Remote node side effects * * \param[in,out] xml XML of pseudo-action to check */ void remote_ra_process_pseudo(xmlNode *xml) { xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE); if (numXpathResults(search) == 1) { xmlNode *result = getXpathResult(search, 0); /* Normally, we handle the necessary side effects of a guest node stop * action when reporting the remote agent's result. However, if the stop * is implied due to fencing, it will be a fencing pseudo-event, and * there won't be a result to report. Handle that case here. * * This will result in a duplicate call to remote_node_down() if the * guest stop was real instead of implied, but that shouldn't hurt. * * There is still one corner case that isn't handled: if a guest node * isn't running any resources when its host is fenced, it will appear * to be cleanly stopped, so there will be no pseudo-fence, and our * peer cache state will be incorrect unless and until the guest is * recovered. */ if (result) { const char *remote = ID(result); if (remote) { remote_node_down(remote, DOWN_ERASE_LRM); } } } freeXpathObject(search); } static void remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) { xmlNode *update, *state; int call_opt; crm_node_t *node; call_opt = crmd_cib_smart_opt(); node = crm_remote_peer_get(lrm_state->node_name); CRM_CHECK(node != NULL, return); update = create_xml_node(NULL, XML_CIB_TAG_STATUS); state = create_node_state_update(node, node_update_none, update, __func__); crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0"); if (controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL) == pcmk_rc_ok) { /* TODO: still not 100% sure that async update will succeed ... */ if (maintenance) { lrm_remote_set_flags(lrm_state, remote_in_maint); } else { lrm_remote_clear_flags(lrm_state, remote_in_maint); } } free_xml(update); } #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \ "[@" XML_LRM_ATTR_TASK "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \ XML_GRAPH_TAG_MAINTENANCE /*! 
* \internal * \brief Check a pseudo-action holding updates for maintenance state * * \param[in,out] xml XML of pseudo-action to check */ void remote_ra_process_maintenance_nodes(xmlNode *xml) { xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE); if (numXpathResults(search) == 1) { xmlNode *node; int cnt = 0, cnt_remote = 0; for (node = first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE); node != NULL; node = crm_next_same_xml(node)) { lrm_state_t *lrm_state = lrm_state_find(ID(node)); cnt++; if (lrm_state && lrm_state->remote_ra_data && pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) { int is_maint; cnt_remote++; pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE), &is_maint, 0); remote_ra_maintenance(lrm_state, is_maint); } } - crm_trace("Action holds %d nodes (%d remotes found) " - "adjusting maintenance-mode", cnt, cnt_remote); + crm_trace("Action holds %d nodes (%d remotes found) adjusting " + PCMK_OPT_MAINTENANCE_MODE, + cnt, cnt_remote); } freeXpathObject(search); } gboolean remote_ra_is_in_maintenance(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; return pcmk_is_set(ra_data->status, remote_in_maint); } gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; return pcmk_is_set(ra_data->status, controlling_guest); } diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h index 0ec853b7b6..610f581969 100644 --- a/include/crm/msg_xml.h +++ b/include/crm/msg_xml.h @@ -1,498 +1,499 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_MSG_XML__H # define PCMK__CRM_MSG_XML__H # include #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) #include #endif #ifdef __cplusplus extern "C" { #endif /* This file defines constants for various XML syntax (mainly element and * attribute names). * * For consistency, new constants should start with "PCMK_", followed by "XE" * for XML element names, "XA" for XML attribute names, and "META" for meta * attribute names. Old names that don't follow this policy should eventually be * deprecated and replaced with names that do. */ /* * XML elements */ #define PCMK_XE_DATE_EXPRESSION "date_expression" #define PCMK_XE_OP_EXPRESSION "op_expression" /* This has been deprecated as a CIB element (an alias for with * "promotable" set to "true") since 2.0.0. */ #define PCMK_XE_PROMOTABLE_LEGACY "master" #define PCMK_XE_RSC_EXPRESSION "rsc_expression" /* * XML attributes */ #define PCMK_XA_FORMAT "format" /* These have been deprecated as CIB element attributes (aliases for * "promoted-max" and "promoted-node-max") since 2.0.0. 
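The PCMK_OPT_* constants introduced below (including PCMK_OPT_MAINTENANCE_MODE, used by the crm_trace() change above) exist so each option name is spelled in exactly one place. A small sketch of the payoff: a typo in the macro name fails to compile, and adjacent-literal concatenation in format strings keeps working:

#include <stdio.h>

#define PCMK_OPT_MAINTENANCE_MODE "maintenance-mode"

int
main(void)
{
    /* A bare "maintenence-mode" literal would compile and silently never
     * match; a misspelled macro name is a compile-time error instead.
     */
    printf("adjusting " PCMK_OPT_MAINTENANCE_MODE "\n");
    return 0;
}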
*/ #define PCMK_XA_PROMOTED_MAX_LEGACY "master-max" #define PCMK_XA_PROMOTED_NODE_MAX_LEGACY "master-node-max" /* * Meta attributes */ #define PCMK_META_CLONE_MAX "clone-max" #define PCMK_META_CLONE_MIN "clone-min" #define PCMK_META_CLONE_NODE_MAX "clone-node-max" #define PCMK_META_ENABLED "enabled" #define PCMK_META_FAILURE_TIMEOUT "failure-timeout" #define PCMK_META_MIGRATION_THRESHOLD "migration-threshold" #define PCMK_META_PROMOTED_MAX "promoted-max" #define PCMK_META_PROMOTED_NODE_MAX "promoted-node-max" /* * Cluster options */ #define PCMK_OPT_BATCH_LIMIT "batch-limit" #define PCMK_OPT_CLUSTER_DELAY "cluster-delay" #define PCMK_OPT_CLUSTER_INFRASTRUCTURE "cluster-infrastructure" #define PCMK_OPT_CLUSTER_IPC_LIMIT "cluster-ipc-limit" #define PCMK_OPT_CLUSTER_NAME "cluster-name" #define PCMK_OPT_CLUSTER_RECHECK_INTERVAL "cluster-recheck-interval" #define PCMK_OPT_CONCURRENT_FENCING "concurrent-fencing" #define PCMK_OPT_DC_DEADTIME "dc-deadtime" #define PCMK_OPT_DC_VERSION "dc-version" #define PCMK_OPT_ELECTION_TIMEOUT "election-timeout" #define PCMK_OPT_ENABLE_ACL "enable-acl" #define PCMK_OPT_ENABLE_STARTUP_PROBES "enable-startup-probes" #define PCMK_OPT_FENCE_REACTION "fence-reaction" #define PCMK_OPT_HAVE_WATCHDOG "have-watchdog" #define PCMK_OPT_JOIN_FINALIZATION_TIMEOUT "join-finalization-timeout" #define PCMK_OPT_JOIN_INTEGRATION_TIMEOUT "join-integration-timeout" #define PCMK_OPT_LOAD_THRESHOLD "load-threshold" +#define PCMK_OPT_MAINTENANCE_MODE "maintenance-mode" /* * Older constants that don't follow current naming */ # ifndef F_ORIG # define F_ORIG "src" # endif # ifndef F_SEQ # define F_SEQ "seq" # endif # ifndef F_SUBTYPE # define F_SUBTYPE "subt" # endif # ifndef F_TYPE # define F_TYPE "t" # endif # ifndef F_CLIENTNAME # define F_CLIENTNAME "cn" # endif # ifndef F_XML_TAGNAME # define F_XML_TAGNAME "__name__" # endif # ifndef T_CRM # define T_CRM "crmd" # endif # ifndef T_ATTRD # define T_ATTRD "attrd" # endif # define CIB_OPTIONS_FIRST "cib-bootstrap-options" # define F_CRM_DATA "crm_xml" # define F_CRM_TASK "crm_task" # define F_CRM_HOST_TO "crm_host_to" # define F_CRM_MSG_TYPE F_SUBTYPE # define F_CRM_SYS_TO "crm_sys_to" # define F_CRM_SYS_FROM "crm_sys_from" # define F_CRM_HOST_FROM F_ORIG # define F_CRM_REFERENCE XML_ATTR_REFERENCE # define F_CRM_VERSION XML_ATTR_VERSION # define F_CRM_ORIGIN "origin" # define F_CRM_USER "crm_user" # define F_CRM_JOIN_ID "join_id" # define F_CRM_DC_LEAVING "dc-leaving" # define F_CRM_ELECTION_ID "election-id" # define F_CRM_ELECTION_AGE_S "election-age-sec" # define F_CRM_ELECTION_AGE_US "election-age-nano-sec" # define F_CRM_ELECTION_OWNER "election-owner" # define F_CRM_TGRAPH "crm-tgraph-file" # define F_CRM_TGRAPH_INPUT "crm-tgraph-in" # define F_CRM_THROTTLE_MODE "crm-limit-mode" # define F_CRM_THROTTLE_MAX "crm-limit-max" /*---- Common tags/attrs */ # define XML_DIFF_MARKER "__crm_diff_marker__" # define XML_TAG_CIB "cib" # define XML_TAG_FAILED "failed" # define XML_ATTR_CRM_VERSION "crm_feature_set" # define XML_ATTR_DIGEST "digest" # define XML_ATTR_VALIDATION "validate-with" # define XML_ATTR_QUORUM_PANIC "no-quorum-panic" # define XML_ATTR_HAVE_QUORUM "have-quorum" # define XML_ATTR_GENERATION "epoch" # define XML_ATTR_GENERATION_ADMIN "admin_epoch" # define XML_ATTR_NUMUPDATES "num_updates" # define XML_ATTR_TIMEOUT "timeout" # define XML_ATTR_ORIGIN "crm-debug-origin" # define XML_ATTR_TSTAMP "crm-timestamp" # define XML_CIB_ATTR_WRITTEN "cib-last-written" # define XML_ATTR_VERSION "version" # define XML_ATTR_DESC 
"description" # define XML_ATTR_ID "id" # define XML_ATTR_NAME "name" # define XML_ATTR_IDREF "id-ref" # define XML_ATTR_ID_LONG "long-id" # define XML_ATTR_TYPE "type" # define XML_ATTR_OP "op" # define XML_ATTR_DC_UUID "dc-uuid" # define XML_ATTR_UPDATE_ORIG "update-origin" # define XML_ATTR_UPDATE_CLIENT "update-client" # define XML_ATTR_UPDATE_USER "update-user" # define XML_BOOLEAN_TRUE "true" # define XML_BOOLEAN_FALSE "false" # define XML_BOOLEAN_YES XML_BOOLEAN_TRUE # define XML_BOOLEAN_NO XML_BOOLEAN_FALSE # define XML_TAG_OPTIONS "options" /*---- top level tags/attrs */ # define XML_ATTR_REQUEST "request" # define XML_ATTR_RESPONSE "response" # define XML_ATTR_UNAME "uname" # define XML_ATTR_REFERENCE "reference" # define XML_CRM_TAG_PING "ping_response" # define XML_PING_ATTR_STATUS "result" # define XML_PING_ATTR_SYSFROM "crm_subsystem" # define XML_PING_ATTR_CRMDSTATE "crmd_state" # define XML_PING_ATTR_PACEMAKERDSTATE "pacemakerd_state" # define XML_PING_ATTR_PACEMAKERDSTATE_INIT "init" # define XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS "starting_daemons" # define XML_PING_ATTR_PACEMAKERDSTATE_WAITPING "wait_for_ping" # define XML_PING_ATTR_PACEMAKERDSTATE_RUNNING "running" # define XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN "shutting_down" # define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete" # define XML_PING_ATTR_PACEMAKERDSTATE_REMOTE "remote" # define XML_FAIL_TAG_CIB "failed_update" # define XML_FAILCIB_ATTR_ID "id" # define XML_FAILCIB_ATTR_OBJTYPE "object_type" # define XML_FAILCIB_ATTR_OP "operation" # define XML_FAILCIB_ATTR_REASON "reason" /*---- CIB specific tags/attrs */ # define XML_CIB_TAG_SECTION_ALL "all" # define XML_CIB_TAG_CONFIGURATION "configuration" # define XML_CIB_TAG_STATUS "status" # define XML_CIB_TAG_RESOURCES "resources" # define XML_CIB_TAG_NODES "nodes" # define XML_CIB_TAG_CONSTRAINTS "constraints" # define XML_CIB_TAG_CRMCONFIG "crm_config" # define XML_CIB_TAG_OPCONFIG "op_defaults" # define XML_CIB_TAG_RSCCONFIG "rsc_defaults" # define XML_CIB_TAG_ACLS "acls" # define XML_CIB_TAG_ALERTS "alerts" # define XML_CIB_TAG_ALERT "alert" # define XML_CIB_TAG_ALERT_RECIPIENT "recipient" # define XML_CIB_TAG_ALERT_SELECT "select" # define XML_CIB_TAG_ALERT_ATTRIBUTES "select_attributes" # define XML_CIB_TAG_ALERT_FENCING "select_fencing" # define XML_CIB_TAG_ALERT_NODES "select_nodes" # define XML_CIB_TAG_ALERT_RESOURCES "select_resources" # define XML_CIB_TAG_ALERT_ATTR "attribute" # define XML_CIB_TAG_STATE "node_state" # define XML_CIB_TAG_NODE "node" # define XML_CIB_TAG_NVPAIR "nvpair" # define XML_CIB_TAG_PROPSET "cluster_property_set" # define XML_TAG_ATTR_SETS "instance_attributes" # define XML_TAG_META_SETS "meta_attributes" # define XML_TAG_ATTRS "attributes" # define XML_TAG_PARAMS "parameters" # define XML_TAG_PARAM "param" # define XML_TAG_UTILIZATION "utilization" # define XML_TAG_RESOURCE_REF "resource_ref" # define XML_CIB_TAG_RESOURCE "primitive" # define XML_CIB_TAG_GROUP "group" # define XML_CIB_TAG_INCARNATION "clone" # define XML_CIB_TAG_CONTAINER "bundle" # define XML_CIB_TAG_RSC_TEMPLATE "template" # define XML_RSC_ATTR_TARGET "container-attribute-target" # define XML_RSC_ATTR_RESTART "restart-type" # define XML_RSC_ATTR_ORDERED "ordered" # define XML_RSC_ATTR_INTERLEAVE "interleave" # define XML_RSC_ATTR_INCARNATION "clone" # define XML_RSC_ATTR_PROMOTABLE "promotable" # define XML_RSC_ATTR_MANAGED "is-managed" # define XML_RSC_ATTR_TARGET_ROLE "target-role" # define XML_RSC_ATTR_UNIQUE 
"globally-unique" # define XML_RSC_ATTR_NOTIFY "notify" # define XML_RSC_ATTR_STICKINESS "resource-stickiness" # define XML_RSC_ATTR_MULTIPLE "multiple-active" # define XML_RSC_ATTR_REQUIRES "requires" # define XML_RSC_ATTR_CONTAINER "container" # define XML_RSC_ATTR_INTERNAL_RSC "internal_rsc" # define XML_RSC_ATTR_MAINTENANCE "maintenance" # define XML_RSC_ATTR_REMOTE_NODE "remote-node" # define XML_RSC_ATTR_CLEAR_OP "clear_failure_op" # define XML_RSC_ATTR_CLEAR_INTERVAL "clear_failure_interval" # define XML_RSC_ATTR_REMOTE_RA_ADDR "addr" # define XML_RSC_ATTR_REMOTE_RA_SERVER "server" # define XML_RSC_ATTR_REMOTE_RA_PORT "port" # define XML_RSC_ATTR_CRITICAL "critical" # define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval" # define XML_OP_ATTR_ON_FAIL "on-fail" # define XML_OP_ATTR_START_DELAY "start-delay" # define XML_OP_ATTR_ALLOW_MIGRATE "allow-migrate" # define XML_OP_ATTR_ORIGIN "interval-origin" # define XML_OP_ATTR_PENDING "record-pending" # define XML_OP_ATTR_DIGESTS_ALL "digests-all" # define XML_OP_ATTR_DIGESTS_SECURE "digests-secure" # define XML_CIB_TAG_LRM "lrm" # define XML_LRM_TAG_RESOURCES "lrm_resources" # define XML_LRM_TAG_RESOURCE "lrm_resource" # define XML_LRM_TAG_RSC_OP "lrm_rsc_op" # define XML_AGENT_ATTR_CLASS "class" # define XML_AGENT_ATTR_PROVIDER "provider" //! \deprecated Do not use (will be removed in a future release) # define XML_CIB_ATTR_REPLACE "replace" # define XML_CIB_ATTR_PRIORITY "priority" # define XML_NODE_IS_REMOTE "remote_node" # define XML_NODE_IS_FENCED "node_fenced" # define XML_NODE_IS_MAINTENANCE "node_in_maintenance" # define XML_CIB_ATTR_SHUTDOWN "shutdown" /* Aside from being an old name for the executor, LRM is a misnomer here because * the controller and scheduler use these to track actions, which are not always * executor operations. */ // XML attribute that takes interval specification (user-facing configuration) # define XML_LRM_ATTR_INTERVAL "interval" // XML attribute that takes interval in milliseconds (daemon APIs) // (identical value as above, but different constant allows clearer code intent) # define XML_LRM_ATTR_INTERVAL_MS XML_LRM_ATTR_INTERVAL # define XML_LRM_ATTR_TASK "operation" # define XML_LRM_ATTR_TASK_KEY "operation_key" # define XML_LRM_ATTR_TARGET "on_node" # define XML_LRM_ATTR_TARGET_UUID "on_node_uuid" /*! Actions to be executed on Pacemaker Remote nodes are routed through the * controller on the cluster node hosting the remote connection. That cluster * node is considered the router node for the action. 
*/ # define XML_LRM_ATTR_ROUTER_NODE "router_node" # define XML_LRM_ATTR_RSCID "rsc-id" # define XML_LRM_ATTR_OPSTATUS "op-status" # define XML_LRM_ATTR_RC "rc-code" # define XML_LRM_ATTR_CALLID "call-id" # define XML_LRM_ATTR_OP_DIGEST "op-digest" # define XML_LRM_ATTR_OP_RESTART "op-force-restart" # define XML_LRM_ATTR_OP_SECURE "op-secure-params" # define XML_LRM_ATTR_RESTART_DIGEST "op-restart-digest" # define XML_LRM_ATTR_SECURE_DIGEST "op-secure-digest" # define XML_LRM_ATTR_EXIT_REASON "exit-reason" # define XML_RSC_OP_LAST_CHANGE "last-rc-change" # define XML_RSC_OP_T_EXEC "exec-time" # define XML_RSC_OP_T_QUEUE "queue-time" # define XML_LRM_ATTR_MIGRATE_SOURCE "migrate_source" # define XML_LRM_ATTR_MIGRATE_TARGET "migrate_target" # define XML_TAG_GRAPH "transition_graph" # define XML_GRAPH_TAG_RSC_OP "rsc_op" # define XML_GRAPH_TAG_PSEUDO_EVENT "pseudo_event" # define XML_GRAPH_TAG_CRM_EVENT "crm_event" # define XML_GRAPH_TAG_DOWNED "downed" # define XML_GRAPH_TAG_MAINTENANCE "maintenance" # define XML_TAG_RULE "rule" # define XML_RULE_ATTR_SCORE "score" # define XML_RULE_ATTR_SCORE_ATTRIBUTE "score-attribute" # define XML_RULE_ATTR_ROLE "role" # define XML_RULE_ATTR_BOOLEAN_OP "boolean-op" # define XML_TAG_EXPRESSION "expression" # define XML_EXPR_ATTR_ATTRIBUTE "attribute" # define XML_EXPR_ATTR_OPERATION "operation" # define XML_EXPR_ATTR_VALUE "value" # define XML_EXPR_ATTR_TYPE "type" # define XML_EXPR_ATTR_VALUE_SOURCE "value-source" # define XML_CONS_TAG_RSC_DEPEND "rsc_colocation" # define XML_CONS_TAG_RSC_ORDER "rsc_order" # define XML_CONS_TAG_RSC_LOCATION "rsc_location" # define XML_CONS_TAG_RSC_TICKET "rsc_ticket" # define XML_CONS_TAG_RSC_SET "resource_set" # define XML_CONS_ATTR_SYMMETRICAL "symmetrical" # define XML_LOCATION_ATTR_DISCOVERY "resource-discovery" # define XML_COLOC_ATTR_SOURCE "rsc" # define XML_COLOC_ATTR_SOURCE_ROLE "rsc-role" # define XML_COLOC_ATTR_TARGET "with-rsc" # define XML_COLOC_ATTR_TARGET_ROLE "with-rsc-role" # define XML_COLOC_ATTR_NODE_ATTR "node-attribute" # define XML_COLOC_ATTR_INFLUENCE "influence" //! \deprecated Deprecated since 2.1.5 # define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance" //! \deprecated Deprecated since 2.1.5 # define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance" # define XML_LOC_ATTR_SOURCE "rsc" # define XML_LOC_ATTR_SOURCE_PATTERN "rsc-pattern" # define XML_ORDER_ATTR_FIRST "first" # define XML_ORDER_ATTR_THEN "then" # define XML_ORDER_ATTR_FIRST_ACTION "first-action" # define XML_ORDER_ATTR_THEN_ACTION "then-action" # define XML_ORDER_ATTR_KIND "kind" //! \deprecated Deprecated since 2.1.5 # define XML_ORDER_ATTR_FIRST_INSTANCE "first-instance" //! 
\deprecated Deprecated since 2.1.5 # define XML_ORDER_ATTR_THEN_INSTANCE "then-instance" # define XML_TICKET_ATTR_TICKET "ticket" # define XML_TICKET_ATTR_LOSS_POLICY "loss-policy" # define XML_NVPAIR_ATTR_NAME "name" # define XML_NVPAIR_ATTR_VALUE "value" # define XML_NODE_ATTR_RSC_DISCOVERY "resource-discovery-enabled" # define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation" # define XML_CONFIG_ATTR_SHUTDOWN_LOCK "shutdown-lock" # define XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit" # define XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY "priority-fencing-delay" # define XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT "node-pending-timeout" # define XML_ALERT_ATTR_PATH "path" # define XML_ALERT_ATTR_TIMEOUT "timeout" # define XML_ALERT_ATTR_TSTAMP_FORMAT "timestamp-format" # define XML_ALERT_ATTR_REC_VALUE "value" # define XML_CIB_TAG_GENERATION_TUPPLE "generation_tuple" # define XML_ATTR_TRANSITION_MAGIC "transition-magic" # define XML_ATTR_TRANSITION_KEY "transition-key" # define XML_ATTR_TE_NOWAIT "op_no_wait" # define XML_ATTR_TE_TARGET_RC "op_target_rc" # define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes" //! \deprecated Do not use (will be removed in a future release) # define XML_TAG_DIFF_ADDED "diff-added" //! \deprecated Do not use (will be removed in a future release) # define XML_TAG_DIFF_REMOVED "diff-removed" # define XML_ACL_TAG_USER "acl_target" # define XML_ACL_TAG_USERv1 "acl_user" # define XML_ACL_TAG_GROUP "acl_group" # define XML_ACL_TAG_ROLE "acl_role" # define XML_ACL_TAG_PERMISSION "acl_permission" # define XML_ACL_TAG_ROLE_REF "role" # define XML_ACL_TAG_ROLE_REFv1 "role_ref" # define XML_ACL_ATTR_KIND "kind" # define XML_ACL_TAG_READ "read" # define XML_ACL_TAG_WRITE "write" # define XML_ACL_TAG_DENY "deny" # define XML_ACL_ATTR_REF "reference" # define XML_ACL_ATTR_REFv1 "ref" # define XML_ACL_ATTR_TAG "object-type" # define XML_ACL_ATTR_TAGv1 "tag" # define XML_ACL_ATTR_XPATH "xpath" # define XML_ACL_ATTR_ATTRIBUTE "attribute" # define XML_CIB_TAG_TICKETS "tickets" # define XML_CIB_TAG_TICKET_STATE "ticket_state" # define XML_CIB_TAG_TAGS "tags" # define XML_CIB_TAG_TAG "tag" # define XML_CIB_TAG_OBJ_REF "obj_ref" # define XML_TAG_FENCING_TOPOLOGY "fencing-topology" # define XML_TAG_FENCING_LEVEL "fencing-level" # define XML_ATTR_STONITH_INDEX "index" # define XML_ATTR_STONITH_TARGET "target" # define XML_ATTR_STONITH_TARGET_VALUE "target-value" # define XML_ATTR_STONITH_TARGET_PATTERN "target-pattern" # define XML_ATTR_STONITH_TARGET_ATTRIBUTE "target-attribute" # define XML_ATTR_STONITH_DEVICES "devices" # define XML_TAG_DIFF "diff" # define XML_DIFF_VERSION "version" # define XML_DIFF_VSOURCE "source" # define XML_DIFF_VTARGET "target" # define XML_DIFF_CHANGE "change" # define XML_DIFF_LIST "change-list" # define XML_DIFF_ATTR "change-attr" # define XML_DIFF_RESULT "change-result" # define XML_DIFF_OP "operation" # define XML_DIFF_PATH "path" # define XML_DIFF_POSITION "position" # define ID(x) crm_element_value(x, XML_ATTR_ID) #ifdef __cplusplus } #endif #endif diff --git a/lib/pengine/common.c b/lib/pengine/common.c index e3c4686fcd..2012be2890 100644 --- a/lib/pengine/common.c +++ b/lib/pengine/common.c @@ -1,623 +1,623 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
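The pe_opts[] table that follows packs each cluster option into a positional initializer: name, legacy name, type, allowed values, default, validator, and short/long help strings. A simplified, self-contained sketch of the same shape; struct demo_option is a hypothetical stand-in for pcmk__cluster_option_t:

#include <stdio.h>

/* Hypothetical, reduced version of the option-table row type */
struct demo_option {
    const char *name;
    const char *type;
    const char *values;        /* only used when type is "select" */
    const char *default_value;
    const char *description;
};

static const struct demo_option demo_opts[] = {
    {
        "maintenance-mode", "boolean", NULL, "false",
        "Whether the cluster should refrain from monitoring, starting, "
        "and stopping resources",
    },
};

int
main(void)
{
    printf("%s (%s, default %s): %s\n",
           demo_opts[0].name, demo_opts[0].type,
           demo_opts[0].default_value, demo_opts[0].description);
    return 0;
}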
*/ #include #include #include #include #include #include #include #include static bool check_placement_strategy(const char *value) { return pcmk__strcase_any_of(value, "default", "utilization", "minimal", "balanced", NULL); } static pcmk__cluster_option_t pe_opts[] = { /* name, old name, type, allowed values, * default value, validator, * short description, * long description */ { "no-quorum-policy", NULL, "select", "stop, freeze, ignore, demote, suicide", "stop", pcmk__valid_quorum, N_("What to do when the cluster does not have quorum"), NULL }, { "symmetric-cluster", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("Whether resources can run on any node by default"), NULL }, { - "maintenance-mode", NULL, "boolean", NULL, + PCMK_OPT_MAINTENANCE_MODE, NULL, "boolean", NULL, "false", pcmk__valid_boolean, N_("Whether the cluster should refrain from monitoring, starting, " "and stopping resources"), NULL }, { "start-failure-is-fatal", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("Whether a start failure should prevent a resource from being " "recovered on the same node"), N_("When true, the cluster will immediately ban a resource from a node " "if it fails to start there. When false, the cluster will instead " "check the resource's fail count against its migration-threshold.") }, { PCMK_OPT_ENABLE_STARTUP_PROBES, NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("Whether the cluster should check for active resources during start-up"), NULL }, { XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL, "false", pcmk__valid_boolean, N_("Whether to lock resources to a cleanly shut down node"), N_("When true, resources active on a node when it is cleanly shut down " "are kept \"locked\" to that node (not allowed to run elsewhere) " "until they start again on that node after it rejoins (or for at " "most shutdown-lock-limit, if set). Stonith resources and " "Pacemaker Remote connections are never locked. Clone and bundle " "instances and the promoted role of promotable clones are " "currently never locked, though support could be added in a future " "release.") }, { XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL, "0", pcmk__valid_interval_spec, N_("Do not lock resources to a cleanly shut down node longer than " "this"), N_("If shutdown-lock is true and this is set to a nonzero time " "duration, shutdown locks will expire after this much time has " "passed since the shutdown was initiated, even if the node has not " "rejoined.") }, // Fencing-related options { "stonith-enabled", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("*** Advanced Use Only *** " "Whether nodes may be fenced as part of recovery"), N_("If false, unresponsive nodes are immediately assumed to be harmless, " "and resources that were active on them may be recovered " "elsewhere. 
This can result in a \"split-brain\" situation, " "potentially leading to data loss and/or service unavailability.") }, { "stonith-action", NULL, "select", "reboot, off, poweroff", PCMK_ACTION_REBOOT, pcmk__is_fencing_action, N_("Action to send to fence device when a node needs to be fenced " "(\"poweroff\" is a deprecated alias for \"off\")"), NULL }, { "stonith-timeout", NULL, "time", NULL, "60s", pcmk__valid_interval_spec, N_("*** Advanced Use Only *** Unused by Pacemaker"), N_("This value is not used by Pacemaker, but is kept for backward " "compatibility, and certain legacy fence agents might use it.") }, { PCMK_OPT_HAVE_WATCHDOG, NULL, "boolean", NULL, "false", pcmk__valid_boolean, N_("Whether watchdog integration is enabled"), N_("This is set automatically by the cluster according to whether SBD " "is detected to be in use. User-configured values are ignored. " "The value `true` is meaningful if diskless SBD is used and " "`stonith-watchdog-timeout` is nonzero. In that case, if fencing " "is required, watchdog-based self-fencing will be performed via " "SBD without requiring a fencing resource explicitly configured.") }, { PCMK_OPT_CONCURRENT_FENCING, NULL, "boolean", NULL, PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean, N_("Allow performing fencing operations in parallel"), NULL }, { "startup-fencing", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("*** Advanced Use Only *** Whether to fence unseen nodes at start-up"), N_("Setting this to false may lead to a \"split-brain\" situation," "potentially leading to data loss and/or service unavailability.") }, { XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL, "0", pcmk__valid_interval_spec, N_("Apply fencing delay targeting the lost nodes with the highest total resource priority"), N_("Apply specified delay for the fencings that are targeting the lost " "nodes with the highest total resource priority in case we don't " "have the majority of the nodes in our cluster partition, so that " "the more significant nodes potentially win any fencing match, " "which is especially meaningful under split-brain of 2-node " "cluster. A promoted resource instance takes the base priority + 1 " "on calculation if the base priority is not 0. Any static/random " "delays that are introduced by `pcmk_delay_base/max` configured " "for the corresponding fencing resources will be added to this " "delay. This delay should be significantly greater than, safely " "twice, the maximum `pcmk_delay_base/max`. By default, priority " "fencing delay is disabled.") }, { XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL, "0", pcmk__valid_interval_spec, N_("How long to wait for a node that has joined the cluster to join " "the controller process group"), N_("Fence nodes that do not join the controller process group within " "this much time after joining the cluster, to allow the cluster " "to continue managing resources. A value of 0 means never fence " "pending nodes. Setting the value to 2h means fence nodes after " "2 hours.") }, { PCMK_OPT_CLUSTER_DELAY, NULL, "time", NULL, "60s", pcmk__valid_interval_spec, N_("Maximum time for node-to-node communication"), N_("The node elected Designated Controller (DC) will consider an action " "failed if it does not get a response from the node executing the " "action within this time (after considering the action's own " "timeout). 
The \"correct\" value will depend on the speed and " "load of your network and cluster nodes.") }, { PCMK_OPT_BATCH_LIMIT, NULL, "integer", NULL, "0", pcmk__valid_number, N_("Maximum number of jobs that the cluster may execute in parallel " "across all nodes"), N_("The \"correct\" value will depend on the speed and load of your " "network and cluster nodes. If set to 0, the cluster will " "impose a dynamically calculated limit when any node has a " "high load.") }, { "migration-limit", NULL, "integer", NULL, "-1", pcmk__valid_number, N_("The number of live migration actions that the cluster is allowed " "to execute in parallel on a node (-1 means no limit)") }, /* Orphans and stopping */ { "stop-all-resources", NULL, "boolean", NULL, "false", pcmk__valid_boolean, N_("Whether the cluster should stop all active resources"), NULL }, { "stop-orphan-resources", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("Whether to stop resources that were removed from the configuration"), NULL }, { "stop-orphan-actions", NULL, "boolean", NULL, "true", pcmk__valid_boolean, N_("Whether to cancel recurring actions removed from the configuration"), NULL }, { "remove-after-stop", NULL, "boolean", NULL, "false", pcmk__valid_boolean, N_("*** Deprecated *** Whether to remove stopped resources from " "the executor"), N_("Values other than default are poorly tested and potentially dangerous." " This option will be removed in a future release.") }, /* Storing inputs */ { "pe-error-series-max", NULL, "integer", NULL, "-1", pcmk__valid_number, N_("The number of scheduler inputs resulting in errors to save"), N_("Zero to disable, -1 to store unlimited.") }, { "pe-warn-series-max", NULL, "integer", NULL, "5000", pcmk__valid_number, N_("The number of scheduler inputs resulting in warnings to save"), N_("Zero to disable, -1 to store unlimited.") }, { "pe-input-series-max", NULL, "integer", NULL, "4000", pcmk__valid_number, N_("The number of scheduler inputs without errors or warnings to save"), N_("Zero to disable, -1 to store unlimited.") }, /* Node health */ { PCMK__OPT_NODE_HEALTH_STRATEGY, NULL, "select", PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", " PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", " PCMK__VALUE_CUSTOM, PCMK__VALUE_NONE, pcmk__validate_health_strategy, N_("How cluster should react to node health attributes"), N_("Requires external entities to create node attributes (named with " "the prefix \"#health\") with values \"red\", " "\"yellow\", or \"green\".") }, { PCMK__OPT_NODE_HEALTH_BASE, NULL, "integer", NULL, "0", pcmk__valid_number, N_("Base health score assigned to a node"), N_("Only used when \"node-health-strategy\" is set to \"progressive\".") }, { PCMK__OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL, "0", pcmk__valid_number, N_("The score to use for a node health attribute whose value is \"green\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".") }, { PCMK__OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL, "0", pcmk__valid_number, N_("The score to use for a node health attribute whose value is \"yellow\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".") }, { PCMK__OPT_NODE_HEALTH_RED, NULL, "integer", NULL, "-INFINITY", pcmk__valid_number, N_("The score to use for a node health attribute whose value is \"red\""), N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".") }, /*Placement Strategy*/ { "placement-strategy", NULL, "select", "default, utilization, minimal, 
balanced", "default", check_placement_strategy, N_("How the cluster should allocate resources to nodes"), NULL }, }; void pe_metadata(pcmk__output_t *out) { const char *desc_short = "Pacemaker scheduler options"; const char *desc_long = "Cluster options used by Pacemaker's scheduler"; gchar *s = pcmk__format_option_metadata("pacemaker-schedulerd", desc_short, desc_long, pe_opts, PCMK__NELEM(pe_opts)); out->output_xml(out, "metadata", s); g_free(s); } void verify_pe_options(GHashTable * options) { pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts)); } const char * pe_pref(GHashTable * options, const char *name) { return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name); } const char * fail2text(enum action_fail_response fail) { const char *result = ""; switch (fail) { case pcmk_on_fail_ignore: result = "ignore"; break; case pcmk_on_fail_demote: result = "demote"; break; case pcmk_on_fail_block: result = "block"; break; case pcmk_on_fail_restart: result = "recover"; break; case pcmk_on_fail_ban: result = "migrate"; break; case pcmk_on_fail_stop: result = "stop"; break; case pcmk_on_fail_fence_node: result = "fence"; break; case pcmk_on_fail_standby_node: result = "standby"; break; case pcmk_on_fail_restart_container: result = "restart-container"; break; case pcmk_on_fail_reset_remote: result = "reset-remote"; break; } return result; } enum action_tasks text2task(const char *task) { if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) { return pcmk_action_stop; } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) { return pcmk_action_stopped; } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) { return pcmk_action_start; } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) { return pcmk_action_started; } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) { return pcmk_action_shutdown; } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { return pcmk_action_fence; } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { return pcmk_action_monitor; } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { return pcmk_action_notify; } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) { return pcmk_action_notified; } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) { return pcmk_action_promote; } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) { return pcmk_action_demote; } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) { return pcmk_action_promoted; } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) { return pcmk_action_demoted; } return pcmk_action_unspecified; } const char * task2text(enum action_tasks task) { const char *result = ""; switch (task) { case pcmk_action_unspecified: result = "no_action"; break; case pcmk_action_stop: result = PCMK_ACTION_STOP; break; case pcmk_action_stopped: result = PCMK_ACTION_STOPPED; break; case pcmk_action_start: result = PCMK_ACTION_START; break; case pcmk_action_started: result = PCMK_ACTION_RUNNING; break; case pcmk_action_shutdown: result = PCMK_ACTION_DO_SHUTDOWN; break; case pcmk_action_fence: result = PCMK_ACTION_STONITH; break; case pcmk_action_monitor: result = PCMK_ACTION_MONITOR; break; case pcmk_action_notify: result = PCMK_ACTION_NOTIFY; break; case pcmk_action_notified: result = PCMK_ACTION_NOTIFIED; break; case pcmk_action_promote: result = PCMK_ACTION_PROMOTE; break; case pcmk_action_promoted: result 
= PCMK_ACTION_PROMOTED; break; case pcmk_action_demote: result = PCMK_ACTION_DEMOTE; break; case pcmk_action_demoted: result = PCMK_ACTION_DEMOTED; break; } return result; } const char * role2text(enum rsc_role_e role) { switch (role) { case pcmk_role_stopped: return PCMK__ROLE_STOPPED; case pcmk_role_started: return PCMK__ROLE_STARTED; case pcmk_role_unpromoted: #ifdef PCMK__COMPAT_2_0 return PCMK__ROLE_UNPROMOTED_LEGACY; #else return PCMK__ROLE_UNPROMOTED; #endif case pcmk_role_promoted: #ifdef PCMK__COMPAT_2_0 return PCMK__ROLE_PROMOTED_LEGACY; #else return PCMK__ROLE_PROMOTED; #endif default: // pcmk_role_unknown return PCMK__ROLE_UNKNOWN; } } enum rsc_role_e text2role(const char *role) { if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN, pcmk__str_casei|pcmk__str_null_matches)) { return pcmk_role_unknown; } else if (pcmk__str_eq(role, PCMK__ROLE_STOPPED, pcmk__str_casei)) { return pcmk_role_stopped; } else if (pcmk__str_eq(role, PCMK__ROLE_STARTED, pcmk__str_casei)) { return pcmk_role_started; } else if (pcmk__strcase_any_of(role, PCMK__ROLE_UNPROMOTED, PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) { return pcmk_role_unpromoted; } else if (pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED, PCMK__ROLE_PROMOTED_LEGACY, NULL)) { return pcmk_role_promoted; } return pcmk_role_unknown; // Invalid role given } void add_hash_param(GHashTable * hash, const char *name, const char *value) { CRM_CHECK(hash != NULL, return); crm_trace("Adding name='%s' value='%s' to hash table", pcmk__s(name, ""), pcmk__s(value, "")); if (name == NULL || value == NULL) { return; } else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) { return; } else if (g_hash_table_lookup(hash, name) == NULL) { g_hash_table_insert(hash, strdup(name), strdup(value)); } } /*! * \internal * \brief Look up an attribute value on the appropriate node * * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta * attribute is set to "host" for \p rsc or \p force_host is \c true, query the * attribute on the node's host. Otherwise, query the attribute on \p node * itself. * * \param[in] node Node to query attribute value on by default * \param[in] name Name of attribute to query * \param[in] rsc Resource on whose behalf we're querying * \param[in] node_type Type of resource location lookup * \param[in] force_host Force a lookup on the guest node's host, regardless of * the \c XML_RSC_ATTR_TARGET value * * \return Value of the attribute on \p node or on the host of \p node * * \note If \p force_host is \c true, \p node \e must be a guest node. */ const char * pe__node_attribute_calculated(const pcmk_node_t *node, const char *name, const pcmk_resource_t *rsc, enum pcmk__rsc_node node_type, bool force_host) { // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status} bool is_guest = (node != NULL) && (node->details->type == pcmk_node_variant_remote) && (node->details->remote_rsc != NULL) && (node->details->remote_rsc->container != NULL); const char *source = NULL; const char *node_type_s = NULL; const char *reason = NULL; const pcmk_resource_t *container = NULL; const pcmk_node_t *host = NULL; CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL) && (!force_host || is_guest)); /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a * user configuration error. 
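 * For example (a hypothetical CIB snippet, using the macro in place of the
 * attribute name it expands to), a user might add
 *   <nvpair name=XML_RSC_ATTR_TARGET value="host"/>
 * to the meta-attributes of a resource assigned to a plain cluster node;
 * the "host" redirection is simply ignored in that case.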
*/ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET); if (!force_host && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) { return g_hash_table_lookup(node->details->attrs, name); } container = node->details->remote_rsc->container; switch (node_type) { case pcmk__rsc_node_assigned: node_type_s = "assigned"; host = container->allocated_to; if (host == NULL) { reason = "not assigned"; } break; case pcmk__rsc_node_current: node_type_s = "current"; if (container->running_on != NULL) { host = container->running_on->data; } if (host == NULL) { reason = "inactive"; } break; default: // Add support for other enum pcmk__rsc_node values if needed CRM_ASSERT(false); break; } if (host != NULL) { const char *value = g_hash_table_lookup(host->details->attrs, name); pcmk__rsc_trace(rsc, "%s: Value lookup for %s on %s container host %s %s%s", rsc->id, name, node_type_s, pe__node_name(host), ((value != NULL)? "succeeded: " : "failed"), pcmk__s(value, "")); return value; } pcmk__rsc_trace(rsc, "%s: Not looking for %s on %s container host: %s is %s", rsc->id, name, node_type_s, container->id, reason); return NULL; } const char * pe_node_attribute_raw(const pcmk_node_t *node, const char *name) { if(node == NULL) { return NULL; } return g_hash_table_lookup(node->details->attrs, name); } diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index b439af84de..2485d295a6 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -1,3171 +1,3171 @@ /* * Copyright 2019-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include const char * pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts) { const char * desc = NULL; // User-supplied description if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) { desc = crm_element_value(rsc->xml, XML_ATTR_DESC); } return desc; } /* Never display node attributes whose name starts with one of these prefixes */ #define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \ "shutdown", PCMK_NODE_ATTR_TERMINATE, "standby", "#", \ NULL } static int compare_attribute(gconstpointer a, gconstpointer b) { int rc; rc = strcmp((const char *)a, (const char *)b); return rc; } /*! * \internal * \brief Determine whether extended information about an attribute should be added. * * \param[in] node Node that ran this resource * \param[in,out] rsc_list List of resources for this node * \param[in,out] scheduler Scheduler data * \param[in] attrname Attribute to find * \param[out] expected_score Expected value for this attribute * * \return true if extended information should be printed, false otherwise * \note Currently, extended information is only supported for ping/pingd * resources, for which a message will be printed if connectivity is lost * or degraded. 
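 *
 * A sketch of the arithmetic below (hypothetical values): a ping resource
 * with host_list="10.0.0.1 10.0.0.2" and multiplier=1000 yields an expected
 * score of 2 * 1000 = 2000, so a reported attribute value below 2000
 * indicates lost or degraded connectivity.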
*/ static bool add_extra_info(const pcmk_node_t *node, GList *rsc_list, pcmk_scheduler_t *scheduler, const char *attrname, int *expected_score) { GList *gIter = NULL; for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; const char *type = g_hash_table_lookup(rsc->meta, "type"); const char *name = NULL; GHashTable *params = NULL; if (rsc->children != NULL) { if (add_extra_info(node, rsc->children, scheduler, attrname, expected_score)) { return true; } } if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) { continue; } params = pe_rsc_params(rsc, node, scheduler); name = g_hash_table_lookup(params, "name"); if (name == NULL) { name = "pingd"; } /* To identify the resource with the attribute name. */ if (pcmk__str_eq(name, attrname, pcmk__str_casei)) { int host_list_num = 0; const char *hosts = g_hash_table_lookup(params, "host_list"); const char *multiplier = g_hash_table_lookup(params, "multiplier"); int multiplier_i; if (hosts) { char **host_list = g_strsplit(hosts, " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); } if ((multiplier == NULL) || (pcmk__scan_min_int(multiplier, &multiplier_i, INT_MIN) != pcmk_rc_ok)) { /* The ocf:pacemaker:ping resource agent defaults multiplier to * 1. The agent currently does not handle invalid text, but it * should, and this would be a reasonable choice ... */ multiplier_i = 1; } *expected_score = host_list_num * multiplier_i; return true; } } return false; } static GList * filter_attr_list(GList *attr_list, char *name) { int i; const char *filt_str[] = FILTER_STR; CRM_CHECK(name != NULL, return attr_list); /* filtering automatic attributes */ for (i = 0; filt_str[i] != NULL; i++) { if (g_str_has_prefix(name, filt_str[i])) { return attr_list; } } return g_list_insert_sorted(attr_list, name, compare_attribute); } static GList * get_operation_list(xmlNode *rsc_entry) { GList *op_list = NULL; xmlNode *rsc_op = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); int op_rc_i; pcmk__scan_min_int(op_rc, &op_rc_i, 0); /* Display 0-interval monitors as "probe" */ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei) && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) { task = "probe"; } /* Ignore notifies and some probes */ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none) || (pcmk__str_eq(task, "probe", pcmk__str_none) && (op_rc_i == CRM_EX_NOT_RUNNING))) { continue; } if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) { op_list = g_list_append(op_list, rsc_op); } } op_list = g_list_sort(op_list, sort_op_by_callid); return op_list; } static void add_dump_node(gpointer key, gpointer value, gpointer user_data) { xmlNodePtr node = user_data; pcmk_create_xml_text_node(node, (const char *) key, (const char *) value); } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = crm_strdup_printf("%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } #define XPATH_STACK "//" XML_CIB_TAG_NVPAIR \ "[@" XML_NVPAIR_ATTR_NAME "='" \ PCMK_OPT_CLUSTER_INFRASTRUCTURE "']" static const char * get_cluster_stack(pcmk_scheduler_t *scheduler) { xmlNode 
*stack = get_xpath_object(XPATH_STACK, scheduler->input, LOG_DEBUG); return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown"; } static char * last_changed_string(const char *last_written, const char *user, const char *client, const char *origin) { if (last_written != NULL || user != NULL || client != NULL || origin != NULL) { return crm_strdup_printf("%s%s%s%s%s%s%s", last_written ? last_written : "", user ? " by " : "", user ? user : "", client ? " via " : "", client ? client : "", origin ? " on " : "", origin ? origin : ""); } else { return strdup(""); } } static char * op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s, int rc, bool print_timing) { const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); char *interval_str = NULL; char *buf = NULL; if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { char *pair = pcmk__format_nvpair("interval", interval_ms_s, "ms"); interval_str = crm_strdup_printf(" %s", pair); free(pair); } if (print_timing) { char *last_change_str = NULL; char *exec_str = NULL; char *queue_str = NULL; const char *value = NULL; time_t epoch = 0; if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok) && (epoch > 0)) { char *epoch_str = pcmk__epoch2str(&epoch, 0); last_change_str = crm_strdup_printf(" %s=\"%s\"", XML_RSC_OP_LAST_CHANGE, pcmk__s(epoch_str, "")); free(epoch_str); } value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC); if (value) { char *pair = pcmk__format_nvpair(XML_RSC_OP_T_EXEC, value, "ms"); exec_str = crm_strdup_printf(" %s", pair); free(pair); } value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE); if (value) { char *pair = pcmk__format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms"); queue_str = crm_strdup_printf(" %s", pair); free(pair); } buf = crm_strdup_printf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task, interval_str ? interval_str : "", last_change_str ? last_change_str : "", exec_str ? exec_str : "", queue_str ? queue_str : "", rc, services_ocf_exitcode_str(rc)); if (last_change_str) { free(last_change_str); } if (exec_str) { free(exec_str); } if (queue_str) { free(queue_str); } } else { buf = crm_strdup_printf("(%s) %s%s%s", call, task, interval_str ? ":" : "", interval_str ? interval_str : ""); } if (interval_str) { free(interval_str); } return buf; } static char * resource_history_string(pcmk_resource_t *rsc, const char *rsc_id, bool all, int failcount, time_t last_failure) { char *buf = NULL; if (rsc == NULL) { buf = crm_strdup_printf("%s: orphan", rsc_id); } else if (all || failcount || last_failure > 0) { char *failcount_s = NULL; char *lastfail_s = NULL; if (failcount > 0) { failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX, failcount); } else { failcount_s = strdup(""); } if (last_failure > 0) { buf = pcmk__epoch2str(&last_failure, 0); lastfail_s = crm_strdup_printf(" %s='%s'", PCMK__LAST_FAILURE_PREFIX, buf); free(buf); } buf = crm_strdup_printf("%s: migration-threshold=%d%s%s", rsc_id, rsc->migration_threshold, failcount_s, lastfail_s? lastfail_s : ""); free(failcount_s); free(lastfail_s); } else { buf = crm_strdup_printf("%s:", rsc_id); } return buf; } /*! 
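 */

/* Illustrative resource_history_string() results (hypothetical values,
 * assuming the usual "fail-count"/"last-failure" attribute prefixes):
 *   "dummy: migration-threshold=1000000 fail-count=2 last-failure='Mon Jan  1 12:00:00 2024'"
 * and simply "dummy:" when there is no failure history worth reporting.
 */

/*!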
* \internal * \brief Get a node's feature set for status display purposes * * \param[in] node Node to check * * \return String representation of feature set if the node is fully up (using * "<3.15.1" for older nodes that don't set the #feature-set attribute), * otherwise NULL */ static const char * get_node_feature_set(const pcmk_node_t *node) { if (node->details->online && node->details->expected_up && !pe__is_guest_or_remote_node(node)) { const char *feature_set = g_hash_table_lookup(node->details->attrs, CRM_ATTR_FEATURE_SET); /* The feature set attribute is present since 3.15.1. If it is missing, * then the node must be running an earlier version. */ return pcmk__s(feature_set, "<3.15.1"); } return NULL; } static bool is_mixed_version(pcmk_scheduler_t *scheduler) { const char *feature_set = NULL; for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = gIter->data; const char *node_feature_set = get_node_feature_set(node); if (node_feature_set != NULL) { if (feature_set == NULL) { feature_set = node_feature_set; } else if (strcmp(feature_set, node_feature_set) != 0) { return true; } } } return false; } static char * formatted_xml_buf(const pcmk_resource_t *rsc, bool raw) { if (raw) { return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml); } else { return dump_xml_formatted(rsc->xml); } } #define XPATH_DC_VERSION "//" XML_CIB_TAG_NVPAIR \ "[@" XML_NVPAIR_ATTR_NAME "='" \ PCMK_OPT_DC_VERSION "']" PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t") static int cluster_summary(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); int rc = pcmk_rc_no_output; const char *stack_s = get_cluster_stack(scheduler); if (pcmk_is_set(section_opts, pcmk_section_stack)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-stack", stack_s, pcmkd_state); } if (pcmk_is_set(section_opts, pcmk_section_dc)) { xmlNode *dc_version = get_xpath_object(XPATH_DC_VERSION, scheduler->input, LOG_DEBUG); const char *dc_version_s = dc_version? crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE) : NULL; const char *quorum = crm_element_value(scheduler->input, XML_ATTR_HAVE_QUORUM); char *dc_name = scheduler->dc_node? 
pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL; bool mixed_version = is_mixed_version(scheduler); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-dc", scheduler->dc_node, quorum, dc_version_s, dc_name, mixed_version); free(dc_name); } if (pcmk_is_set(section_opts, pcmk_section_times)) { const char *last_written = crm_element_value(scheduler->input, XML_CIB_ATTR_WRITTEN); const char *user = crm_element_value(scheduler->input, XML_ATTR_UPDATE_USER); const char *client = crm_element_value(scheduler->input, XML_ATTR_UPDATE_CLIENT); const char *origin = crm_element_value(scheduler->input, XML_ATTR_UPDATE_ORIG); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-times", scheduler->localhost, last_written, user, client, origin); } if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes), scheduler->ninstances, scheduler->disabled_resources, scheduler->blocked_resources); } if (pcmk_is_set(section_opts, pcmk_section_options)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-options", scheduler); } PCMK__OUTPUT_LIST_FOOTER(out, rc); if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) { if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } return rc; } PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t") static int cluster_summary_html(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); int rc = pcmk_rc_no_output; const char *stack_s = get_cluster_stack(scheduler); if (pcmk_is_set(section_opts, pcmk_section_stack)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-stack", stack_s, pcmkd_state); } /* Always print DC if none, even if not requested */ if ((scheduler->dc_node == NULL) || pcmk_is_set(section_opts, pcmk_section_dc)) { xmlNode *dc_version = get_xpath_object(XPATH_DC_VERSION, scheduler->input, LOG_DEBUG); const char *dc_version_s = dc_version? crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE) : NULL; const char *quorum = crm_element_value(scheduler->input, XML_ATTR_HAVE_QUORUM); char *dc_name = scheduler->dc_node? 
pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL; bool mixed_version = is_mixed_version(scheduler); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-dc", scheduler->dc_node, quorum, dc_version_s, dc_name, mixed_version); free(dc_name); } if (pcmk_is_set(section_opts, pcmk_section_times)) { const char *last_written = crm_element_value(scheduler->input, XML_CIB_ATTR_WRITTEN); const char *user = crm_element_value(scheduler->input, XML_ATTR_UPDATE_USER); const char *client = crm_element_value(scheduler->input, XML_ATTR_UPDATE_CLIENT); const char *origin = crm_element_value(scheduler->input, XML_ATTR_UPDATE_ORIG); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-times", scheduler->localhost, last_written, user, client, origin); } if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes), scheduler->ninstances, scheduler->disabled_resources, scheduler->blocked_resources); } if (pcmk_is_set(section_opts, pcmk_section_options)) { /* Kind of a hack - close the list we may have opened earlier in this * function so we can put all the options into their own list. We * only want to do this on HTML output, though. */ PCMK__OUTPUT_LIST_FOOTER(out, rc); out->begin_list(out, NULL, NULL, "Config Options"); out->message(out, "cluster-options", scheduler); } PCMK__OUTPUT_LIST_FOOTER(out, rc); if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) { if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } return rc; } char * pe__node_display_name(pcmk_node_t *node, bool print_detail) { char *node_name; const char *node_host = NULL; const char *node_id = NULL; int name_len; CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL)); /* Host is displayed only if this is a guest node and detail is requested */ if (print_detail && pe__is_guest_node(node)) { const pcmk_resource_t *container = node->details->remote_rsc->container; const pcmk_node_t *host_node = pe__current_node(container); if (host_node && host_node->details) { node_host = host_node->details->uname; } if (node_host == NULL) { node_host = ""; /* so we at least get "uname@" to indicate guest */ } } /* Node ID is displayed if different from uname and detail is requested */ if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) { node_id = node->details->id; } /* Determine name length */ name_len = strlen(node->details->uname) + 1; if (node_host) { name_len += strlen(node_host) + 1; /* "@node_host" */ } if (node_id) { name_len += strlen(node_id) + 3; /* + " (node_id)" */ } /* Allocate and populate display name */ node_name = malloc(name_len); CRM_ASSERT(node_name != NULL); strcpy(node_name, node->details->uname); if (node_host) { strcat(node_name, "@"); strcat(node_name, node_host); } if (node_id) { strcat(node_name, " ("); strcat(node_name, node_id); strcat(node_name, ")"); } return node_name; } int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name , size_t pairs_count, ...) 
{ xmlNodePtr xml_node = NULL; va_list args; CRM_ASSERT(tag_name != NULL); xml_node = pcmk__output_xml_peek_parent(out); CRM_ASSERT(xml_node != NULL); xml_node = create_xml_node(xml_node, tag_name); va_start(args, pairs_count); while(pairs_count--) { const char *param_name = va_arg(args, const char *); const char *param_value = va_arg(args, const char *); if (param_name && param_value) { crm_xml_add(xml_node, param_name, param_value); } }; va_end(args); if (is_list) { pcmk__output_xml_push_parent(out, xml_node); } return pcmk_rc_ok; } static const char * role_desc(enum rsc_role_e role) { if (role == pcmk_role_promoted) { #ifdef PCMK__COMPAT_2_0 return "as " PCMK__ROLE_PROMOTED_LEGACY " "; #else return "in " PCMK__ROLE_PROMOTED " role "; #endif } return ""; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_html(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts = va_arg(args, uint32_t); char *node_name = pe__node_display_name(pe_node, pcmk_is_set(show_opts, pcmk_show_node_id)); char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s", location->id, location->rsc->id, role_desc(location->role_filter), node_name); pcmk__output_create_html_node(out, "li", NULL, NULL, buf); free(node_name); free(buf); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_text(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts = va_arg(args, uint32_t); char *node_name = pe__node_display_name(pe_node, pcmk_is_set(show_opts, pcmk_show_node_id)); out->list_item(out, NULL, "%s\tprevents %s from running %son %s", location->id, location->rsc->id, role_desc(location->role_filter), node_name); free(node_name); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_xml(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted); char *weight_s = pcmk__itoa(pe_node->weight); pcmk__output_create_xml_node(out, "ban", "id", location->id, "resource", location->rsc->id, "node", pe_node->details->uname, "weight", weight_s, "promoted-only", promoted_only, /* This is a deprecated alias for * promoted_only. Removing it will break * backward compatibility of the API schema, * which will require an API schema major * version bump. 
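 *
 * For reference, the element created here looks roughly like
 * (hypothetical values):
 *   <ban id="ban-r1" resource="r1" node="node2" weight="-1000000"
 *        promoted-only="true" master_only="true"/>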
*/ "master_only", promoted_only, NULL); free(weight_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban-list", "pcmk_scheduler_t *", "const char *", "GList *", "uint32_t", "bool") static int ban_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); const char *prefix = va_arg(args, const char *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); GList *gIter, *gIter2; int rc = pcmk_rc_no_output; /* Print each ban */ for (gIter = scheduler->placement_constraints; gIter != NULL; gIter = gIter->next) { pcmk__location_t *location = gIter->data; const pcmk_resource_t *rsc = location->rsc; if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) { continue; } if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) && !pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)), only_rsc, pcmk__str_star_matches)) { continue; } for (gIter2 = location->nodes; gIter2 != NULL; gIter2 = gIter2->next) { pcmk_node_t *node = (pcmk_node_t *) gIter2->data; if (node->weight < 0) { PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints"); out->message(out, "ban", node, location, show_opts); } } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") static int cluster_counts_html(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL); char *nnodes_str = crm_strdup_printf("%d node%s configured", nnodes, pcmk__plural_s(nnodes)); pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str); free(nnodes_str); if (ndisabled && nblocked) { char *s = crm_strdup_printf("%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), ndisabled); pcmk_create_html_node(resources_node, "span", NULL, NULL, s); free(s); pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED"); s = crm_strdup_printf(", %d ", nblocked); pcmk_create_html_node(resources_node, "span", NULL, NULL, s); free(s); pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED"); pcmk_create_html_node(resources_node, "span", NULL, NULL, " from further action due to failure)"); } else if (ndisabled && !nblocked) { char *s = crm_strdup_printf("%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), ndisabled); pcmk_create_html_node(resources_node, "span", NULL, NULL, s); free(s); pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED"); pcmk_create_html_node(resources_node, "span", NULL, NULL, ")"); } else if (!ndisabled && nblocked) { char *s = crm_strdup_printf("%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), nblocked); pcmk_create_html_node(resources_node, "span", NULL, NULL, s); free(s); pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED"); pcmk_create_html_node(resources_node, "span", NULL, NULL, " from further action due to failure)"); } else { char *s = crm_strdup_printf("%d resource instance%s configured", nresources, pcmk__plural_s(nresources)); pcmk_create_html_node(resources_node, "span", NULL, NULL, s); free(s); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned 
int", "int", "int", "int") static int cluster_counts_text(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); out->list_item(out, NULL, "%d node%s configured", nnodes, pcmk__plural_s(nnodes)); if (ndisabled && nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d DISABLED, %d BLOCKED from " "further action due to failure)", nresources, pcmk__plural_s(nresources), ndisabled, nblocked); } else if (ndisabled && !nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d DISABLED)", nresources, pcmk__plural_s(nresources), ndisabled); } else if (!ndisabled && nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d BLOCKED from further action " "due to failure)", nresources, pcmk__plural_s(nresources), nblocked); } else { out->list_item(out, NULL, "%d resource instance%s configured", nresources, pcmk__plural_s(nresources)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") static int cluster_counts_xml(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL); xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL); char *s = pcmk__itoa(nnodes); crm_xml_add(nodes_node, "number", s); free(s); s = pcmk__itoa(nresources); crm_xml_add(resources_node, "number", s); free(s); s = pcmk__itoa(ndisabled); crm_xml_add(resources_node, "disabled", s); free(s); s = pcmk__itoa(nblocked); crm_xml_add(resources_node, "blocked", s); free(s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_html(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name = va_arg(args, char *); bool mixed_version = va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: "); if (dc) { char *buf = crm_strdup_printf("%s (version %s) -", dc_name, dc_version_s ? dc_version_s : "unknown"); pcmk_create_html_node(node, "span", NULL, NULL, buf); free(buf); if (mixed_version) { pcmk_create_html_node(node, "span", NULL, "warning", " MIXED-VERSION"); } pcmk_create_html_node(node, "span", NULL, NULL, " partition"); if (crm_is_true(quorum)) { pcmk_create_html_node(node, "span", NULL, NULL, " with"); } else { pcmk_create_html_node(node, "span", NULL, "warning", " WITHOUT"); } pcmk_create_html_node(node, "span", NULL, NULL, " quorum"); } else { pcmk_create_html_node(node, "span", NULL, "warning", "NONE"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_text(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name = va_arg(args, char *); bool mixed_version = va_arg(args, int); if (dc) { out->list_item(out, "Current DC", "%s (version %s) - %spartition %s quorum", dc_name, dc_version_s ? 
dc_version_s : "unknown", mixed_version ? "MIXED-VERSION " : "", crm_is_true(quorum) ? "with" : "WITHOUT"); } else { out->list_item(out, "Current DC", "NONE"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_xml(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name G_GNUC_UNUSED = va_arg(args, char *); bool mixed_version = va_arg(args, int); if (dc) { pcmk__output_create_xml_node(out, "current_dc", "present", "true", "version", dc_version_s ? dc_version_s : "", "name", dc->details->uname, "id", dc->details->id, "with_quorum", pcmk__btoa(crm_is_true(quorum)), "mixed_version", pcmk__btoa(mixed_version), NULL); } else { pcmk__output_create_xml_node(out, "current_dc", "present", "false", NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int") static int cluster_maint_mode_text(pcmk__output_t *out, va_list args) { unsigned long long flags = va_arg(args, unsigned long long); if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) { pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n"); return pcmk_rc_ok; } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) { pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n"); return pcmk_rc_ok; } else { return pcmk_rc_no_output; } } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_html(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { out->list_item(out, NULL, "STONITH of failed nodes enabled"); } else { out->list_item(out, NULL, "STONITH of failed nodes disabled"); } if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) { out->list_item(out, NULL, "Cluster is symmetric"); } else { out->list_item(out, NULL, "Cluster is asymmetric"); } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: out->list_item(out, NULL, "No quorum policy: Freeze resources"); break; case pcmk_no_quorum_stop: out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; case pcmk_no_quorum_demote: out->list_item(out, NULL, "No quorum policy: Demote promotable " "resources and stop all other resources"); break; case pcmk_no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; case pcmk_no_quorum_fence: out->list_item(out, NULL, "No quorum policy: Suicide"); break; } if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) { xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: "); pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED"); pcmk_create_html_node(node, "span", NULL, NULL, " (the cluster will not attempt to start, stop, or recover services)"); } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) { xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: "); pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED"); pcmk_create_html_node(node, "span", NULL, NULL, " (the cluster will keep all resources 
stopped)"); } else { out->list_item(out, NULL, "Resource management: enabled"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_log(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) { return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services."); } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) { return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources."); } else { return pcmk_rc_no_output; } } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_text(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { out->list_item(out, NULL, "STONITH of failed nodes enabled"); } else { out->list_item(out, NULL, "STONITH of failed nodes disabled"); } if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) { out->list_item(out, NULL, "Cluster is symmetric"); } else { out->list_item(out, NULL, "Cluster is asymmetric"); } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: out->list_item(out, NULL, "No quorum policy: Freeze resources"); break; case pcmk_no_quorum_stop: out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; case pcmk_no_quorum_demote: out->list_item(out, NULL, "No quorum policy: Demote promotable " "resources and stop all other resources"); break; case pcmk_no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; case pcmk_no_quorum_fence: out->list_item(out, NULL, "No quorum policy: Suicide"); break; } return pcmk_rc_ok; } #define bv(flag) pcmk__btoa(pcmk_is_set(scheduler->flags, (flag))) PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_xml(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); const char *no_quorum_policy = NULL; char *stonith_timeout_str = pcmk__itoa(scheduler->stonith_timeout); char *priority_fencing_delay_str = pcmk__itoa(scheduler->priority_fencing_delay * 1000); switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: no_quorum_policy = "freeze"; break; case pcmk_no_quorum_stop: no_quorum_policy = "stop"; break; case pcmk_no_quorum_demote: no_quorum_policy = "demote"; break; case pcmk_no_quorum_ignore: no_quorum_policy = "ignore"; break; case pcmk_no_quorum_fence: no_quorum_policy = "suicide"; break; } pcmk__output_create_xml_node(out, "cluster_options", "stonith-enabled", bv(pcmk_sched_fencing_enabled), "symmetric-cluster", bv(pcmk_sched_symmetric_cluster), "no-quorum-policy", no_quorum_policy, - "maintenance-mode", - bv(pcmk_sched_in_maintenance), + PCMK_OPT_MAINTENANCE_MODE, + bv(pcmk_sched_in_maintenance), "stop-all-resources", bv(pcmk_sched_stop_all), "stonith-timeout-ms", stonith_timeout_str, "priority-fencing-delay-ms", priority_fencing_delay_str, NULL); free(stonith_timeout_str); free(priority_fencing_delay_str); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_html(pcmk__output_t *out, va_list args) { const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); 
pcmk_create_html_node(node, "span", NULL, "bold", "Stack: "); pcmk_create_html_node(node, "span", NULL, NULL, stack_s); if (pcmkd_state != pcmk_pacemakerd_state_invalid) { pcmk_create_html_node(node, "span", NULL, NULL, " ("); pcmk_create_html_node(node, "span", NULL, NULL, pcmk__pcmkd_state_enum2friendly(pcmkd_state)); pcmk_create_html_node(node, "span", NULL, NULL, ")"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_text(pcmk__output_t *out, va_list args) { const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); if (pcmkd_state != pcmk_pacemakerd_state_invalid) { out->list_item(out, "Stack", "%s (%s)", stack_s, pcmk__pcmkd_state_enum2friendly(pcmkd_state)); } else { out->list_item(out, "Stack", "%s", stack_s); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_xml(pcmk__output_t *out, va_list args) { const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = NULL; if (pcmkd_state != pcmk_pacemakerd_state_invalid) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(pcmkd_state); } pcmk__output_create_xml_node(out, "stack", "type", stack_s, "pacemakerd-state", state_s, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_html(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL); char *time_s = pcmk__epoch2str(NULL, 0); pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: "); pcmk_create_html_node(updated_node, "span", NULL, NULL, time_s); if (our_nodename != NULL) { pcmk_create_html_node(updated_node, "span", NULL, NULL, " on "); pcmk_create_html_node(updated_node, "span", NULL, NULL, our_nodename); } free(time_s); time_s = last_changed_string(last_written, user, client, origin); pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: "); pcmk_create_html_node(changed_node, "span", NULL, NULL, time_s); free(time_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_xml(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); char *time_s = pcmk__epoch2str(NULL, 0); pcmk__output_create_xml_node(out, "last_update", "time", time_s, "origin", our_nodename, NULL); pcmk__output_create_xml_node(out, "last_change", "time", last_written ? last_written : "", "user", user ? user : "", "client", client ? client : "", "origin", origin ? 
origin : "", NULL); free(time_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_text(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); char *time_s = pcmk__epoch2str(NULL, 0); out->list_item(out, "Last updated", "%s%s%s", time_s, (our_nodename != NULL)? " on " : "", pcmk__s(our_nodename, "")); free(time_s); time_s = last_changed_string(last_written, user, client, origin); out->list_item(out, "Last change", " %s", time_s); free(time_s); return pcmk_rc_ok; } /*! * \internal * \brief Display a failed action in less-technical natural language * * \param[in,out] out Output object to use for display * \param[in] xml_op XML containing failed action * \param[in] op_key Operation key of failed action * \param[in] node_name Where failed action occurred * \param[in] rc OCF exit code of failed action * \param[in] status Execution status of failed action * \param[in] exit_reason Exit reason given for failed action * \param[in] exec_time String containing execution time in milliseconds */ static void failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op, const char *op_key, const char *node_name, int rc, int status, const char *exit_reason, const char *exec_time) { char *rsc_id = NULL; char *task = NULL; guint interval_ms = 0; time_t last_change_epoch = 0; GString *str = NULL; if (pcmk__str_empty(op_key) || !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) { rsc_id = strdup("unknown resource"); task = strdup("unknown action"); interval_ms = 0; } CRM_ASSERT((rsc_id != NULL) && (task != NULL)); str = g_string_sized_new(256); // Should be sufficient for most messages pcmk__g_strcat(str, rsc_id, " ", NULL); if (interval_ms != 0) { pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ", NULL); } pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ", node_name, NULL); if (status == PCMK_EXEC_DONE) { pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'", NULL); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, " (", exit_reason, ")", NULL); } } else { pcmk__g_strcat(str, " could not be executed (", pcmk_exec_status_str(status), NULL); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, ": ", exit_reason, NULL); } g_string_append_c(str, ')'); } if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_change_epoch) == pcmk_ok) { char *s = pcmk__epoch2str(&last_change_epoch, 0); pcmk__g_strcat(str, " at ", s, NULL); free(s); } if (!pcmk__str_empty(exec_time)) { int exec_time_ms = 0; if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok) && (exec_time_ms > 0)) { pcmk__g_strcat(str, " after ", pcmk__readable_interval(exec_time_ms), NULL); } } out->list_item(out, NULL, "%s", str->str); g_string_free(str, TRUE); free(rsc_id); free(task); } /*! 
* \internal * \brief Display a failed action with technical details * * \param[in,out] out Output object to use for display * \param[in] xml_op XML containing failed action * \param[in] op_key Operation key of failed action * \param[in] node_name Where failed action occurred * \param[in] rc OCF exit code of failed action * \param[in] status Execution status of failed action * \param[in] exit_reason Exit reason given for failed action * \param[in] exec_time String containing execution time in milliseconds */ static void failed_action_technical(pcmk__output_t *out, const xmlNode *xml_op, const char *op_key, const char *node_name, int rc, int status, const char *exit_reason, const char *exec_time) { const char *call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); const char *queue_time = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE); const char *exit_status = services_ocf_exitcode_str(rc); const char *lrm_status = pcmk_exec_status_str(status); time_t last_change_epoch = 0; GString *str = NULL; if (pcmk__str_empty(op_key)) { op_key = "unknown operation"; } if (pcmk__str_empty(exit_status)) { exit_status = "unknown exit status"; } if (pcmk__str_empty(call_id)) { call_id = "unknown"; } str = g_string_sized_new(256); g_string_append_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'", op_key, node_name, exit_status, rc, call_id, lrm_status); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, ", exitreason='", exit_reason, "'", NULL); } if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_change_epoch) == pcmk_ok) { char *last_change_str = pcmk__epoch2str(&last_change_epoch, 0); pcmk__g_strcat(str, ", " XML_RSC_OP_LAST_CHANGE "=" "'", last_change_str, "'", NULL); free(last_change_str); } if (!pcmk__str_empty(queue_time)) { pcmk__g_strcat(str, ", queued=", queue_time, "ms", NULL); } if (!pcmk__str_empty(exec_time)) { pcmk__g_strcat(str, ", exec=", exec_time, "ms", NULL); } out->list_item(out, NULL, "%s", str->str); g_string_free(str, TRUE); } PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t") static int failed_action_default(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); uint32_t show_opts = va_arg(args, uint32_t); const char *op_key = pe__xe_history_key(xml_op); const char *node_name = crm_element_value(xml_op, XML_ATTR_UNAME); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); const char *exec_time = crm_element_value(xml_op, XML_RSC_OP_T_EXEC); int rc; int status; pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0); pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), &status, 0); if (pcmk__str_empty(node_name)) { node_name = "unknown node"; } if (pcmk_is_set(show_opts, pcmk_show_failed_detail)) { failed_action_technical(out, xml_op, op_key, node_name, rc, status, exit_reason, exec_time); } else { failed_action_friendly(out, xml_op, op_key, node_name, rc, status, exit_reason, exec_time); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t") static int failed_action_xml(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); const char *op_key = pe__xe_history_key(xml_op); const char *op_key_name = "op_key"; int rc; int status; const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); time_t epoch = 0; char *rc_s = NULL; char *reason_s = crm_xml_escape(exit_reason ? 
exit_reason : "none"); xmlNodePtr node = NULL; pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0); pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), &status, 0); rc_s = pcmk__itoa(rc); if (crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY) == NULL) { op_key_name = "id"; } node = pcmk__output_create_xml_node(out, "failure", op_key_name, op_key, "node", crm_element_value(xml_op, XML_ATTR_UNAME), "exitstatus", services_ocf_exitcode_str(rc), "exitreason", pcmk__s(reason_s, ""), "exitcode", rc_s, "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID), "status", pcmk_exec_status_str(status), NULL); free(rc_s); if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok) && (epoch > 0)) { guint interval_ms = 0; char *interval_ms_s = NULL; char *rc_change = pcmk__epoch2str(&epoch, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); interval_ms_s = crm_strdup_printf("%u", interval_ms); pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change, "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE), "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC), "interval", interval_ms_s, "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK), NULL); free(interval_ms_s); free(rc_change); } free(reason_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("failed-action-list", "pcmk_scheduler_t *", "GList *", "GList *", "uint32_t", "bool") static int failed_action_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); xmlNode *xml_op = NULL; int rc = pcmk_rc_no_output; if (xmlChildElementCount(scheduler->failed) == 0) { return rc; } for (xml_op = pcmk__xml_first_child(scheduler->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { char *rsc = NULL; if (!pcmk__str_in_list(crm_element_value(xml_op, XML_ATTR_UNAME), only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } if (pcmk_xe_mask_probe_failure(xml_op)) { continue; } if (!parse_op_key(pe__xe_history_key(xml_op), &rsc, NULL, NULL)) { continue; } if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) { free(rsc); continue; } free(rsc); PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions"); out->message(out, "failed-action", xml_op, show_opts); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void status_node(pcmk_node_t *node, xmlNodePtr parent, uint32_t show_opts) { int health = pe__node_health(node); // Cluster membership if (node->details->online) { pcmk_create_html_node(parent, "span", NULL, "online", " online"); } else { pcmk_create_html_node(parent, "span", NULL, "offline", " OFFLINE"); } // Standby mode if (node->details->standby_onfail && (node->details->running_rsc != NULL)) { pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby due to on-fail," " with active resources)"); } else if (node->details->standby_onfail) { pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby due to on-fail)"); } else if (node->details->standby && (node->details->running_rsc != NULL)) { pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby, with active resources)"); } else if (node->details->standby) { pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby)"); } // Maintenance mode if (node->details->maintenance) 
{ pcmk_create_html_node(parent, "span", NULL, "maint", " (in maintenance mode)"); } // Node health if (health < 0) { pcmk_create_html_node(parent, "span", NULL, "health_red", " (health is RED)"); } else if (health == 0) { pcmk_create_html_node(parent, "span", NULL, "health_yellow", " (health is YELLOW)"); } // Feature set if (pcmk_is_set(show_opts, pcmk_show_feature_set)) { const char *feature_set = get_node_feature_set(node); if (feature_set != NULL) { char *buf = crm_strdup_printf(", feature set %s", feature_set); pcmk_create_html_node(parent, "span", NULL, NULL, buf); free(buf); } } } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_html(pcmk__output_t *out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id)); if (full) { xmlNodePtr item_node; if (pcmk_all_flags_set(show_opts, pcmk_show_brief | pcmk_show_rscs_by_node)) { GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); out->begin_list(out, NULL, NULL, "%s:", node_name); item_node = pcmk__output_xml_create_parent(out, "li", NULL); pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:"); status_node(node, item_node, show_opts); if (rscs != NULL) { uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs; out->begin_list(out, NULL, NULL, "Resources"); pe__rscs_brief_output(out, rscs, new_show_opts); out->end_list(out); } pcmk__output_xml_pop_parent(out); out->end_list(out); } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { GList *lpc2 = NULL; int rc = pcmk_rc_no_output; out->begin_list(out, NULL, NULL, "%s:", node_name); item_node = pcmk__output_xml_create_parent(out, "li", NULL); pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:"); status_node(node, item_node, show_opts); for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc2->data; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources"); show_opts |= pcmk_show_rsc_only; out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc, only_node, only_rsc); } PCMK__OUTPUT_LIST_FOOTER(out, rc); pcmk__output_xml_pop_parent(out); out->end_list(out); } else { char *buf = crm_strdup_printf("%s:", node_name); item_node = pcmk__output_create_xml_node(out, "li", NULL); pcmk_create_html_node(item_node, "span", NULL, "bold", buf); status_node(node, item_node, show_opts); free(buf); } } else { out->begin_list(out, NULL, NULL, "%s:", node_name); } free(node_name); return pcmk_rc_ok; } /*! 
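 * \note The checks below are ordered from most to least severe, so a node
 *       that is both unclean and pending is reported as "UNCLEAN (pending)"
 *       rather than "pending".
 *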
* \internal * \brief Get a human-friendly textual description of a node's status * * \param[in] node Node to check * * \return String representation of node's status */ static const char * node_text_status(const pcmk_node_t *node) { if (node->details->unclean) { if (node->details->online) { return "UNCLEAN (online)"; } else if (node->details->pending) { return "UNCLEAN (pending)"; } else { return "UNCLEAN (offline)"; } } else if (node->details->pending) { return "pending"; } else if (node->details->standby_onfail && node->details->online) { return "standby (on-fail)"; } else if (node->details->standby) { if (node->details->online) { if (node->details->running_rsc) { return "standby (with active resources)"; } else { return "standby"; } } else { return "OFFLINE (standby)"; } } else if (node->details->maintenance) { if (node->details->online) { return "maintenance"; } else { return "OFFLINE (maintenance)"; } } else if (node->details->online) { return "online"; } return "OFFLINE"; } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_text(pcmk__output_t *out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); if (full) { char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id)); GString *str = g_string_sized_new(64); int health = pe__node_health(node); // Create a summary line with node type, name, and status if (pe__is_guest_node(node)) { g_string_append(str, "GuestNode"); } else if (pe__is_remote_node(node)) { g_string_append(str, "RemoteNode"); } else { g_string_append(str, "Node"); } pcmk__g_strcat(str, " ", node_name, ": ", node_text_status(node), NULL); if (health < 0) { g_string_append(str, " (health is RED)"); } else if (health == 0) { g_string_append(str, " (health is YELLOW)"); } if (pcmk_is_set(show_opts, pcmk_show_feature_set)) { const char *feature_set = get_node_feature_set(node); if (feature_set != NULL) { pcmk__g_strcat(str, ", feature set ", feature_set, NULL); } } /* If we're grouping by node, print its resources */ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { if (pcmk_is_set(show_opts, pcmk_show_brief)) { GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); if (rscs != NULL) { uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs; out->begin_list(out, NULL, NULL, "%s", str->str); out->begin_list(out, NULL, NULL, "Resources"); pe__rscs_brief_output(out, rscs, new_show_opts); out->end_list(out); out->end_list(out); g_list_free(rscs); } } else { GList *gIter2 = NULL; out->begin_list(out, NULL, NULL, "%s", str->str); out->begin_list(out, NULL, NULL, "Resources"); for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter2->data; show_opts |= pcmk_show_rsc_only; out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc, only_node, only_rsc); } out->end_list(out); out->end_list(out); } } else { out->list_item(out, NULL, "%s", str->str); } g_string_free(str, TRUE); free(node_name); } else { char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id)); out->begin_list(out, NULL, NULL, "Node: %s", node_name); free(node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_xml(pcmk__output_t 
*out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); if (full) { const char *node_type = "unknown"; char *length_s = pcmk__itoa(g_list_length(node->details->running_rsc)); int health = pe__node_health(node); const char *health_s = NULL; const char *feature_set; switch (node->details->type) { case pcmk_node_variant_cluster: node_type = "member"; break; case pcmk_node_variant_remote: node_type = "remote"; break; case node_ping: node_type = "ping"; break; } if (health < 0) { health_s = "red"; } else if (health == 0) { health_s = "yellow"; } else { health_s = "green"; } feature_set = get_node_feature_set(node); pe__name_and_nvpairs_xml(out, true, "node", 15, "name", node->details->uname, "id", node->details->id, "online", pcmk__btoa(node->details->online), "standby", pcmk__btoa(node->details->standby), "standby_onfail", pcmk__btoa(node->details->standby_onfail), "maintenance", pcmk__btoa(node->details->maintenance), "pending", pcmk__btoa(node->details->pending), "unclean", pcmk__btoa(node->details->unclean), "health", health_s, "feature_set", feature_set, "shutdown", pcmk__btoa(node->details->shutdown), "expected_up", pcmk__btoa(node->details->expected_up), "is_dc", pcmk__btoa(node->details->is_dc), "resources_running", length_s, "type", node_type); if (pe__is_guest_node(node)) { xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out); crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id); } if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { GList *lpc = NULL; for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; show_opts |= pcmk_show_rsc_only; out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc, only_node, only_rsc); } } free(length_s); out->end_list(out); } else { pcmk__output_xml_create_parent(out, "node", "name", node->details->uname, NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_text(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); if (add_extra) { int v; if (value == NULL) { v = 0; } else { pcmk__scan_min_int(value, &v, INT_MIN); } if (v <= 0) { out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value); } else if (v < expected_score) { out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score); } else { out->list_item(out, NULL, "%-32s\t: %-10s", name, value); } } else { out->list_item(out, NULL, "%-32s\t: %-10s", name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_html(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); if (add_extra) { int v; char *s = crm_strdup_printf("%s: %s", name, value); xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL); if (value == NULL) { v = 0; } else { pcmk__scan_min_int(value, &v, INT_MIN); } pcmk_create_html_node(item_node, "span", NULL, NULL, s); free(s); if (v <= 
0) {
            pcmk_create_html_node(item_node, "span", NULL, "bold",
                                  "(connectivity is lost)");
        } else if (v < expected_score) {
            // Note: closing parenthesis added to balance the message text
            char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d)",
                                          expected_score);
            pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
            free(buf);
        }
    } else {
        out->list_item(out, NULL, "%s: %s", name, value);
    }
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op(pcmk__output_t *out, va_list args)
{
    pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
    xmlNodePtr xml_op = va_arg(args, xmlNodePtr);

    pcmk_resource_t *rsc = NULL;
    gchar *node_str = NULL;
    char *last_change_str = NULL;

    const char *op_rsc = crm_element_value(xml_op, "resource");
    int status;
    time_t last_change = 0;

    pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                       &status, PCMK_EXEC_UNKNOWN);

    rsc = pe_find_resource(scheduler->resources, op_rsc);

    if (rsc) {
        const pcmk_node_t *node = pe__current_node(rsc);
        const char *target_role = g_hash_table_lookup(rsc->meta,
                                                      XML_RSC_ATTR_TARGET_ROLE);
        uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;

        if (node == NULL) {
            node = rsc->pending_node;
        }

        node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
                                              show_opts, target_role, false);
    } else {
        node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
    }

    if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                &last_change) == pcmk_ok) {
        last_change_str = crm_strdup_printf(", %s='%s', exec=%sms",
                                            XML_RSC_OP_LAST_CHANGE,
                                            pcmk__trim(ctime(&last_change)),
                                            crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
    }

    out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
                   node_str, pe__xe_history_key(xml_op),
                   crm_element_value(xml_op, XML_ATTR_UNAME),
                   crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                   crm_element_value(xml_op, XML_LRM_ATTR_RC),
                   last_change_str ? last_change_str : "",
                   pcmk_exec_status_str(status));

    g_free(node_str);
    free(last_change_str);
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op_xml(pcmk__output_t *out, va_list args)
{
    pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
    xmlNodePtr xml_op = va_arg(args, xmlNodePtr);

    pcmk_resource_t *rsc = NULL;
    const char *op_rsc = crm_element_value(xml_op, "resource");
    int status;
    time_t last_change = 0;
    xmlNode *node = NULL;

    pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                       &status, PCMK_EXEC_UNKNOWN);
    node = pcmk__output_create_xml_node(out, "operation",
                                        "op", pe__xe_history_key(xml_op),
                                        "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                        "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                        "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
                                        "status", pcmk_exec_status_str(status),
                                        NULL);

    rsc = pe_find_resource(scheduler->resources, op_rsc);

    if (rsc) {
        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
        const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
        char *agent_tuple = NULL;

        agent_tuple = crm_strdup_printf("%s:%s:%s", class,
                                        pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ?
crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "", kind); pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc), "agent", agent_tuple, NULL); free(agent_tuple); } if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_change) == pcmk_ok) { pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, pcmk__trim(ctime(&last_change)), XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC), NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_xml(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute", "name", name, "value", value, NULL); if (add_extra) { char *buf = pcmk__itoa(expected_score); crm_xml_add(node, "expected", buf); free(buf); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute-list", "pcmk_scheduler_t *", "uint32_t", "bool", "GList *", "GList *") static int node_attribute_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); int rc = pcmk_rc_no_output; /* Display each node's attributes */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = gIter->data; GList *attr_list = NULL; GHashTableIter iter; gpointer key; if (!node || !node->details || !node->details->online) { continue; } g_hash_table_iter_init(&iter, node->details->attrs); while (g_hash_table_iter_next (&iter, &key, NULL)) { attr_list = filter_attr_list(attr_list, key); } if (attr_list == NULL) { continue; } if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { g_list_free(attr_list); continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes"); out->message(out, "node", node, show_opts, false, only_node, only_rsc); for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) { const char *name = aIter->data; const char *value = NULL; int expected_score = 0; bool add_extra = false; value = pe_node_attribute_raw(node, name); add_extra = add_extra_info(node, node->details->running_rsc, scheduler, name, &expected_score); /* Print attribute name and value */ out->message(out, "node-attribute", name, value, add_extra, expected_score); } g_list_free(attr_list); out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *") static int node_capacity(pcmk__output_t *out, va_list args) { const pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *comment = va_arg(args, const char *); char *dump_text = crm_strdup_printf("%s: %s capacity:", comment, pe__node_name(node)); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); out->list_item(out, NULL, "%s", dump_text); free(dump_text); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *") static int node_capacity_xml(pcmk__output_t *out, va_list args) { const pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *comment = va_arg(args, const char *); xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity", "node", node->details->uname, "comment", comment, NULL); 
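    /* add_dump_node() fills in one entry per utilization attribute configured
     * for this node (for example cpu and memory; the attribute names here are
     * illustrative, not taken from the source) */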
g_hash_table_foreach(node->details->utilization, add_dump_node, xml_node); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-history-list", "pcmk_scheduler_t *", "pcmk_node_t *", "xmlNodePtr", "GList *", "GList *", "uint32_t", "uint32_t") static int node_history_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); xmlNode *node_state = va_arg(args, xmlNode *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; int rc = pcmk_rc_no_output; lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); /* Print history of each of the node's resources */ for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE); rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id); const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); /* We can't use is_filtered here to filter group resources. For is_filtered, * we have to decide whether to check the parent or not. If we check the * parent, all elements of a group will always be printed because that's how * is_filtered works for groups. If we do not check the parent, sometimes * this will filter everything out. * * For other resource types, is_filtered is okay. */ if (parent->variant == pcmk_rsc_variant_group) { if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) && !pcmk__str_in_list(rsc_printable_id(parent), only_rsc, pcmk__str_star_matches)) { continue; } } else { if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { continue; } } if (!pcmk_is_set(section_opts, pcmk_section_operations)) { time_t last_failure = 0; int failcount = pe_get_failcount(node, rsc, &last_failure, pcmk__fc_default, NULL); if (failcount <= 0) { continue; } if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, show_opts, false, only_node, only_rsc); } out->message(out, "resource-history", rsc, rsc_id, false, failcount, last_failure, false); } else { GList *op_list = get_operation_list(rsc_entry); pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, crm_element_value(rsc_entry, XML_ATTR_ID)); if (op_list == NULL) { continue; } if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, show_opts, false, only_node, only_rsc); } out->message(out, "resource-operation-list", scheduler, rsc, node, op_list, show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_html(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer G_GNUC_UNUSED = va_arg(args, int); int rc = pcmk_rc_no_output; for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List"); out->message(out, "node", node, show_opts, true, only_node, only_rsc); } 
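    /* Close the "Node List" section, but only if the header above was
     * actually printed for at least one matching node */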
PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_text(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); /* space-separated lists of node names */ GString *online_nodes = NULL; GString *online_remote_nodes = NULL; GString *online_guest_nodes = NULL; GString *offline_nodes = NULL; GString *offline_remote_nodes = NULL; int rc = pcmk_rc_no_output; for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id)); if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { free(node_name); continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List"); // Determine whether to display node individually or in a list if (node->details->unclean || node->details->pending || (node->details->standby_onfail && node->details->online) || node->details->standby || node->details->maintenance || pcmk_is_set(show_opts, pcmk_show_rscs_by_node) || pcmk_is_set(show_opts, pcmk_show_feature_set) || (pe__node_health(node) <= 0)) { // Display node individually } else if (node->details->online) { // Display online node in a list if (pe__is_guest_node(node)) { pcmk__add_word(&online_guest_nodes, 1024, node_name); } else if (pe__is_remote_node(node)) { pcmk__add_word(&online_remote_nodes, 1024, node_name); } else { pcmk__add_word(&online_nodes, 1024, node_name); } free(node_name); continue; } else { // Display offline node in a list if (pe__is_remote_node(node)) { pcmk__add_word(&offline_remote_nodes, 1024, node_name); } else if (pe__is_guest_node(node)) { /* ignore offline guest nodes */ } else { pcmk__add_word(&offline_nodes, 1024, node_name); } free(node_name); continue; } /* If we get here, node is in bad state, or we're grouping by node */ out->message(out, "node", node, show_opts, true, only_node, only_rsc); free(node_name); } /* If we're not grouping by node, summarize nodes by status */ if (online_nodes != NULL) { out->list_item(out, "Online", "[ %s ]", (const char *) online_nodes->str); g_string_free(online_nodes, TRUE); } if (offline_nodes != NULL) { out->list_item(out, "OFFLINE", "[ %s ]", (const char *) offline_nodes->str); g_string_free(offline_nodes, TRUE); } if (online_remote_nodes) { out->list_item(out, "RemoteOnline", "[ %s ]", (const char *) online_remote_nodes->str); g_string_free(online_remote_nodes, TRUE); } if (offline_remote_nodes) { out->list_item(out, "RemoteOFFLINE", "[ %s ]", (const char *) offline_remote_nodes->str); g_string_free(offline_remote_nodes, TRUE); } if (online_guest_nodes != NULL) { out->list_item(out, "GuestOnline", "[ %s ]", (const char *) online_guest_nodes->str); g_string_free(online_guest_nodes, TRUE); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_xml(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer G_GNUC_UNUSED = va_arg(args, int); out->begin_list(out, NULL, NULL, "nodes"); for (GList *gIter = nodes; gIter != NULL; gIter = 
gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } out->message(out, "node", node, show_opts, true, only_node, only_rsc); } out->end_list(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-summary", "pcmk_scheduler_t *", "GList *", "GList *", "uint32_t", "uint32_t", "bool") static int node_summary(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); xmlNode *node_state = NULL; xmlNode *cib_status = pcmk_find_cib_element(scheduler->input, XML_CIB_TAG_STATUS); int rc = pcmk_rc_no_output; if (xmlChildElementCount(cib_status) == 0) { return rc; } for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE); node_state != NULL; node_state = crm_next_same_xml(node_state)) { pcmk_node_t *node = pe_find_node_id(scheduler->nodes, ID(node_state)); if (!node || !node->details || !node->details->online) { continue; } if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary"); out->message(out, "node-history-list", scheduler, node, node_state, only_node, only_rsc, section_opts, show_opts); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *", "const char *", "const char *") static int node_weight(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const char *prefix = va_arg(args, const char *); const char *uname = va_arg(args, const char *); const char *score = va_arg(args, const char *); if (rsc) { out->list_item(out, NULL, "%s: %s allocation score on %s: %s", prefix, rsc->id, uname, score); } else { out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *", "const char *", "const char *") static int node_weight_xml(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const char *prefix = va_arg(args, const char *); const char *uname = va_arg(args, const char *); const char *score = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, "node_weight", "function", prefix, "node", uname, "score", score, NULL); if (rsc) { crm_xml_add(node, "id", rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t") static int op_history_text(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); const char *task = va_arg(args, const char *); const char *interval_ms_s = va_arg(args, const char *); int rc = va_arg(args, int); uint32_t show_opts = va_arg(args, uint32_t); char *buf = op_history_string(xml_op, task, interval_ms_s, rc, pcmk_is_set(show_opts, pcmk_show_timing)); out->list_item(out, NULL, "%s", buf); free(buf); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t") static int op_history_xml(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); 
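    /* Remaining variadic arguments, in the order declared by PCMK__OUTPUT_ARGS
     * above: task name, interval string, return code, and show options */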
const char *task = va_arg(args, const char *); const char *interval_ms_s = va_arg(args, const char *); int rc = va_arg(args, int); uint32_t show_opts = va_arg(args, uint32_t); char *rc_s = pcmk__itoa(rc); xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history", "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID), "task", task, "rc", rc_s, "rc_text", services_ocf_exitcode_str(rc), NULL); free(rc_s); if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { char *s = crm_strdup_printf("%sms", interval_ms_s); crm_xml_add(node, "interval", s); free(s); } if (pcmk_is_set(show_opts, pcmk_show_timing)) { const char *value = NULL; time_t epoch = 0; if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok) && (epoch > 0)) { char *s = pcmk__epoch2str(&epoch, 0); crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, s); free(s); } value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC); if (value) { char *s = crm_strdup_printf("%sms", value); crm_xml_add(node, XML_RSC_OP_T_EXEC, s); free(s); } value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE); if (value) { char *s = crm_strdup_printf("%sms", value); crm_xml_add(node, XML_RSC_OP_T_QUEUE, s); free(s); } } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int promotion_score(pcmk__output_t *out, va_list args) { pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *chosen = va_arg(args, pcmk_node_t *); const char *score = va_arg(args, const char *); out->list_item(out, NULL, "%s promotion score on %s: %s", child_rsc->id, chosen? chosen->details->uname : "none", score); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int promotion_score_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *chosen = va_arg(args, pcmk_node_t *); const char *score = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score", "id", child_rsc->id, "score", score, NULL); if (chosen) { crm_xml_add(node, "node", chosen->details->uname); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool") static int resource_config(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); bool raw = va_arg(args, int); char *rsc_xml = formatted_xml_buf(rsc, raw); out->output_xml(out, "xml", rsc_xml); free(rsc_xml); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool") static int resource_config_text(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); bool raw = va_arg(args, int); char *rsc_xml = formatted_xml_buf(rsc, raw); pcmk__formatted_printf(out, "Resource XML:\n"); out->output_xml(out, "xml", rsc_xml); free(rsc_xml); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *", "bool", "int", "time_t", "bool") static int resource_history_text(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *rsc_id = va_arg(args, const char *); bool all = va_arg(args, int); int failcount = va_arg(args, int); time_t last_failure = va_arg(args, time_t); bool as_header = va_arg(args, int); char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure); if (as_header) { out->begin_list(out, NULL, NULL, "%s", buf); } else { 
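        // Not a section header, so print the history string as a regular item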
out->list_item(out, NULL, "%s", buf); } free(buf); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *", "bool", "int", "time_t", "bool") static int resource_history_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *rsc_id = va_arg(args, const char *); bool all = va_arg(args, int); int failcount = va_arg(args, int); time_t last_failure = va_arg(args, time_t); bool as_header = va_arg(args, int); xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history", "id", rsc_id, NULL); if (rsc == NULL) { pcmk__xe_set_bool_attr(node, "orphan", true); } else if (all || failcount || last_failure > 0) { char *migration_s = pcmk__itoa(rsc->migration_threshold); pcmk__xe_set_props(node, "orphan", "false", "migration-threshold", migration_s, NULL); free(migration_s); if (failcount > 0) { char *s = pcmk__itoa(failcount); crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s); free(s); } if (last_failure > 0) { char *s = pcmk__epoch2str(&last_failure, 0); crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, s); free(s); } } if (!as_header) { pcmk__output_xml_pop_parent(out); } return pcmk_rc_ok; } static void print_resource_header(pcmk__output_t *out, uint32_t show_opts) { if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { /* Active resources have already been printed by node */ out->begin_list(out, NULL, NULL, "Inactive Resources"); } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { out->begin_list(out, NULL, NULL, "Full List of Resources"); } else { out->begin_list(out, NULL, NULL, "Active Resources"); } } PCMK__OUTPUT_ARGS("resource-list", "pcmk_scheduler_t *", "uint32_t", "bool", "GList *", "GList *", "bool") static int resource_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); uint32_t show_opts = va_arg(args, uint32_t); bool print_summary = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); bool print_spacer = va_arg(args, int); GList *rsc_iter; int rc = pcmk_rc_no_output; bool printed_header = false; /* If we already showed active resources by node, and * we're not showing inactive resources, we have nothing to do */ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node) && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { return rc; } /* If we haven't already printed resources grouped by node, * and brief output was requested, print resource summary */ if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { GList *rscs = pe__filter_rsc_list(scheduler->resources, only_rsc); PCMK__OUTPUT_SPACER_IF(out, print_spacer); print_resource_header(out, show_opts); printed_header = true; rc = pe__rscs_brief_output(out, rscs, show_opts); g_list_free(rscs); } /* For each resource, display it if appropriate */ for (rsc_iter = scheduler->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) rsc_iter->data; int x; /* Complex resources may have some sub-resources active and some inactive */ gboolean is_active = rsc->fns->active(rsc, TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); /* Skip inactive orphans (deleted but still in CIB) */ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed) && !is_active) { continue; /* Skip active resources if we already displayed them by node */ } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { if (is_active) { continue; } /* Skip primitives already counted in a brief 
summary */ } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pcmk_rsc_variant_primitive)) { continue; /* Skip resources that aren't at least partially active, * unless we're displaying inactive resources */ } else if (!partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { continue; } else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) { continue; } if (!printed_header) { PCMK__OUTPUT_SPACER_IF(out, print_spacer); print_resource_header(out, show_opts); printed_header = true; } /* Print this resource */ x = out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc, only_node, only_rsc); if (x == pcmk_rc_ok) { rc = pcmk_rc_ok; } } if (print_summary && rc != pcmk_rc_ok) { if (!printed_header) { PCMK__OUTPUT_SPACER_IF(out, print_spacer); print_resource_header(out, show_opts); printed_header = true; } if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) { out->list_item(out, NULL, "No inactive resources"); } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { out->list_item(out, NULL, "No resources"); } else { out->list_item(out, NULL, "No active resources"); } } if (printed_header) { out->end_list(out); } return rc; } PCMK__OUTPUT_ARGS("resource-operation-list", "pcmk_scheduler_t *", "pcmk_resource_t *", "pcmk_node_t *", "GList *", "uint32_t") static int resource_operation_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler G_GNUC_UNUSED = va_arg(args, pcmk_scheduler_t *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); GList *op_list = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); GList *gIter = NULL; int rc = pcmk_rc_no_output; /* Print each operation */ for (gIter = op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode *) gIter->data; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); int op_rc_i; pcmk__scan_min_int(op_rc, &op_rc_i, 0); /* Display 0-interval monitors as "probe" */ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei) && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) { task = "probe"; } /* If this is the first printed operation, print heading for resource */ if (rc == pcmk_rc_no_output) { time_t last_failure = 0; int failcount = pe_get_failcount(node, rsc, &last_failure, pcmk__fc_default, NULL); out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true, failcount, last_failure, true); rc = pcmk_rc_ok; } /* Print the operation */ out->message(out, "op-history", xml_op, task, interval_ms_s, op_rc_i, show_opts); } /* Free the list we created (no need to free the individual items) */ g_list_free(op_list); PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int resource_util(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *fn = va_arg(args, const char *); char *dump_text = crm_strdup_printf("%s: %s utilization on %s:", fn, rsc->id, pe__node_name(node)); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); out->list_item(out, NULL, "%s", dump_text); free(dump_text); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static 
int resource_util_xml(pcmk__output_t *out, va_list args)
{
    pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
    pcmk_node_t *node = va_arg(args, pcmk_node_t *);
    const char *fn = va_arg(args, const char *);

    xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
                                                       "resource", rsc->id,
                                                       "node", node->details->uname,
                                                       "function", fn,
                                                       NULL);

    g_hash_table_foreach(rsc->utilization, add_dump_node, xml_node);
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_html(pcmk__output_t *out, va_list args)
{
    pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);

    if (ticket->last_granted > -1) {
        char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);

        out->list_item(out, NULL, "%s:\t%s%s %s=\"%s\"", ticket->id,
                       ticket->granted ? "granted" : "revoked",
                       ticket->standby ? " [standby]" : "",
                       "last-granted", pcmk__s(epoch_str, ""));
        free(epoch_str);
    } else {
        out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
                       ticket->granted ? "granted" : "revoked",
                       ticket->standby ? " [standby]" : "");
    }
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_text(pcmk__output_t *out, va_list args)
{
    pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);

    if (ticket->last_granted > -1) {
        char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);

        out->list_item(out, ticket->id, "%s%s %s=\"%s\"",
                       ticket->granted ? "granted" : "revoked",
                       ticket->standby ? " [standby]" : "",
                       "last-granted", pcmk__s(epoch_str, ""));
        free(epoch_str);
    } else {
        out->list_item(out, ticket->id, "%s%s",
                       ticket->granted ? "granted" : "revoked",
                       ticket->standby ? " [standby]" : "");
    }
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_xml(pcmk__output_t *out, va_list args)
{
    pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
    xmlNodePtr node = NULL;

    node = pcmk__output_create_xml_node(out, "ticket",
                                        "id", ticket->id,
                                        "status", ticket->granted ? "granted" : "revoked",
                                        "standby", pcmk__btoa(ticket->standby),
                                        NULL);

    if (ticket->last_granted > -1) {
        char *buf = pcmk__epoch2str(&ticket->last_granted, 0);

        crm_xml_add(node, "last-granted", buf);
        free(buf);
    }
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("ticket-list", "pcmk_scheduler_t *", "bool")
static int
ticket_list(pcmk__output_t *out, va_list args)
{
    pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
    bool print_spacer = va_arg(args, int);

    GHashTableIter iter;
    gpointer key, value;

    if (g_hash_table_size(scheduler->tickets) == 0) {
        return pcmk_rc_no_output;
    }

    PCMK__OUTPUT_SPACER_IF(out, print_spacer);

    /* Print section heading */
    out->begin_list(out, NULL, NULL, "Tickets");

    /* Print each ticket */
    g_hash_table_iter_init(&iter, scheduler->tickets);
    while (g_hash_table_iter_next(&iter, &key, &value)) {
        pcmk_ticket_t *ticket = (pcmk_ticket_t *) value;
        out->message(out, "ticket", ticket);
    }

    /* Close section */
    out->end_list(out);
    return pcmk_rc_ok;
}

static pcmk__message_entry_t fmt_functions[] = {
    { "ban", "default", ban_text },
    { "ban", "html", ban_html },
    { "ban", "xml", ban_xml },
    { "ban-list", "default", ban_list },
    { "bundle", "default", pe__bundle_text },
    { "bundle", "xml", pe__bundle_xml },
    { "bundle", "html", pe__bundle_html },
    { "clone", "default", pe__clone_default },
    { "clone", "xml", pe__clone_xml },
    { "cluster-counts", "default", cluster_counts_text },
    { "cluster-counts", "html", cluster_counts_html },
    { "cluster-counts", "xml", cluster_counts_xml },
    { "cluster-dc", "default", cluster_dc_text },
    { "cluster-dc", "html", cluster_dc_html },
    { "cluster-dc", "xml", cluster_dc_xml },
    { "cluster-options", "default", cluster_options_text },
    { "cluster-options", "html", cluster_options_html },
    { "cluster-options", "log", cluster_options_log },
    { "cluster-options", "xml", cluster_options_xml },
    { "cluster-summary", "default", cluster_summary },
    { "cluster-summary", "html", cluster_summary_html },
    { "cluster-stack", "default", cluster_stack_text },
    { "cluster-stack", "html", cluster_stack_html },
    { "cluster-stack", "xml", cluster_stack_xml },
    { "cluster-times", "default", cluster_times_text },
    { "cluster-times", "html", cluster_times_html },
    { "cluster-times", "xml", cluster_times_xml },
    { "failed-action", "default", failed_action_default },
    { "failed-action", "xml", failed_action_xml },
    { "failed-action-list", "default", failed_action_list },
    { "group", "default", pe__group_default},
    { "group", "xml", pe__group_xml },
    { "maint-mode", "text", cluster_maint_mode_text },
    { "node", "default", node_text },
    { "node", "html", node_html },
    { "node", "xml", node_xml },
    { "node-and-op", "default", node_and_op },
    { "node-and-op", "xml", node_and_op_xml },
    { "node-capacity", "default", node_capacity },
    { "node-capacity", "xml", node_capacity_xml },
    { "node-history-list", "default", node_history_list },
    { "node-list", "default", node_list_text },
    { "node-list", "html", node_list_html },
    { "node-list", "xml", node_list_xml },
    { "node-weight", "default", node_weight },
    { "node-weight", "xml", node_weight_xml },
    { "node-attribute", "default", node_attribute_text },
    { "node-attribute", "html", node_attribute_html },
    { "node-attribute", "xml", node_attribute_xml },
    { "node-attribute-list", "default", node_attribute_list },
    { "node-summary", "default", node_summary },
    { "op-history", "default", op_history_text },
    { "op-history", "xml", op_history_xml },
    { "primitive", "default", pe__resource_text },
    { "primitive", "xml", pe__resource_xml },
    { "primitive", "html", pe__resource_html },
    { "promotion-score", "default", promotion_score },
    { "promotion-score", "xml", promotion_score_xml },
    { "resource-config", "default", resource_config },
    { "resource-config", "text", resource_config_text },
    { "resource-history", "default", resource_history_text },
    { "resource-history", "xml", resource_history_xml },
    { "resource-list", "default", resource_list },
    { "resource-operation-list", "default", resource_operation_list },
    { "resource-util", "default", resource_util },
    { "resource-util", "xml", resource_util_xml },
    { "ticket", "default", ticket_text },
    { "ticket", "html", ticket_html },
    { "ticket", "xml", ticket_xml },
    { "ticket-list", "default", ticket_list },

    { NULL, NULL, NULL }
};

void
pe__register_messages(pcmk__output_t *out)
{
    pcmk__register_messages(out, fmt_functions);
}
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 64d484c0e1..3eabd0ed17 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5104 +1,5105 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

CRM_TRACE_INIT_DATA(pe_status);

// A (parsed) resource action history entry
struct action_history {
    pcmk_resource_t *rsc;       // Resource that history is for
    pcmk_node_t *node;          // Node that history is for
    xmlNode *xml;               // History entry XML

    // Parsed from entry XML
    const char *id;             // XML ID of history entry
    const char *key;            // Operation key of action
    const char *task;           // Action name
    const char *exit_reason;    // Exit reason given for result
    guint interval_ms;          // Action interval
    int call_id;                // Call ID of action
    int expected_exit_status;   // Expected exit status of action
    int exit_status;            // Actual exit status of action
    int execution_status;       // Execution status of action
};

/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
 * use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
 * flag is stringified more readably in log messages.
*/ #define set_config_flag(scheduler, option, flag) do { \ const char *scf_value = pe_pref((scheduler)->config_hash, (option)); \ if (scf_value != NULL) { \ if (crm_is_true(scf_value)) { \ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } else { \ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } \ } \ } while(0) static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *failed); static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node); static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler); static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler); static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler); static gboolean is_dangling_guest_node(pcmk_node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section. */ if (pe__is_guest_or_remote_node(node) && node->details->remote_rsc && node->details->remote_rsc->container == NULL && pcmk_is_set(node->details->remote_rsc->flags, pcmk_rsc_removed_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] scheduler Scheduler data * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider `priority-fencing-delay` */ void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (pe__is_guest_node(node)) { pcmk_resource_t *rsc = node->details->remote_rsc->container; if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", pe__node_name(node), reason, rsc->id); } else { pcmk__sched_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", pe__node_name(node), rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. 
*/ node->details->remote_requires_reset = TRUE; pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", pe__node_name(node), reason); pe__set_resource_flags(node->details->remote_rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); } else if (pe__is_remote_node(node)) { pcmk_resource_t *rsc = node->details->remote_rsc; if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", pe__node_name(node), reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; pcmk__sched_warn("Remote node %s %s: %s", pe__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply `priority-fencing-delay` for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", pe__node_name(node), pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean", reason); } else { pcmk__sched_warn("Cluster node %s %s: %s", pe__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'" \ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ "and @" XML_NVPAIR_ATTR_VALUE "='" PCMK__VALUE_UNFENCING "']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler) { xmlXPathObjectPtr result = NULL; if (!pcmk_is_set(scheduler->flags, flag)) { result = xpath_search(scheduler->input, xpath); if (result && (numXpathResults(result) > 0)) { pe__set_working_set_flags(scheduler, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler) { const char *value = NULL; GHashTable *config_hash = pcmk__strkey_table(free, free); pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = pcmk_role_unknown, .now = scheduler->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; scheduler->config_hash = config_hash; pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash, CIB_OPTIONS_FIRST, FALSE, scheduler); verify_pe_options(scheduler->config_hash); set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES, pcmk_sched_probe_resources); if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) { crm_info("Startup probes: disabled (dangerous)"); } value = pe_pref(scheduler->config_hash, PCMK_OPT_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_info("Watchdog-based self-fencing will be performed via SBD if " "fencing is required and stonith-watchdog-timeout is nonzero"); pe__set_working_set_flags(scheduler, 
pcmk_sched_have_fencing); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. */ set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING, scheduler); value = pe_pref(scheduler->config_hash, "stonith-timeout"); scheduler->stonith_timeout = (int) crm_parse_interval_spec(value); crm_debug("STONITH timeout: %d", scheduler->stonith_timeout); set_config_flag(scheduler, "stonith-enabled", pcmk_sched_fencing_enabled); if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { crm_debug("STONITH of failed nodes is enabled"); } else { crm_debug("STONITH of failed nodes is disabled"); } scheduler->stonith_action = pe_pref(scheduler->config_hash, "stonith-action"); if (!strcmp(scheduler->stonith_action, "poweroff")) { pcmk__warn_once(pcmk__wo_poweroff, "Support for stonith-action of 'poweroff' is " "deprecated and will be removed in a future release " "(use 'off' instead)"); scheduler->stonith_action = PCMK_ACTION_OFF; } crm_trace("STONITH will %s nodes", scheduler->stonith_action); set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING, pcmk_sched_concurrent_fencing); if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) { crm_debug("Concurrent fencing is enabled"); } else { crm_debug("Concurrent fencing is disabled"); } value = pe_pref(scheduler->config_hash, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY); if (value) { scheduler->priority_fencing_delay = crm_parse_interval_spec(value) / 1000; crm_trace("Priority fencing delay is %ds", scheduler->priority_fencing_delay); } set_config_flag(scheduler, "stop-all-resources", pcmk_sched_stop_all); crm_debug("Stop all active resources: %s", pcmk__btoa(pcmk_is_set(scheduler->flags, pcmk_sched_stop_all))); set_config_flag(scheduler, "symmetric-cluster", pcmk_sched_symmetric_cluster); if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(scheduler->config_hash, "no-quorum-policy"); if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_ignore; } else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_freeze; } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_demote; } else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) { if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { int do_panic = 0; crm_element_value_int(scheduler->input, XML_ATTR_QUORUM_PANIC, &do_panic); if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) { scheduler->no_quorum_policy = pcmk_no_quorum_fence; } else { crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { pcmk__config_err("Resetting no-quorum-policy to 'stop' because " "fencing is disabled"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { scheduler->no_quorum_policy = pcmk_no_quorum_stop; } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case pcmk_no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case pcmk_no_quorum_demote: crm_debug("On loss of quorum: " "Demote promotable resources and stop other resources"); break; case pcmk_no_quorum_fence: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case pcmk_no_quorum_ignore: crm_notice("On loss of 
quorum: Ignore"); break; } set_config_flag(scheduler, "stop-orphan-resources", pcmk_sched_stop_removed_resources); if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) { crm_trace("Orphan resources are stopped"); } else { crm_trace("Orphan resources are ignored"); } set_config_flag(scheduler, "stop-orphan-actions", pcmk_sched_cancel_removed_actions); if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) { crm_trace("Orphan resource actions are stopped"); } else { crm_trace("Orphan resource actions are ignored"); } value = pe_pref(scheduler->config_hash, "remove-after-stop"); if (value != NULL) { if (crm_is_true(value)) { pe__set_working_set_flags(scheduler, pcmk_sched_remove_after_stop); #ifndef PCMK__COMPAT_2_0 pcmk__warn_once(pcmk__wo_remove_after, "Support for the remove-after-stop cluster " "property is deprecated and will be removed in a " "future release"); #endif } else { pe__clear_working_set_flags(scheduler, pcmk_sched_remove_after_stop); } } - set_config_flag(scheduler, "maintenance-mode", pcmk_sched_in_maintenance); + set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE, + pcmk_sched_in_maintenance); crm_trace("Maintenance mode: %s", pcmk__btoa(pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance))); set_config_flag(scheduler, "start-failure-is-fatal", pcmk_sched_start_failure_fatal); if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) { crm_trace("Start failures are always fatal"); } else { crm_trace("Start failures are handled by failcount"); } if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { set_config_flag(scheduler, "startup-fencing", pcmk_sched_startup_fencing); } if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pcmk__warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes"); } pe__unpack_node_health_scores(scheduler); scheduler->placement_strategy = pe_pref(scheduler->config_hash, "placement-strategy"); crm_trace("Placement strategy: %s", scheduler->placement_strategy); set_config_flag(scheduler, "shutdown-lock", pcmk_sched_shutdown_lock); if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { value = pe_pref(scheduler->config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT); scheduler->shutdown_lock = crm_parse_interval_spec(value) / 1000; crm_trace("Resources will be locked to nodes that were cleanly " "shut down (locks expire after %s)", pcmk__readable_interval(scheduler->shutdown_lock)); } else { crm_trace("Resources will not be locked to nodes that were cleanly " "shut down"); } value = pe_pref(scheduler->config_hash, XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT); scheduler->node_pending_timeout = crm_parse_interval_spec(value) / 1000; if (scheduler->node_pending_timeout == 0) { crm_trace("Do not fence pending nodes"); } else { crm_trace("Fence pending nodes after %s", pcmk__readable_interval(scheduler->node_pending_timeout * 1000)); } return TRUE; } pcmk_node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pcmk_scheduler_t *scheduler) { pcmk_node_t *new_node = NULL; if (pe_find_node(scheduler->nodes, uname) != NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } new_node = calloc(1, sizeof(pcmk_node_t)); if (new_node == NULL) { pcmk__sched_err("Could not allocate memory for node %s", uname); return NULL; } new_node->weight = char2score(score); new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); 
pcmk__sched_err("Could not allocate memory for node %s", uname); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->data_set = scheduler; if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) { new_node->details->type = pcmk_node_variant_cluster; } else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) { new_node->details->type = pcmk_node_variant_remote; pe__set_working_set_flags(scheduler, pcmk_sched_have_remote_nodes); } else { /* @COMPAT 'ping' is the default for backward compatibility, but it * should be changed to 'member' at a compatibility break */ if (!pcmk__str_eq(type, "ping", pcmk__str_casei)) { pcmk__config_warn("Node %s has unrecognized type '%s', " "assuming 'ping'", pcmk__s(uname, "without name"), type); } pcmk__warn_once(pcmk__wo_ping_node, "Support for nodes of type 'ping' (such as %s) is " "deprecated and will be removed in a future release", pcmk__s(uname, "unnamed node")); new_node->details->type = node_ping; } new_node->details->attrs = pcmk__strkey_table(free, free); if (pe__is_guest_or_remote_node(new_node)) { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("remote")); } else { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("cluster")); } new_node->details->utilization = pcmk__strkey_table(free, free); new_node->details->digest_cache = pcmk__strkey_table(free, pe__free_digests); scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node, pe__cmp_node_name); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = ID(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) { if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS, pcmk__str_casei)) { continue; } for (attr = pcmk__xe_first_child(attr_set); attr != NULL; attr = pcmk__xe_next(attr)) { const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) { remote_name = value; } else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) { remote_server = value; } else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) { remote_port = value; } else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) { connect_timeout = value; } else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) { remote_allow_migrate=value; } else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (pe_find_resource(data->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node) { if ((new_node->details->type == 
pcmk_node_variant_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. */ new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; pcmk_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE "> entry in configuration without id"); continue; } new_node = pe_create_node(id, uname, type, score, scheduler); if (new_node == NULL) { return FALSE; } handle_startup_fencing(scheduler, new_node); add_node_attrs(xml_obj, new_node, FALSE, scheduler); crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } if (scheduler->localhost && (pe_find_node(scheduler->nodes, scheduler->localhost) == NULL)) { crm_info("Creating a fake local node"); pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0, scheduler); } return TRUE; } static void setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *container_id = NULL; if (rsc->children) { g_list_foreach(rsc->children, (GFunc) setup_container, scheduler); return; } container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER); if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) { pcmk_resource_t *container = pe_find_resource(scheduler->resources, container_id); if (container) { rsc->container = container; pe__set_resource_flags(container, pcmk_rsc_has_filler); container->fillers = g_list_append(container->fillers, rsc); pcmk__rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pcmk__config_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. */ if (xml_contains_remote_node(xml_obj)) { new_node_id = ID(xml_obj); /* The "pe_find_node" check is here to make sure we don't iterate over * an expanded node that has already been added to the node list. 
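*
* In other words, node creation is kept idempotent with a find-before-create
* guard. A minimal sketch of the pattern (reviewer's reading, hypothetical
* names rather than the actual helpers):
*
*   if (find_node(nodes, id) == NULL) {
*       nodes = add_node(nodes, create_node(id));
*   }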
*/ if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found remote node %s defined by resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, scheduler); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen). */ if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, scheduler); if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, scheduler); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, scheduler); if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, ID(xml_obj2), ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, scheduler); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc) { pcmk_node_t *remote_node = NULL; if (new_rsc->is_remote_node == FALSE) { return; } if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pe_find_node(scheduler->nodes, new_rsc->id); CRM_CHECK(remote_node != NULL, return); pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s", new_rsc->id, pe__node_name(remote_node)); remote_node->details->remote_rsc = new_rsc; if (new_rsc->container == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(scheduler, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); } } static void destroy_tag(gpointer data) { pcmk_tag_t *tag = data; if (tag) { free(tag->id); g_list_free_full(tag->refs, free); free(tag); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] scheduler Scheduler data * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when pe__unpack_resource() calls resource_location() */ gboolean unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; GList *gIter = NULL; scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag); for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { pcmk_resource_t *new_rsc = NULL; const char *id = ID(xml_obj); if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring <%s> resource without ID", xml_obj->name); continue; } if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, pcmk__str_none)) { if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ g_hash_table_insert(scheduler->template_rsc_sets, strdup(id), NULL); } continue; } crm_trace("Unpacking <%s " XML_ATTR_ID "='%s'>", xml_obj->name, id); if (pe__unpack_resource(xml_obj, &new_rsc, NULL, scheduler) == pcmk_rc_ok) { scheduler->resources = g_list_append(scheduler->resources, new_rsc); pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { pcmk__config_err("Ignoring <%s> resource '%s' " "because configuration is invalid", xml_obj->name, id); } } for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; setup_container(rsc, scheduler); link_rsc2remotenode(scheduler, rsc); } scheduler->resources = g_list_sort(scheduler->resources, pe__cmp_rsc_priority); if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) { /* Ignore */ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled) && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) { pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined"); pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option"); pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler) { xmlNode *xml_tag = NULL; scheduler->tags = pcmk__strkey_table(free, destroy_tag); for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = ID(xml_tag); if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) { continue; } if (tag_id == NULL) { pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID, (const char *) xml_tag->name); continue; } for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) { const char *obj_ref = ID(xml_obj_ref); if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) { continue; } if (obj_ref == NULL) { pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID, xml_obj_ref->name, tag_id); continue; } if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) { return FALSE; } } } return TRUE; } /* The ticket state section: * "/cib/status/tickets/ticket_state" */ static gboolean unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler) { const char *ticket_id = NULL; const char 
*granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; pcmk_ticket_t *ticket = NULL; ticket_id = ID(xml_ticket); if (pcmk__str_empty(ticket_id)) { return FALSE; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(scheduler->tickets, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, scheduler); if (ticket == NULL) { return FALSE; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = pcmk__xml_attr_value(xIter); if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) { continue; } g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value)); } granted = g_hash_table_lookup(ticket->state, "granted"); if (granted && crm_is_true(granted)) { ticket->granted = TRUE; crm_info("We have ticket '%s'", ticket->id); } else { ticket->granted = FALSE; crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, "last-granted"); if (last_granted) { long long last_granted_ll; pcmk__scan_ll(last_granted, &last_granted_ll, 0LL); ticket->last_granted = (time_t) last_granted_ll; } standby = g_hash_table_lookup(ticket->state, "standby"); if (standby && crm_is_true(standby)) { ticket->standby = TRUE; if (ticket->granted) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { ticket->standby = FALSE; } crm_trace("Done with ticket state for %s", ticket_id); return TRUE; } static gboolean unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) { continue; } unpack_ticket_state(xml_obj, scheduler); } return TRUE; } static void unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *resource_discovery_enabled = NULL; const xmlNode *attrs = NULL; pcmk_resource_t *rsc = NULL; if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { return; } if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) { return; } crm_trace("Processing Pacemaker Remote node %s", pe__node_name(this_node)); pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_MAINTENANCE), &(this_node->details->remote_maintenance), 0); rsc = this_node->details->remote_rsc; if (this_node->details->remote_requires_reset == FALSE) { this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; } attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, scheduler); if (pe__shutdown_requested(this_node)) { crm_info("%s is shutting down", pe__node_name(this_node)); this_node->details->shutdown = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("%s is in standby mode", pe__node_name(this_node)); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) || ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) { crm_info("%s is in maintenance mode", pe__node_name(this_node)); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { if (pe__is_remote_node(this_node) && 
!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { pcmk__config_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY " attribute on Pacemaker Remote node %s" " because fencing is disabled", pe__node_name(this_node)); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("%s has resource discovery disabled", pe__node_name(this_node)); this_node->details->rsc_discovery_enabled = FALSE; } } } /*! * \internal * \brief Unpack a cluster node's transient attributes * * \param[in] state CIB node state XML * \param[in,out] node Cluster node whose attributes are being unpacked * \param[in,out] scheduler Scheduler data */ static void unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, node, TRUE, scheduler); if (crm_is_true(pe_node_attribute_raw(node, "standby"))) { crm_info("%s is in standby mode", pe__node_name(node)); node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) { crm_info("%s is in maintenance mode", pe__node_name(node)); node->details->maintenance = TRUE; } discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__config_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY " attribute " "for %s because disabling resource discovery is " "not allowed for cluster nodes", pe__node_name(node)); } } /*! * \internal * \brief Unpack a node state entry (first pass) * * Unpack one node state entry from status. This unpacks information from the * node_state element itself and node attributes inside it, but not the * resource history inside it. Multiple passes through the status are needed to * fully unpack everything. * * \param[in] state CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *id = NULL; const char *uname = NULL; pcmk_node_t *this_node = NULL; id = crm_element_value(state, XML_ATTR_ID); if (id == NULL) { pcmk__config_err("Ignoring invalid " XML_CIB_TAG_STATE " entry without " XML_ATTR_ID); crm_log_xml_info(state, "missing-id"); return; } uname = crm_element_value(state, XML_ATTR_UNAME); if (uname == NULL) { /* If a joining peer causes the cluster to acquire quorum from Corosync * before the peer has joined the controller's CPG membership, the * node_state entry created for it may not have a uname yet. Recognize * the node as pending and wait for it to join CPG. */ crm_trace("Handling " XML_CIB_TAG_STATE " entry with id=\"%s\" without " XML_ATTR_UNAME, id); } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { pcmk__config_warn("Ignoring recorded node state for id=\"%s\" (%s) " "because it is no longer in the configuration", id, pcmk__s(uname, "uname unknown")); return; } if (pe__is_guest_or_remote_node(this_node)) { /* We can't determine the online status of Pacemaker Remote nodes until * after all resource history has been unpacked. In this first pass, we * do need to mark whether the node has been fenced, as this plays a * role during unpacking cluster node resource state.
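*
* Reviewer's summary of the unpacking passes implied here (an annotation,
* not upstream text):
*
*   pass 1:  unpack_node_state()   - node attributes, fenced flag, and
*                                    cluster-node online status
*   pass 2+: unpack_node_history() - resource history, repeated while it
*                                    returns EAGAIN (progress was made)
*   final:   unpack_node_history() with fence=true to catch stragglers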
*/ pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_FENCED), &(this_node->details->remote_was_fenced), 0); return; } unpack_transient_attributes(state, this_node, scheduler); /* Provisionally mark this cluster node as clean. We have at least seen it * in the current cluster's lifetime. */ this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; crm_trace("Determining online status of cluster node %s (id %s)", pe__node_name(this_node), id); determine_online_status(state, this_node, scheduler); if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate) && this_node->details->online && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(scheduler, this_node, "cluster does not have quorum", FALSE); } } /*! * \internal * \brief Unpack nodes' resource history as much as possible * * Unpack as many nodes' resource history as possible in one pass through the * status. We need to process Pacemaker Remote nodes' connections/containers * before unpacking their history; the connection/container history will be * in another node's history, so it might take multiple passes to unpack * everything. * * \param[in] status CIB XML status section * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done, * or EAGAIN if more unpacking remains to be done) */ static int unpack_node_history(const xmlNode *status, bool fence, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; // Loop through all node_state entries in CIB status for (const xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE); state != NULL; state = crm_next_same_xml(state)) { const char *id = ID(state); const char *uname = crm_element_value(state, XML_ATTR_UNAME); pcmk_node_t *this_node = NULL; if ((id == NULL) || (uname == NULL)) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history from malformed " XML_CIB_TAG_STATE " without id and/or uname"); continue; } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history for node %s because " "no longer in configuration", id); continue; } if (this_node->details->unpacked) { crm_trace("Not unpacking resource history for node %s because " "already unpacked", id); continue; } if (fence) { // We're processing all remaining nodes } else if (pe__is_guest_node(this_node)) { /* We can unpack a guest node's history only after we've unpacked * other resource history to the point that we know that the node's * connection and containing resource are both up. */ pcmk_resource_t *rsc = this_node->details->remote_rsc; if ((rsc == NULL) || (rsc->role != pcmk_role_started) || (rsc->container->role != pcmk_role_started)) { crm_trace("Not unpacking resource history for guest node %s " "because container and connection are not known to " "be up", id); continue; } } else if (pe__is_remote_node(this_node)) { /* We can unpack a remote node's history only after we've unpacked * other resource history to the point that we know that the node's * connection is up, with the exception of when shutdown locks are * in use. 
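*
* The caller drives these checks to a fixed point; a minimal sketch of that
* driver loop (see unpack_status() below):
*
*   while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
*       // each pass can unblock guest/remote nodes for the next pass
*   }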
*/ pcmk_resource_t *rsc = this_node->details->remote_rsc; if ((rsc == NULL) || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock) && (rsc->role != pcmk_role_started))) { crm_trace("Not unpacking resource history for remote node %s " "because connection is not known to be up", id); continue; } /* If fencing and shutdown locks are disabled and we're not processing * unseen nodes, then we don't want to unpack offline nodes until online * nodes have been unpacked. This allows us to number active clone * instances first. */ } else if (!pcmk_any_flags_set(scheduler->flags, pcmk_sched_fencing_enabled |pcmk_sched_shutdown_lock) && !this_node->details->online) { crm_trace("Not unpacking resource history for offline " "cluster node %s", id); continue; } if (pe__is_guest_or_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); unpack_handle_remote_attrs(this_node, state, scheduler); } crm_trace("Unpacking resource history for %snode %s", (fence? "unseen " : ""), id); this_node->details->unpacked = TRUE; unpack_node_lrm(this_node, state, scheduler); rc = EAGAIN; // Other node histories might depend on this one } return rc; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? */ gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler) { xmlNode *state = NULL; crm_trace("Beginning unpack"); if (scheduler->tickets == NULL) { scheduler->tickets = pcmk__strkey_table(free, destroy_ticket); } for (state = pcmk__xe_first_child(status); state != NULL; state = pcmk__xe_next(state)) { if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) { unpack_tickets_state((xmlNode *) state, scheduler); } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { unpack_node_state(state, scheduler); } } while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) { crm_trace("Another pass through node resource histories is needed"); } // Now catch any nodes we didn't see unpack_node_history(status, pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled), scheduler); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (scheduler->stop_needed != NULL) { for (GList *item = scheduler->stop_needed; item; item = item->next) { pcmk_resource_t *container = item->data; pcmk_node_t *node = pe__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(scheduler->stop_needed); scheduler->stop_needed = NULL; } /* Now that we know status of all Pacemaker Remote connections and nodes, * we can stop connections for node shutdowns, and check the online status * of remote/guest nodes that didn't have any node history to unpack. */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *this_node = gIter->data; if (!pe__is_guest_or_remote_node(this_node)) { continue; } if (this_node->details->shutdown && (this_node->details->remote_rsc != NULL)) { pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped, "remote shutdown"); } if (!this_node->details->unpacked) { determine_remote_online_status(scheduler, this_node); } } return TRUE; } /*! 
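* (Reviewer's illustration, added for review, of the @COMPAT handling
* documented below: a legacy in_ccm="1" recorded by a DC older than 2.1.7
* yields the scheduler's effective time, in_ccm="0" yields 0, and a
* new-style in_ccm="1700000000" yields that epoch timestamp unchanged.)
*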
* \internal * \brief Unpack node's time when it became a member at the cluster layer * * \param[in] node_state Node's node_state entry * \param[in,out] scheduler Scheduler data * * \return Epoch time when node became a cluster member * (or scheduler effective time for legacy entries) if a member, * 0 if not a member, or -1 if no valid information available */ static long long unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler) { const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM); int member = 0; if (member_time == NULL) { return -1LL; } else if (crm_str_to_boolean(member_time, &member) == 1) { /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was * recorded as a boolean for a DC < 2.1.7, or the node is pending * shutdown and has left the CPG, in which case it was set to 1 to avoid * fencing for node-pending-timeout. * * We return the effective time for in_ccm=1 because what's important to * avoid fencing is that effective time minus this value is less than * the pending node timeout. */ return member? (long long) get_effective_time(scheduler) : 0LL; } else { long long when_member = 0LL; if ((pcmk__scan_ll(member_time, &when_member, 0LL) != pcmk_rc_ok) || (when_member < 0LL)) { crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM " in " XML_CIB_TAG_STATE " entry", member_time); return -1LL; } return when_member; } } /*! * \internal * \brief Unpack node's time when it became online in process group * * \param[in] node_state Node's node_state entry * * \return Epoch time when node became online in process group (or 0 if not * online, or 1 for legacy online entries) */ static long long unpack_node_online(const xmlNode *node_state) { const char *peer_time = crm_element_value(node_state, PCMK__XA_CRMD); // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline" if (pcmk__str_eq(peer_time, OFFLINESTATUS, pcmk__str_casei|pcmk__str_null_matches)) { return 0LL; } else if (pcmk__str_eq(peer_time, ONLINESTATUS, pcmk__str_casei)) { return 1LL; } else { long long when_online = 0LL; if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok) || (when_online < 0)) { crm_warn("Unrecognized value '%s' for " PCMK__XA_CRMD " in " XML_CIB_TAG_STATE " entry, assuming offline", peer_time); return 0LL; } return when_online; } } /*! 
* \internal * \brief Unpack node attribute for user-requested fencing * * \param[in] node Node to check * \param[in] node_state Node's node_state entry in CIB status * * \return \c true if fencing has been requested for \p node, otherwise \c false */ static bool unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state) { long long value = 0LL; int value_i = 0; const char *value_s = pe_node_attribute_raw(node, PCMK_NODE_ATTR_TERMINATE); // Value may be boolean or an epoch time if (crm_str_to_boolean(value_s, &value_i) == 1) { return (value_i != 0); } if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) { return (value > 0); } crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE " node attribute for %s", value_s, pe__node_name(node)); return false; } static gboolean determine_online_status_no_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); if (when_member <= 0) { crm_trace("Node %s is %sdown", pe__node_name(this_node), ((when_member < 0)? "presumed " : "")); } else if (when_online > 0) { if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { online = TRUE; } else { crm_debug("Node %s is not ready to run resources: %s", pe__node_name(this_node), join); } } else if (this_node->details->expected_up == FALSE) { crm_trace("Node %s controller is down: " "member@%lld online@%lld join=%s expected=%s", pe__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } else { /* mark it unclean */ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE); crm_info("Node %s member@%lld online@%lld join=%s expected=%s", pe__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } return online; } /*! * \internal * \brief Check whether a node has taken too long to join controller group * * \param[in,out] scheduler Scheduler data * \param[in] node Node to check * \param[in] when_member Epoch time when node became a cluster member * \param[in] when_online Epoch time when node joined controller group * * \return true if node has been pending (on the way up) longer than * node-pending-timeout, otherwise false * \note This will also update the cluster's recheck time if appropriate.
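*
* \note Worked example (reviewer's illustration): with
*       node-pending-timeout=120s, when_member=1000, and when_online=0,
*       the node times out once the effective time reaches
*       1000 + 120 = 1120; until then, 1120 is merely recorded as the
*       next recheck time.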
*/ static inline bool pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, long long when_member, long long when_online) { if ((scheduler->node_pending_timeout > 0) && (when_member > 0) && (when_online <= 0)) { // There is a timeout on pending nodes, and node is pending time_t timeout = when_member + scheduler->node_pending_timeout; if (get_effective_time(node->details->data_set) >= timeout) { return true; // Node has timed out } // Node is pending, but still has time pe__update_recheck_time(timeout, scheduler, "pending node timeout"); } return false; } static bool determine_online_status_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { bool termination_requested = unpack_node_terminate(this_node, node_state); const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); /* - PCMK__XA_JOIN ::= member|down|pending|banned - PCMK__XA_EXPECTED ::= member|down @COMPAT with entries recorded for DCs < 2.1.7 - PCMK__XA_IN_CCM ::= true|false - PCMK__XA_CRMD ::= online|offline Since crm_feature_set 3.18.0 (pacemaker-2.1.7): - PCMK__XA_IN_CCM ::= <timestamp>|0 Since when the node has been a cluster member. A value of 0 means the node is not a cluster member. - PCMK__XA_CRMD ::= <timestamp>|0 Since when the peer has been online in CPG. A value of 0 means the peer is offline in CPG. */ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s", pe__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, ""), (termination_requested? " (termination requested)" : "")); if (this_node->details->shutdown) { crm_debug("%s is shutting down", pe__node_name(this_node)); /* Slightly different criteria since we can't shut down a dead peer */ return (when_online > 0); } if (when_member < 0) { pe_fence_node(scheduler, this_node, "peer has not been seen by the cluster", FALSE); return false; } if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) { pe_fence_node(scheduler, this_node, "peer failed Pacemaker membership criteria", FALSE); } else if (termination_requested) { if ((when_member <= 0) && (when_online <= 0) && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) { crm_info("%s was fenced as requested", pe__node_name(this_node)); return false; } pe_fence_node(scheduler, this_node, "fencing was requested", false); } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_null_matches)) { if (pending_too_long(scheduler, this_node, when_member, when_online)) { pe_fence_node(scheduler, this_node, "peer pending timed out on joining the process group", FALSE); } else if ((when_member > 0) || (when_online > 0)) { crm_info("- %s is not ready to run resources", pe__node_name(this_node)); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", pe__node_name(this_node)); } } else if (when_member <= 0) { // Consider `priority-fencing-delay` for lost nodes pe_fence_node(scheduler, this_node, "peer is no longer part of the cluster", TRUE); } else if (when_online <= 0) { pe_fence_node(scheduler, this_node, "peer process is no longer available", FALSE); /* Everything is running at this point, now check join state */ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) { crm_info("%s is active", pe__node_name(this_node)); } else if (pcmk__str_any_of(join,
CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) { crm_info("%s is not ready to run resources", pe__node_name(this_node)); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { pe_fence_node(scheduler, this_node, "peer was in an unknown state", FALSE); } return (when_member > 0); } static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node) { pcmk_resource_t *rsc = this_node->details->remote_rsc; pcmk_resource_t *container = NULL; pcmk_node_t *host = NULL; /* If there is a node state entry for a (former) Pacemaker Remote node * but no resource creating that node, the node's connection resource will * be NULL. Consider it an offline remote node in that case. */ if (rsc == NULL) { this_node->details->online = FALSE; goto remote_online_done; } container = rsc->container; if (container && pcmk__list_of_1(rsc->running_on)) { host = rsc->running_on->data; } /* If the resource is currently started, mark it online. */ if (rsc->role == pcmk_role_started) { crm_trace("%s node %s presumed ONLINE because connection resource is started", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if ((rsc->role == pcmk_role_started) && (rsc->next_role == pcmk_role_stopped)) { crm_trace("%s node %s shutting down because connection resource is stopping", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. */ if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; } else if ((rsc->role == pcmk_role_stopped) || ((container != NULL) && (container->role == pcmk_role_stopped))) { crm_trace("%s node %s OFFLINE because its resource is stopped", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = FALSE; } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } remote_online_done: crm_trace("Remote node %s online=%s", this_node->details->id, this_node->details->online ? "TRUE" : "FALSE"); } static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED); CRM_CHECK(this_node != NULL, return); this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { this_node->details->expected_up = TRUE; } if (this_node->details->type == node_ping) { this_node->details->unclean = FALSE; online = FALSE; /* As far as resource management is concerned, * the node is safely offline. 
* Anyone caught abusing this logic will be shot */ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { online = determine_online_status_no_fencing(scheduler, node_state, this_node); } else { online = determine_online_status_fencing(scheduler, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; // @COMPAT deprecated and unused this_node->weight = -INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->fixed = TRUE; // @COMPAT deprecated and unused this_node->weight = -INFINITY; } if (this_node->details->type == node_ping) { crm_info("%s is not a Pacemaker node", pe__node_name(this_node)); } else if (this_node->details->unclean) { pcmk__sched_warn("%s is unclean", pe__node_name(this_node)); } else if (this_node->details->online) { crm_info("%s is %s", pe__node_name(this_node), this_node->details->shutdown ? "shutting down" : this_node->details->pending ? "pending" : this_node->details->standby ? "standby" : this_node->details->maintenance ? "maintenance" : "online"); } else { crm_trace("%s is offline", pe__node_name(this_node)); } } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!pcmk__str_empty(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! * \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; CRM_ASSERT(end); basename = strndup(last_rsc_id, end - last_rsc_id + 1); CRM_ASSERT(basename); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. 
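*
* \note Illustration (reviewer's, not upstream): clone_zero("myrsc")
*       returns "myrsc:0"; for comparison, pe_base_name_end("myrsc:3")
*       returns a pointer to the final 'c' of the base name, and
*       clone_strip("myrsc:3") returns "myrsc".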
*/ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; CRM_ASSERT(end); zero = calloc(base_name_len + 3, sizeof(char)); CRM_ASSERT(zero); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static pcmk_resource_t * create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { pcmk_node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pe_find_node(scheduler->nodes, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, "remote", NULL, scheduler); } link_rsc2remotenode(scheduler, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) { /* This orphaned rsc needs to be mapped to a container. */ crm_trace("Detected orphaned container filler %s", rsc_id); pe__set_resource_flags(rsc, pcmk_rsc_removed_filler); } pe__set_resource_flags(rsc, pcmk_rsc_removed); scheduler->resources = g_list_append(scheduler->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history * * \param[in,out] parent Clone resource that orphan will be added to * \param[in] rsc_id Orphan's resource ID * \param[in] node Where orphan is active (for logging only) * \param[in,out] scheduler Scheduler data * * \return Newly added orphaned instance of \p parent */ static pcmk_resource_t * create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *top = pe__create_clone_child(parent, scheduler); // find_rsc() because we might be a cloned group pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pcmk_rsc_match_clone_only); pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, pe__node_name(node)); return orphan; } /*! * \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of clone-max instances); * (3) a newly created orphan (i.e. clone-max instances are already active). 
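*
* For example (reviewer's illustration): with clone-max=2 and history for
* "rsc:0" active on node1, looking up "rsc" on node1 returns that active
* instance, looking it up on node2 returns an inactive instance, and once
* both instances are associated with nodes, any further history entry gets
* a newly created orphan.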
* * \param[in,out] scheduler Scheduler data * \param[in] node Node on which to check for instance * \param[in,out] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (no instance) */ static pcmk_resource_t * find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, pcmk_resource_t *parent, const char *rsc_id) { GList *rIter = NULL; pcmk_resource_t *rsc = NULL; pcmk_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; CRM_ASSERT(parent != NULL); CRM_ASSERT(pe_rsc_is_clone(parent)); CRM_ASSERT(!pcmk_is_set(parent->flags, pcmk_rsc_unique)); // Check for active (or partially active, for cloned groups) instance pcmk__rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, pe__node_name(node), parent->id); for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) { GList *locations = NULL; pcmk_resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if globally-unique * was flipped from true to false); and * (3) when we re-run calculations on the same scheduler data as part of * a simulation. */ child->fns->location(child, &locations, 2); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (((pcmk_node_t *) locations->data)->details == node->details) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if globally-unique is switched from true to * false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. */ if (rsc->running_on) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, pe__node_name(node)); skip_inactive = TRUE; rsc = NULL; } else { pcmk__rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && !pcmk_is_set(child->flags, pcmk_rsc_blocked)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); /* ... 
but don't use it if it was already associated with a * pending action on another node */ if (inactive_instance && inactive_instance->pending_node && (inactive_instance->pending_node->details != node->details)) { inactive_instance = NULL; } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pcmk__rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has "requires" set to "quorum" or "nothing", and we don't * have a clone instance for every node, we don't want to consume a valid * instance number for unclean nodes. Such instances may appear to be active * according to the history, but should be considered inactive, so we can * start an instance elsewhere. Treat such instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pe__is_guest_node(node) && !pe__is_universal_clone(parent, scheduler)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler); pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static pcmk_resource_t * unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, const char *rsc_id) { pcmk_resource_t *rsc = NULL; pcmk_resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(scheduler->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when clone-max=0, we create * a single :0 orphan to match against here. */ char *clone0_id = clone_zero(rsc_id); pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources, clone0_id); if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->variant > pcmk_rsc_variant_primitive) { crm_trace("Resource history for %s is orphaned because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pe_rsc_is_anon_clone(parent)) { if (pe_rsc_is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(scheduler, node, parent, base); free(base); CRM_ASSERT(rsc != NULL); } } if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei) && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) { pcmk__str_update(&rsc->clone_name, rsc_id); pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, pe__node_name(node), rsc->id, pcmk_is_set(rsc->flags, pcmk_rsc_removed)? 
" (ORPHAN)" : ""); } return rsc; } static pcmk_resource_t * process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node)); rsc = create_fake_resource(rsc_id, rsc_entry, scheduler); if (rsc == NULL) { return NULL; } if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) { pe__clear_resource_flags(rsc, pcmk_rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", scheduler); } return rsc; } static void process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node, enum action_fail_response on_fail) { pcmk_node_t *tmpnode = NULL; char *reason = NULL; enum action_fail_response save_on_fail = pcmk_on_fail_ignore; CRM_ASSERT(rsc); pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), pe__node_name(node), fail2text(on_fail)); /* process current state */ if (rsc->role != pcmk_role_unknown) { pcmk_resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { pcmk_node_t *n = pe__copy_node(node); pcmk__rsc_trace(rsc, "%s%s%s known on %s", rsc->id, ((rsc->clone_name == NULL)? "" : " also known as "), ((rsc->clone_name == NULL)? "" : rsc->clone_name), pe__node_name(n)); g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n); } if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) { break; } iter = iter->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if ((rsc->role > pcmk_role_stopped) && node->details->online == FALSE && node->details->maintenance == FALSE && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (pe__is_guest_node(node)) { pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); should_fence = TRUE; } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) { if (pe__is_remote_node(node) && node->details->remote_rsc && !pcmk_is_set(node->details->remote_rsc->flags, pcmk_rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. */ node->details->unseen = TRUE; reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(rsc->cluster, node, reason, FALSE); } free(reason); } /* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). 
*/ save_on_fail = on_fail; if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = pcmk_on_fail_ignore; } switch (on_fail) { case pcmk_on_fail_ignore: /* nothing to do */ break; case pcmk_on_fail_demote: pe__set_resource_flags(rsc, pcmk_rsc_failed); demote_action(rsc, node, FALSE); break; case pcmk_on_fail_fence_node: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(rsc->cluster, node, reason, FALSE); free(reason); break; case pcmk_on_fail_standby_node: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case pcmk_on_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ pe__clear_resource_flags(rsc, pcmk_rsc_managed); pe__set_resource_flags(rsc, pcmk_rsc_blocked); break; case pcmk_on_fail_ban: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__", rsc->cluster); break; case pcmk_on_fail_stop: pe__set_next_role(rsc, pcmk_role_stopped, "on-fail=stop"); break; case pcmk_on_fail_restart: if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); stop_action(rsc, node, FALSE); } break; case pcmk_on_fail_restart_container: pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); if (rsc->container && pe_rsc_is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. */ rsc->cluster->stop_needed = g_list_prepend(rsc->cluster->stop_needed, rsc->container); } else if (rsc->container) { stop_action(rsc->container, node, FALSE); } else if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { stop_action(rsc, node, FALSE); } break; case pcmk_on_fail_reset_remote: pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id); } if (tmpnode && pe__is_remote_node(tmpnode) && tmpnode->details->remote_was_fenced == 0) { /* The remote connection resource failed in a way that * should result in fencing the remote node. */ pe_fence_node(rsc->cluster, tmpnode, "remote connection is unrecoverable", FALSE); } } /* Require the stop action regardless of whether fencing is occurring. */ if (rsc->role > pcmk_role_stopped) { stop_action(rsc, node, FALSE); } /* if reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->remote_reconnect_ms) { pe__set_next_role(rsc, pcmk_role_stopped, "remote reset"); } break; } /* Ensure a remote node connection failure forces an unclean remote node * to be fenced. By setting unseen = FALSE, the remote node failure will * result in a fencing operation regardless of whether we attempt to * reconnect to the remote node in this transition.
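 *
 * For example, if remote node R's connection resource has failed and R is
 * unclean, clearing unseen here ensures R is still fenced even in a
 * transition that makes no attempt to restart the connection.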
*/ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) { tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id); if (tmpnode && tmpnode->details->unclean) { tmpnode->details->unseen = FALSE; } } if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pcmk__config_warn("Detected active orphan %s running on %s", rsc->id, pe__node_name(node)); } else { pcmk__config_warn("Resource '%s' must be stopped manually on " "%s because cluster is configured not to " "stop active orphans", rsc->id, pe__node_name(node)); } } native_add_running(rsc, node, rsc->cluster, (save_on_fail != pcmk_on_fail_ignore)); switch (on_fail) { case pcmk_on_fail_ignore: break; case pcmk_on_fail_demote: case pcmk_on_fail_block: pe__set_resource_flags(rsc, pcmk_rsc_failed); break; default: pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); break; } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { /* Only do this for older status sections that included instance numbers * Otherwise stopped instances will appear as orphans */ pcmk__rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); free(rsc->clone_name); rsc->clone_name = NULL; } else { GList *possible_matches = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, FALSE); GList *gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { pcmk_action_t *stop = (pcmk_action_t *) gIter->data; pe__set_action_flags(stop, pcmk_action_optional); } g_list_free(possible_matches); } /* A successful stop after migrate_to on the migration source doesn't make * the partially migrated resource stopped on the migration target. */ if ((rsc->role == pcmk_role_stopped) && rsc->partial_migration_source && rsc->partial_migration_source->details == node->details && rsc->partial_migration_target && rsc->running_on) { rsc->role = pcmk_role_started; } } /* create active recurring operations as optional */ static void process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc, int start_index, int stop_index, GList *sorted_op_list, pcmk_scheduler_t *scheduler) { int counter = -1; const char *task = NULL; const char *status = NULL; GList *gIter = sorted_op_list; CRM_ASSERT(rsc); pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = ID(rsc_op); counter++; if (node->details->online == FALSE) { pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline", rsc->id, pe__node_name(node)); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active", id, pe__node_name(node)); continue; } else if (counter < start_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d", id, pe__node_name(node), counter); continue; } crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if (interval_ms == 0) { pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring", id, pe__node_name(node)); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (pcmk__str_eq(status, "-1", pcmk__str_casei)) { pcmk__rsc_trace(rsc, "Skipping %s on %s: status", id, pe__node_name(node)); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = 
pcmk__op_key(rsc->id, task, interval_ms); pcmk__rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node)); custom_action(rsc, key, task, node, TRUE, scheduler); } } void calculate_active_ops(const GList *sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; *stop_index = -1; *start_index = -1; for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { const xmlNode *rsc_op = (const xmlNode *) iter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei) && pcmk__str_eq(status, "0", pcmk__str_casei)) { *stop_index = counter; } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if (pcmk__strcase_any_of(rc, "0", "8", NULL)) { implied_monitor_start = counter; } } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, NULL)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { time_t lock_time = 0; // When lock started (i.e. node shutdown time) if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK, &lock_time) == pcmk_ok) && (lock_time != 0)) { if ((scheduler->shutdown_lock > 0) && (get_effective_time(scheduler) > (lock_time + scheduler->shutdown_lock))) { pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired", rsc->id, pe__node_name(node)); pe__clear_resource_history(rsc, node); } else { /* @COMPAT I don't like breaking const signatures, but * rsc->lock_node should really be const -- we just can't change it * until the next API compatibility break. */ rsc->lock_node = (pcmk_node_t *) node; rsc->lock_time = lock_time; } } } /*! 
* \internal * \brief Unpack one lrm_resource entry from a node's CIB status * * \param[in,out] node Node whose status is being unpacked * \param[in] rsc_entry lrm_resource XML being unpacked * \param[in,out] scheduler Scheduler data * * \return Resource corresponding to the entry, or NULL if no operation history */ static pcmk_resource_t * unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource, pcmk_scheduler_t *scheduler) { GList *gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = pcmk_role_unknown; const char *rsc_id = ID(lrm_resource); pcmk_resource_t *rsc = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum action_fail_response on_fail = pcmk_on_fail_ignore; enum rsc_role_e saved_role = pcmk_role_unknown; if (rsc_id == NULL) { pcmk__config_err("Ignoring invalid " XML_LRM_TAG_RESOURCE " entry: No " XML_ATTR_ID); crm_log_xml_info(lrm_resource, "missing-id"); return NULL; } crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s", rsc_id, pe__node_name(node)); // Build a list of individual lrm_rsc_op entries, so we can sort them for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP); rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) { op_list = g_list_prepend(op_list, rsc_op); } if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } } /* find the resource */ rsc = unpack_find_resource(scheduler, node, rsc_id); if (rsc == NULL) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } else { rsc = process_orphan_resource(lrm_resource, node, scheduler); } } CRM_ASSERT(rsc != NULL); // Check whether the resource is "shutdown-locked" to this node if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { unpack_shutdown_lock(lrm_resource, rsc, node, scheduler); } /* process operations */ saved_role = rsc->role; rsc->role = pcmk_role_unknown; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, scheduler); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail); if (get_target_role(rsc, &req_role)) { if ((rsc->next_role == pcmk_role_unknown) || (req_role < rsc->next_role)) { pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE); } else if (req_role > rsc->next_role) { pcmk__rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list, pcmk_scheduler_t *scheduler) { for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) { pcmk_resource_t *rsc; pcmk_resource_t *container; const char *rsc_id; const char *container_id; if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) { continue; } container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER); rsc_id = crm_element_value(rsc_entry, 
XML_ATTR_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(scheduler->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(scheduler->resources, rsc_id); if ((rsc == NULL) || (rsc->container != NULL) || !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) { continue; } pcmk__rsc_trace(rsc, "Mapped container of orphaned resource %s to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } /*! * \internal * \brief Unpack one node's lrm status section * * \param[in,out] node Node whose status is being unpacked * \param[in] xml CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler) { bool found_orphaned_container_filler = false; // Drill down to lrm_resources section xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE); if (xml == NULL) { return; } xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE); if (xml == NULL) { return; } // Unpack each lrm_resource entry for (const xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE); rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler); if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) { found_orphaned_container_filler = true; } } /* Now that all resource state has been unpacked for this node, map any * orphaned container fillers to their container resource. */ if (found_orphaned_container_filler) { handle_orphaned_container_fillers(xml, scheduler); } } static void set_active(pcmk_resource_t *rsc) { const pcmk_resource_t *top = pe__const_top_resource(rsc, false); if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) { rsc->role = pcmk_role_unpromoted; } else { rsc->role = pcmk_role_started; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pcmk_node_t *node = value; int *score = user_data; node->weight = *score; } #define XPATH_NODE_STATE "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE #define SUB_XPATH_LRM_RESOURCE "/" XML_CIB_TAG_LRM \ "/" XML_LRM_TAG_RESOURCES \ "/" XML_LRM_TAG_RESOURCE #define SUB_XPATH_LRM_RSC_OP "/" XML_LRM_TAG_RSC_OP static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, int target_rc, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node, "']" SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", resource, "']" SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_TASK "='", op, "'", NULL); /* Need to check against transition_magic too? 
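 *
 * For reference, with a hypothetical resource "rsc1", operation "monitor",
 * and node "node1", the XPath built above expands to roughly:
 *
 *   /cib/status/node_state[@uname='node1']
 *       /lrm/lrm_resources/lrm_resource[@id='rsc1']
 *       /lrm_rsc_op[@operation='monitor'
 *
 * with the closing bracket (and any migration source/target predicate)
 * appended below.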
*/ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) { pcmk__g_strcat(xpath, " and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']", NULL); } else if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) { pcmk__g_strcat(xpath, " and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']", NULL); } else { g_string_append_c(xpath, ']'); } xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); if (xml && target_rc >= 0) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_EXEC_ERROR; crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status); if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) { return NULL; } } return xml; } static xmlNode * find_lrm_resource(const char *rsc_id, const char *node_name, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']" SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']", NULL); xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); return xml; } /*! * \internal * \brief Check whether a resource has no completed action history on a node * * \param[in,out] rsc Resource to check * \param[in] node_name Node to check * * \return true if \p rsc is unknown on \p node_name, otherwise false */ static bool unknown_on_node(pcmk_resource_t *rsc, const char *node_name) { bool result = false; xmlXPathObjectPtr search; GString *xpath = g_string_sized_new(256); /* Match any history entry whose rc-code is not 193 (PCMK_OCF_UNKNOWN, * which marks a pending action) */ pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']" SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc->id, "']" SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_RC "!='193']", NULL); search = xpath_search(rsc->cluster->input, (const char *) xpath->str); result = (numXpathResults(search) == 0); freeXpathObject(search); g_string_free(xpath, TRUE); return result; } /*! * \brief Check whether a probe/monitor indicating the resource was not running * on a node happened after some event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that monitor is being compared to * \param[in] same_node Whether the operations are on the same node * \param[in,out] scheduler Scheduler data * * \return true if such a monitor happened after event, false otherwise */ static bool monitor_not_running_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, bool same_node, pcmk_scheduler_t *scheduler) { /* Any probe/monitor operation on the node indicating it was not running * there */ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name, NULL, PCMK_OCF_NOT_RUNNING, scheduler); return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0); } /*!
* \brief Check whether any non-monitor operation on a node happened after some * event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that non-monitor is being compared to * \param[in] same_node Whether the operations are on the same node * \param[in,out] scheduler Scheduler data * * \return true if such an operation happened after event, false otherwise */ static bool non_monitor_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, bool same_node, pcmk_scheduler_t *scheduler) { xmlNode *lrm_resource = NULL; lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler); if (lrm_resource == NULL) { return false; } for (xmlNode *op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP); op != NULL; op = crm_next_same_xml(op)) { const char *task = NULL; if (op == xml_op) { continue; } task = crm_element_value(op, XML_LRM_ATTR_TASK); if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL) && pe__is_newer_op(op, xml_op, same_node) > 0) { return true; } } return false; } /*! * \brief Check whether the resource has newer state on a node after a migration * attempt * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] migrate_to Any migrate_to event that is being compared to * \param[in] migrate_from Any migrate_from event that is being compared to * \param[in,out] scheduler Scheduler data * * \return true if the resource has newer state on \p node_name, otherwise false */ static bool newer_state_after_migrate(const char *rsc_id, const char *node_name, const xmlNode *migrate_to, const xmlNode *migrate_from, pcmk_scheduler_t *scheduler) { const xmlNode *xml_op = migrate_to; const char *source = NULL; const char *target = NULL; bool same_node = false; if (migrate_from) { xml_op = migrate_from; } source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); /* It's preferred to compare to the migrate event on the same node if * one exists, since call IDs are more reliable. */ if (pcmk__str_eq(node_name, target, pcmk__str_casei)) { if (migrate_from) { xml_op = migrate_from; same_node = true; } else { xml_op = migrate_to; } } else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) { if (migrate_to) { xml_op = migrate_to; same_node = true; } else { xml_op = migrate_from; } } /* If there's any newer non-monitor operation on the node, or any newer * probe/monitor operation on the node indicating it was not running there, * the migration events potentially no longer matter for the node. */ return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler) || monitor_not_running_after(rsc_id, node_name, xml_op, same_node, scheduler); } /*!
* \internal * \brief Parse migration source and target node names from history entry * * \param[in] entry Resource history entry for a migration action * \param[in] source_node If not NULL, source must match this node * \param[in] target_node If not NULL, target must match this node * \param[out] source_name Where to store migration source node name * \param[out] target_name Where to store migration target node name * * \return Standard Pacemaker return code */ static int get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node, const pcmk_node_t *target_node, const char **source_name, const char **target_name) { *source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE); *target_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_TARGET); if ((*source_name == NULL) || (*target_name == NULL)) { pcmk__config_err("Ignoring resource history entry %s without " XML_LRM_ATTR_MIGRATE_SOURCE " and " XML_LRM_ATTR_MIGRATE_TARGET, ID(entry)); return pcmk_rc_unpack_error; } if ((source_node != NULL) && !pcmk__str_eq(*source_name, source_node->details->uname, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " XML_LRM_ATTR_MIGRATE_SOURCE "='%s' does not match %s", ID(entry), *source_name, pe__node_name(source_node)); return pcmk_rc_unpack_error; } if ((target_node != NULL) && !pcmk__str_eq(*target_name, target_node->details->uname, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " XML_LRM_ATTR_MIGRATE_TARGET "='%s' does not match %s", ID(entry), *target_name, pe__node_name(target_node)); return pcmk_rc_unpack_error; } return pcmk_rc_ok; } /* * \internal * \brief Add a migration source to a resource's list of dangling migrations * * If the migrate_to and migrate_from actions in a live migration both * succeeded, but there is no stop on the source, the migration is considered * "dangling." Add the source to the resource's dangling migration list, which * will be used to schedule a stop on the source without affecting the target. * * \param[in,out] rsc Resource involved in migration * \param[in] node Migration source */ static void add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node) { pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s", rsc->id, pe__node_name(node)); rsc->role = pcmk_role_stopped; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, (gpointer) node); } /*! * \internal * \brief Update resource role etc. after a successful migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_success(struct action_history *history) { /* A complete migration sequence is: * 1. migrate_to on source node (which succeeded if we get to this function) * 2. migrate_from on target node * 3. stop on source node * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from succeeded but no stop has happened, the * migration is considered to be "dangling". * * If a successful migrate_to and stop have happened on the source node, we * still need to check for a partial migration, due to scenarios (easier to * produce with batch-limit=1) like: * * - A resource is migrating from node1 to node2, and a migrate_to is * initiated for it on node1. * * - node2 goes into standby mode while the migrate_to is pending, which * aborts the transition. 
* * - Upon completion of the migrate_to, a new transition schedules a stop * on both nodes and a start on node1. * * - If the new transition is aborted for any reason while the resource is * stopping on node1, the transition after that stop completes will see * the migrate_to and stop on the source, but it's still a partial * migration, and the resource must be stopped on node2 because it is * potentially active there due to the migrate_to. * * We also need to take into account that either node's history may be * cleared at any point in the migration process. */ int from_rc = PCMK_OCF_OK; int from_status = PCMK_EXEC_PENDING; pcmk_node_t *target_node = NULL; xmlNode *migrate_from = NULL; const char *source = NULL; const char *target = NULL; bool source_newer_op = false; bool target_newer_state = false; bool active_on_target = false; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } // Check for newer state on the source source_newer_op = non_monitor_after(history->rsc->id, source, history->xml, true, history->rsc->cluster); // Check for a migrate_from action from this source on the target migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, -1, history->rsc->cluster); if (migrate_from != NULL) { if (source_newer_op) { /* There's a newer non-monitor operation on the source and a * migrate_from on the target, so this migrate_to is irrelevant to * the resource's state. */ return; } crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); } /* If the resource has newer state on both the source and target after the * migration events, this migrate_to is irrelevant to the resource's state. */ target_newer_state = newer_state_after_migrate(history->rsc->id, target, history->xml, migrate_from, history->rsc->cluster); if (source_newer_op && target_newer_state) { return; } /* Check for dangling migration (migrate_from succeeded but stop not done). * We know there's no stop because we already returned if the target has a * migrate_from and the source has any newer non-monitor operation. */ if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) { add_dangling_migration(history->rsc, history->node); return; } /* Without newer state, this migrate_to implies the resource is active. * (Clones are not allowed to migrate, so role can't be promoted.) */ history->rsc->role = pcmk_role_started; target_node = pe_find_node(history->rsc->cluster->nodes, target); active_on_target = !target_newer_state && (target_node != NULL) && target_node->details->online; if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target if (active_on_target) { native_add_running(history->rsc, target_node, history->rsc->cluster, TRUE); } else { // Mark resource as failed, require recovery, and prevent migration pe__set_resource_flags(history->rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable); } return; } // The migrate_from is pending, complete but erased, or to be scheduled /* If there is no history at all for the resource on an online target, then * it was likely cleaned. Just return, and we'll schedule a probe. Once we * have the probe result, it will be reflected in target_newer_state. 
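     * (This can happen, for example, if the resource's history on the
     * target was erased with something like "crm_resource --cleanup".)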
*/ if ((target_node != NULL) && target_node->details->online && unknown_on_node(history->rsc, target)) { return; } if (active_on_target) { pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes, source); native_add_running(history->rsc, target_node, history->rsc->cluster, FALSE); if ((source_node != NULL) && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ history->rsc->partial_migration_target = target_node; history->rsc->partial_migration_source = source_node; } } else if (!source_newer_op) { // Mark resource as failed, require recovery, and prevent migration pe__set_resource_flags(history->rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable); } } /*! * \internal * \brief Update resource role etc. after a failed migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_failure(struct action_history *history) { xmlNode *target_migrate_from = NULL; const char *source = NULL; const char *target = NULL; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->role = pcmk_role_started; // Check for migrate_from on the target target_migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, PCMK_OCF_OK, history->rsc->cluster); if (/* If the resource state is unknown on the target, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, target) /* If the resource has newer state on the target after the migration * events, this migrate_to no longer matters for the target. */ && !newer_state_after_migrate(history->rsc->id, target, history->xml, target_migrate_from, history->rsc->cluster)) { /* The resource has no newer state on the target, so assume it's still * active there (if it is up). */ pcmk_node_t *target_node = pe_find_node(history->rsc->cluster->nodes, target); if (target_node && target_node->details->online) { native_add_running(history->rsc, target_node, history->rsc->cluster, FALSE); } } else if (!non_monitor_after(history->rsc->id, source, history->xml, true, history->rsc->cluster)) { /* We know the resource has newer state on the target, but this * migrate_to still matters for the source as long as there's no newer * non-monitor operation there. */ // Mark node as having dangling migration so we can force a stop later history->rsc->dangling_migrations = g_list_prepend(history->rsc->dangling_migrations, (gpointer) history->node); } } /*! * \internal * \brief Update resource role etc.
after a failed migrate_from action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_from_failure(struct action_history *history) { xmlNode *source_migrate_to = NULL; const char *source = NULL; const char *target = NULL; // Get source and target node names from XML if (get_migration_node_names(history->xml, NULL, history->node, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->role = pcmk_role_started; // Check for a migrate_to on the source source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO, source, target, PCMK_OCF_OK, history->rsc->cluster); if (/* If the resource state is unknown on the source, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, source) /* If the resource has newer state on the source after the migration * events, this migrate_from no longer matters for the source. */ && !newer_state_after_migrate(history->rsc->id, source, source_migrate_to, history->xml, history->rsc->cluster)) { /* The resource has no newer state on the source, so assume it's still * active there (if it is up). */ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes, source); if (source_node && source_node->details->online) { native_add_running(history->rsc, source_node, history->rsc->cluster, TRUE); } } } /*! * \internal * \brief Add an action to cluster's list of failed actions * * \param[in,out] history Parsed action result history */ static void record_failed_op(struct action_history *history) { if (!(history->node->details->online)) { return; } for (const xmlNode *xIter = history->rsc->cluster->failed->children; xIter != NULL; xIter = xIter->next) { const char *key = pe__xe_history_key(xIter); const char *uname = crm_element_value(xIter, XML_ATTR_UNAME); if (pcmk__str_eq(history->key, key, pcmk__str_none) && pcmk__str_eq(uname, history->node->details->uname, pcmk__str_casei)) { crm_trace("Skipping duplicate entry %s on %s", history->key, pe__node_name(history->node)); return; } } crm_trace("Adding entry for %s on %s to failed action list", history->key, pe__node_name(history->node)); crm_xml_add(history->xml, XML_ATTR_UNAME, history->node->details->uname); crm_xml_add(history->xml, XML_LRM_ATTR_RSCID, history->rsc->id); add_node_copy(history->rsc->cluster->failed, history->xml); } static char * last_change_str(const xmlNode *xml_op) { time_t when; char *result = NULL; if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &when) == pcmk_ok) { char *when_s = pcmk__epoch2str(&when, 0); const char *p = strchr(when_s, ' '); // Skip day of week to make message shorter if ((p != NULL) && (*(++p) != '\0')) { result = strdup(p); CRM_ASSERT(result != NULL); } free(when_s); } if (result == NULL) { result = strdup("unknown time"); CRM_ASSERT(result != NULL); } return result; } /*! * \internal * \brief Compare two on-fail values * * \param[in] first One on-fail value to compare * \param[in] second The other on-fail value to compare * * \return A negative number if second is more severe than first, zero if they * are equal, or a positive number if first is more severe than second. * \note This is only needed until the action_fail_response values can be * renumbered at the next API compatibility break. 
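 *
 * \note As implemented below, pcmk_on_fail_demote outranks only
 *       pcmk_on_fail_ignore; pcmk_on_fail_reset_remote additionally outranks
 *       pcmk_on_fail_restart; and pcmk_on_fail_restart_container outranks all
 *       of those. For example, cmp_on_fail(pcmk_on_fail_reset_remote,
 *       pcmk_on_fail_restart) returns a positive number, so a requested
 *       remote reset takes precedence over a restart.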
*/ static int cmp_on_fail(enum action_fail_response first, enum action_fail_response second) { switch (first) { case pcmk_on_fail_demote: switch (second) { case pcmk_on_fail_ignore: return 1; case pcmk_on_fail_demote: return 0; default: return -1; } break; case pcmk_on_fail_reset_remote: switch (second) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: return 1; case pcmk_on_fail_reset_remote: return 0; default: return -1; } break; case pcmk_on_fail_restart_container: switch (second) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: case pcmk_on_fail_reset_remote: return 1; case pcmk_on_fail_restart_container: return 0; default: return -1; } break; default: break; } switch (second) { case pcmk_on_fail_demote: return (first == pcmk_on_fail_ignore)? -1 : 1; case pcmk_on_fail_reset_remote: switch (first) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: return -1; default: return 1; } break; case pcmk_on_fail_restart_container: switch (first) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: case pcmk_on_fail_reset_remote: return -1; default: return 1; } break; default: break; } return first - second; } /*! * \internal * \brief Ban a resource (or its clone if an anonymous instance) from all nodes * * \param[in,out] rsc Resource to ban */ static void ban_from_all_nodes(pcmk_resource_t *rsc) { int score = -INFINITY; pcmk_resource_t *fail_rsc = rsc; if (fail_rsc->parent != NULL) { pcmk_resource_t *parent = uber_parent(fail_rsc); if (pe_rsc_is_anon_clone(parent)) { /* For anonymous clones, if an operation with on-fail=stop fails for * any instance, the entire clone must stop. */ fail_rsc = parent; } } // Ban the resource from all nodes crm_notice("%s will not be started under current conditions", fail_rsc->id); if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = pe__node_list2table(rsc->cluster->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } /*! * \internal * \brief Get configured failure handling and role after failure for an action * * \param[in,out] history Unpacked action history entry * \param[out] on_fail Where to set configured failure handling * \param[out] fail_role Where to set to role after failure */ static void unpack_failure_handling(struct action_history *history, enum action_fail_response *on_fail, enum rsc_role_e *fail_role) { xmlNode *config = pcmk__find_action_config(history->rsc, history->task, history->interval_ms, true); GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node, history->task, history->interval_ms, config); const char *on_fail_str = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL); *on_fail = pcmk__parse_on_fail(history->rsc, history->task, history->interval_ms, on_fail_str); *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail, meta); g_hash_table_destroy(meta); } /*! 
* \internal * \brief Update resource role, failure handling, etc., after a failed action * * \param[in,out] history Parsed action result history * \param[in] config_on_fail Action failure handling from configuration * \param[in] fail_role Resource's role after failure of this action * \param[out] last_failure This will be set to the history XML * \param[in,out] on_fail Actual handling of action result */ static void unpack_rsc_op_failure(struct action_history *history, enum action_fail_response config_on_fail, enum rsc_role_e fail_role, xmlNode **last_failure, enum action_fail_response *on_fail) { bool is_probe = false; char *last_change_s = NULL; *last_failure = history->xml; is_probe = pcmk_xe_is_probe(history->xml); last_change_s = last_change_str(history->xml); if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster) && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s", services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pe__node_name(history->node), last_change_s, history->exit_status, history->id); } else { pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of " "%s on %s at %s " CRM_XS " exit-status=%d id=%s", services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pe__node_name(history->node), last_change_s, history->exit_status, history->id); if (is_probe && (history->exit_status != PCMK_OCF_OK) && (history->exit_status != PCMK_OCF_NOT_RUNNING) && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. */ crm_notice("If it is not possible for %s to run on %s, see " "the resource-discovery option for location constraints", history->rsc->id, pe__node_name(history->node)); } record_failed_op(history); } free(last_change_s); if (cmp_on_fail(*on_fail, config_on_fail) < 0) { pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s", fail2text(*on_fail), fail2text(config_on_fail), history->key); *on_fail = config_on_fail; } if (strcmp(history->task, PCMK_ACTION_STOP) == 0) { resource_location(history->rsc, history->node, -INFINITY, "__stop_fail__", history->rsc->cluster); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) { unpack_migrate_to_failure(history); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) { unpack_migrate_from_failure(history); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->role = pcmk_role_promoted; } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) { if (config_on_fail == pcmk_on_fail_block) { history->rsc->role = pcmk_role_promoted; pe__set_next_role(history->rsc, pcmk_role_stopped, "demote with on-fail=block"); } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) { history->rsc->role = pcmk_role_stopped; } else { /* Staying in the promoted role would put the scheduler and * controller into a loop. Setting the role to unpromoted is not * dangerous because the resource will be stopped as part of * recovery, and any promotion will be ordered after that stop. 
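             *
             * (Otherwise the scheduler would keep scheduling a demote that
             * keeps failing, with no stop ever ordered to break the cycle.)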
*/ history->rsc->role = pcmk_role_unpromoted; } } if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { /* leave stopped */ pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id); history->rsc->role = pcmk_role_stopped; } else if (history->rsc->role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id); set_active(history->rsc); } pcmk__rsc_trace(history->rsc, "Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s", history->rsc->id, role2text(history->rsc->role), pcmk__btoa(history->node->details->unclean), fail2text(config_on_fail), role2text(fail_role)); if ((fail_role != pcmk_role_started) && (history->rsc->next_role < fail_role)) { pe__set_next_role(history->rsc, fail_role, "failure"); } if (fail_role == pcmk_role_stopped) { ban_from_all_nodes(history->rsc); } } /*! * \internal * \brief Block a resource with a failed action if it cannot be recovered * * If resource action is a failed stop and fencing is not possible, mark the * resource as unmanaged and blocked, since recovery cannot be done. * * \param[in,out] history Parsed action history entry */ static void block_if_unrecoverable(struct action_history *history) { char *last_change_s = NULL; if (strcmp(history->task, PCMK_ACTION_STOP) != 0) { return; // All actions besides stop are always recoverable } if (pe_can_fence(history->node->details->data_set, history->node)) { return; // Failed stops are recoverable via fencing } last_change_s = last_change_str(history->xml); pcmk__sched_err("No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " CRM_XS " rc=%d id=%s", history->rsc->id, history->task, pe__node_name(history->node), services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), last_change_s, history->exit_status, history->id); free(last_change_s); pe__clear_resource_flags(history->rsc, pcmk_rsc_managed); pe__set_resource_flags(history->rsc, pcmk_rsc_blocked); } /*! * \internal * \brief Update action history's execution status and why * * \param[in,out] history Parsed action history entry * \param[out] why Where to store reason for update * \param[in] value New value * \param[in] reason Description of why value was changed */ static inline void remap_because(struct action_history *history, const char **why, int value, const char *reason) { if (history->execution_status != value) { history->execution_status = value; *why = reason; } } /*! * \internal * \brief Remap informational monitor results and operation status * * For the monitor results, certain OCF codes are for providing extended information * to the user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by Pacemaker. * * For operation status, the action result can be used to determine an appropriate * status for the purposes of responding to the action. The status provided by the * executor is not directly usable since the executor does not know what was expected. * * \param[in,out] history Parsed action history entry * \param[in,out] on_fail What should be done about the result * \param[in] expired Whether result is expired * * \note If the result is remapped and the node is not shutting down or failed, * the operation will be recorded in the scheduler data's list of failed * operations to highlight it for the user. * * \note This may update the resource's current and next role. 
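 *
 * \note For example, pcmk__effective_rc() remaps PCMK_OCF_DEGRADED to
 *       PCMK_OCF_OK and PCMK_OCF_DEGRADED_PROMOTED to
 *       PCMK_OCF_RUNNING_PROMOTED, while the degraded result is still
 *       recorded for the user as described above.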
*/ static void remap_operation(struct action_history *history, enum action_fail_response *on_fail, bool expired) { bool is_probe = false; int orig_exit_status = history->exit_status; int orig_exec_status = history->execution_status; const char *why = NULL; const char *task = history->task; // Remap degraded results to their successful counterparts history->exit_status = pcmk__effective_rc(history->exit_status); if (history->exit_status != orig_exit_status) { why = "degraded result"; if (!expired && (!history->node->details->shutdown || history->node->details->online)) { record_failed_op(history); } } if (!pe_rsc_is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && ((history->execution_status != PCMK_EXEC_DONE) || (history->exit_status != PCMK_OCF_NOT_RUNNING))) { history->execution_status = PCMK_EXEC_DONE; history->exit_status = PCMK_OCF_NOT_RUNNING; why = "equivalent probe result"; } /* If the executor reported an execution status of anything but done or * error, consider that final. But for done or error, we know better whether * it should be treated as a failure or not, because we know the expected * result. */ switch (history->execution_status) { case PCMK_EXEC_DONE: case PCMK_EXEC_ERROR: break; // These should be treated as node-fatal case PCMK_EXEC_NO_FENCE_DEVICE: case PCMK_EXEC_NO_SECRETS: remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "node-fatal error"); goto remap_done; default: goto remap_done; } is_probe = pcmk_xe_is_probe(history->xml); if (is_probe) { task = "probe"; } if (history->expected_exit_status < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * expected exit status in the transition key, which (along with the * similar case of a corrupted transition key in the CIB) will be * reported to this function as -1. Pacemaker 2.0+ does not support * rolling upgrades from those versions or processing of saved CIB files * from those versions, so we do not need to care much about this case. */ remap_because(history, &why, PCMK_EXEC_ERROR, "obsolete history format"); pcmk__config_warn("Expected result not found for %s on %s " "(corrupt or obsolete CIB?)", history->key, pe__node_name(history->node)); } else if (history->exit_status == history->expected_exit_status) { remap_because(history, &why, PCMK_EXEC_DONE, "expected result"); } else { remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result"); pcmk__rsc_debug(history->rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", history->key, pe__node_name(history->node), history->expected_exit_status, services_ocf_exitcode_str(history->expected_exit_status), history->exit_status, services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, "")); } switch (history->exit_status) { case PCMK_OCF_OK: if (is_probe && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active on %s at %s", history->rsc->id, pe__node_name(history->node), last_change_s); free(last_change_s); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || (history->expected_exit_status == history->exit_status) || !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) { /* For probes, recurring monitors for the Stopped role, and * unmanaged resources, "not running" is not considered a * failure. 
*/ remap_because(history, &why, PCMK_EXEC_DONE, "exit status"); history->rsc->role = pcmk_role_stopped; *on_fail = pcmk_on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "not running"); } break; case PCMK_OCF_RUNNING_PROMOTED: if (is_probe && (history->exit_status != history->expected_exit_status)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active and promoted on %s at %s", history->rsc->id, pe__node_name(history->node), last_change_s); free(last_change_s); } if (!expired || (history->exit_status == history->expected_exit_status)) { history->rsc->role = pcmk_role_promoted; } break; case PCMK_OCF_FAILED_PROMOTED: if (!expired) { history->rsc->role = pcmk_role_promoted; } remap_because(history, &why, PCMK_EXEC_ERROR, "exit status"); break; case PCMK_OCF_NOT_CONFIGURED: remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status"); break; case PCMK_OCF_UNIMPLEMENT_FEATURE: { guint interval_ms = 0; crm_element_value_ms(history->xml, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if (interval_ms == 0) { if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); } else { remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED, "exit status"); } } break; case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); break; default: if (history->execution_status == PCMK_EXEC_DONE) { char *last_change_s = last_change_str(history->xml); crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", history->exit_status, task, history->rsc->id, pe__node_name(history->node), last_change_s); remap_because(history, &why, PCMK_EXEC_ERROR, "unknown exit status"); free(last_change_s); } break; } remap_done: if (why != NULL) { pcmk__rsc_trace(history->rsc, "Remapped %s result from [%s: %s] to [%s: %s] " "because of %s", history->key, pcmk_exec_status_str(orig_exec_status), crm_exit_str(orig_exit_status), pcmk_exec_status_str(history->execution_status), crm_exit_str(history->exit_status), why); } } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(const xmlNode *xml_op, const char *task, pcmk_resource_t *rsc, pcmk_node_t *node) { if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
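             *
             * (pe__add_param_check() queues this history entry so the digest
             * comparison can be redone once assignments are known.)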
*/ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure, rsc->cluster); } else { pcmk__op_digest_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster); switch (digest_data->rc) { case pcmk__digest_unknown: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, pe__xe_history_key(xml_op), node->details->id); break; case pcmk__digest_match: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn, pcmk_scheduler_t *scheduler) { pcmk_node_t *remote_node = pe_find_node(scheduler->nodes, remote_conn->id); if (remote_node) { pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, scheduler); order_actions(fence, action, pcmk__ar_first_implies_then); } } static bool should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task, guint interval_ms, bool is_last_failure) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if (rsc->remote_reconnect_ms && pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled) && (interval_ms != 0) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { pcmk_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id); if (remote_node && !remote_node->details->remote_was_fenced) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
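 *
 * A worked example: with failure-timeout=60s, a history entry whose
 * last-rc-change is 90 seconds old counts as expired, but only if every
 * recorded failure of the resource is also more than 60 seconds old;
 * an unexpired fail count prevents expiration.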
/*!
 * \internal
 * \brief Check operation age and schedule failure clearing when appropriate
 *
 * This function has two distinct purposes. The first is to check whether an
 * operation history entry is expired (i.e. the resource has a failure timeout,
 * the entry is older than the timeout, and the resource either has no fail
 * count or its fail count is entirely older than the timeout). The second is
 * to schedule fail count clearing when appropriate (i.e. the operation is
 * expired and either the resource has an expired fail count or the operation
 * is a last_failure for a remote connection resource with a reconnect
 * interval, or the operation is a last_failure for a start or monitor
 * operation and the resource's parameters have changed since the operation).
 *
 * \param[in,out] history  Parsed action result history
 *
 * \return true if operation history entry is expired, otherwise false
 */
static bool
check_operation_expiry(struct action_history *history)
{
    bool expired = false;
    bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
    time_t last_run = 0;
    int unexpired_fail_count = 0;
    const char *clear_reason = NULL;

    if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
        pcmk__rsc_trace(history->rsc,
                        "Resource history entry %s on %s is not expired: "
                        "Not Installed does not expire",
                        history->id, pe__node_name(history->node));
        return false;   // "Not installed" must always be cleared manually
    }

    if ((history->rsc->failure_timeout > 0)
        && (crm_element_value_epoch(history->xml, XML_RSC_OP_LAST_CHANGE,
                                    &last_run) == 0)) {
        // Resource has a failure-timeout, and history entry has a timestamp

        time_t now = get_effective_time(history->rsc->cluster);
        time_t last_failure = 0;

        // Is this particular operation history older than the failure timeout?
        if ((now >= (last_run + history->rsc->failure_timeout))
            && !should_ignore_failure_timeout(history->rsc, history->task,
                                              history->interval_ms,
                                              is_last_failure)) {
            expired = true;
        }

        // Does the resource as a whole have an unexpired fail count?
        unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
                                                &last_failure,
                                                pcmk__fc_effective,
                                                history->xml);

        // Update scheduler recheck time according to *last* failure
        crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
                  " last-failure@%lld",
                  history->id, (long long) last_run, (expired? "" : "not "),
                  (long long) now, unexpired_fail_count,
                  history->rsc->failure_timeout, (long long) last_failure);
        last_failure += history->rsc->failure_timeout + 1;
        if (unexpired_fail_count && (now < last_failure)) {
            pe__update_recheck_time(last_failure, history->rsc->cluster,
                                    "fail count expiration");
        }
    }
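    /* Worked example (hypothetical values): if the last failure was recorded
     * at last_failure=1000 and failure-timeout is 60s, the fail count stays
     * in effect until t=1061 (last_failure + failure_timeout + 1). If "now"
     * is 1030 and an unexpired fail count remains, the scheduler recheck
     * time above is moved up to 1061, so the cluster re-evaluates as soon as
     * the count expires rather than waiting for the next natural transition.
     */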
    if (expired) {
        if (pe_get_failcount(history->node, history->rsc, NULL,
                             pcmk__fc_default, history->xml)) {
            // There is a fail count ignoring timeout

            if (unexpired_fail_count == 0) {
                // There is no fail count considering timeout
                clear_reason = "it expired";

            } else {
                /* This operation is old, but there is an unexpired fail count.
                 * In a properly functioning cluster, this should only be
                 * possible if this operation is not a failure (otherwise the
                 * fail count should be expired too), so this is really just a
                 * failsafe.
                 */
                pcmk__rsc_trace(history->rsc,
                                "Resource history entry %s on %s is not "
                                "expired: Unexpired fail count",
                                history->id, pe__node_name(history->node));
                expired = false;
            }

        } else if (is_last_failure
                   && (history->rsc->remote_reconnect_ms != 0)) {
            /* Clear any expired last failure when reconnect interval is set,
             * even if there is no fail count.
             */
            clear_reason = "reconnect interval is set";
        }
    }

    if (!expired && is_last_failure
        && should_clear_for_param_change(history->xml, history->task,
                                         history->rsc, history->node)) {
        clear_reason = "resource parameters have changed";
    }

    if (clear_reason != NULL) {
        pcmk_action_t *clear_op = NULL;

        // Schedule clearing of the fail count
        clear_op = pe__clear_failcount(history->rsc, history->node,
                                       clear_reason, history->rsc->cluster);

        if (pcmk_is_set(history->rsc->cluster->flags,
                        pcmk_sched_fencing_enabled)
            && (history->rsc->remote_reconnect_ms != 0)) {
            /* If we're clearing a remote connection due to a reconnect
             * interval, we want to wait until any scheduled fencing
             * completes.
             *
             * We could limit this to remote_node->details->unclean, but at
             * this point, that's always true (it won't be reliable until
             * after unpack_node_history() is done).
             */
            crm_info("Clearing %s failure will wait until any scheduled "
                     "fencing of %s completes",
                     history->task, history->rsc->id);
            order_after_remote_fencing(clear_op, history->rsc,
                                       history->rsc->cluster);
        }
    }

    if (expired && (history->interval_ms == 0)
        && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
        switch (history->exit_status) {
            case PCMK_OCF_OK:
            case PCMK_OCF_NOT_RUNNING:
            case PCMK_OCF_RUNNING_PROMOTED:
            case PCMK_OCF_DEGRADED:
            case PCMK_OCF_DEGRADED_PROMOTED:
                // Don't expire probes that return these values
                pcmk__rsc_trace(history->rsc,
                                "Resource history entry %s on %s is not "
                                "expired: Probe result",
                                history->id, pe__node_name(history->node));
                expired = false;
                break;
        }
    }

    return expired;
}

int
pe__target_rc_from_xml(const xmlNode *xml_op)
{
    int target_rc = 0;
    const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);

    if (key == NULL) {
        return -1;
    }
    decode_transition_key(key, NULL, NULL, NULL, &target_rc);
    return target_rc;
}
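/* For reference (format assumed from how the controller builds transition
 * keys): a transition key looks something like
 *
 *   "3:7:0:4b9355ad-cdd4-42d8-a3a6-9a4a0f2dbd82"
 *
 * i.e. action number, transition number, target rc, and transition UUID.
 * Calling decode_transition_key() with only the final out-argument non-NULL,
 * as above, extracts just the target rc, so pe__target_rc_from_xml() would
 * return 0 here, meaning the action was expected to succeed.
 */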
/*!
 * \internal
 * \brief Update a resource's state for an action result
 *
 * \param[in,out] history       Parsed action history entry
 * \param[in]     exit_status   Exit status to base new state on
 * \param[in]     last_failure  Resource's last_failure entry, if known
 * \param[in,out] on_fail       Resource's current failure handling
 */
static void
update_resource_state(struct action_history *history, int exit_status,
                      const xmlNode *last_failure,
                      enum action_fail_response *on_fail)
{
    bool clear_past_failure = false;

    if ((exit_status == PCMK_OCF_NOT_INSTALLED)
        || (!pe_rsc_is_bundled(history->rsc)
            && pcmk_xe_mask_probe_failure(history->xml))) {
        history->rsc->role = pcmk_role_stopped;

    } else if (exit_status == PCMK_OCF_NOT_RUNNING) {
        clear_past_failure = true;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
                            pcmk__str_none)) {
        if ((last_failure != NULL)
            && pcmk__str_eq(history->key, pe__xe_history_key(last_failure),
                            pcmk__str_none)) {
            clear_past_failure = true;
        }
        if (history->rsc->role < pcmk_role_started) {
            set_active(history->rsc);
        }

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_START,
                            pcmk__str_none)) {
        history->rsc->role = pcmk_role_started;
        clear_past_failure = true;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP,
                            pcmk__str_none)) {
        history->rsc->role = pcmk_role_stopped;
        clear_past_failure = true;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
                            pcmk__str_none)) {
        history->rsc->role = pcmk_role_promoted;
        clear_past_failure = true;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
                            pcmk__str_none)) {
        if (*on_fail == pcmk_on_fail_demote) {
            // Demote clears an error only if on-fail=demote
            clear_past_failure = true;
        }
        history->rsc->role = pcmk_role_unpromoted;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
                            pcmk__str_none)) {
        history->rsc->role = pcmk_role_started;
        clear_past_failure = true;

    } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
                            pcmk__str_none)) {
        unpack_migrate_to_success(history);

    } else if (history->rsc->role < pcmk_role_started) {
        pcmk__rsc_trace(history->rsc, "%s active on %s",
                        history->rsc->id, pe__node_name(history->node));
        set_active(history->rsc);
    }

    if (!clear_past_failure) {
        return;
    }

    switch (*on_fail) {
        case pcmk_on_fail_stop:
        case pcmk_on_fail_ban:
        case pcmk_on_fail_standby_node:
        case pcmk_on_fail_fence_node:
            pcmk__rsc_trace(history->rsc,
                            "%s (%s) is not cleared by a completed %s",
                            history->rsc->id, fail2text(*on_fail),
                            history->task);
            break;

        case pcmk_on_fail_block:
        case pcmk_on_fail_ignore:
        case pcmk_on_fail_demote:
        case pcmk_on_fail_restart:
        case pcmk_on_fail_restart_container:
            *on_fail = pcmk_on_fail_ignore;
            pe__set_next_role(history->rsc, pcmk_role_unknown,
                              "clear past failures");
            break;

        case pcmk_on_fail_reset_remote:
            if (history->rsc->remote_reconnect_ms == 0) {
                /* With no reconnect interval, the connection is allowed to
                 * start again after the remote node is fenced and
                 * completely stopped. (With a reconnect interval, we wait
                 * for the failure to be cleared entirely before attempting
                 * to reconnect.)
                 */
                *on_fail = pcmk_on_fail_ignore;
                pe__set_next_role(history->rsc, pcmk_role_unknown,
                                  "clear past failures and reset remote");
            }
            break;
    }
}

/*!
 * \internal
 * \brief Check whether a given history entry matters for resource state
 *
 * \param[in] history  Parsed action history entry
 *
 * \return true if action can affect resource state, otherwise false
 */
static inline bool
can_affect_state(struct action_history *history)
{
#if 0
    /* @COMPAT It might be better to parse only actions we know we're
     * interested in, rather than exclude a couple we don't. However that
     * would be a behavioral change that should be done at a major or minor
     * series release. Currently, unknown operations can affect whether a
     * resource is considered active and/or failed.
     */
    return pcmk__str_any_of(history->task,
                            PCMK_ACTION_MONITOR, PCMK_ACTION_START,
                            PCMK_ACTION_STOP, PCMK_ACTION_PROMOTE,
                            PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_TO,
                            PCMK_ACTION_MIGRATE_FROM, "asyncmon", NULL);
#else
    return !pcmk__str_any_of(history->task,
                             PCMK_ACTION_NOTIFY, PCMK_ACTION_META_DATA, NULL);
#endif
}

/*!
 * \internal
 * \brief Unpack execution/exit status and exit reason from a history entry
 *
 * \param[in,out] history  Action history entry to unpack
 *
 * \return Standard Pacemaker return code
 */
static int
unpack_action_result(struct action_history *history)
{
    if ((crm_element_value_int(history->xml, XML_LRM_ATTR_OPSTATUS,
                               &(history->execution_status)) < 0)
        || (history->execution_status < PCMK_EXEC_PENDING)
        || (history->execution_status > PCMK_EXEC_MAX)
        || (history->execution_status == PCMK_EXEC_CANCELLED)) {
        pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                         "with invalid " XML_LRM_ATTR_OPSTATUS " '%s'",
                         history->id, history->rsc->id,
                         pe__node_name(history->node),
                         pcmk__s(crm_element_value(history->xml,
                                                   XML_LRM_ATTR_OPSTATUS),
                                 ""));
        return pcmk_rc_unpack_error;
    }

    if ((crm_element_value_int(history->xml, XML_LRM_ATTR_RC,
                               &(history->exit_status)) < 0)
        || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
#if 0
        /* @COMPAT We should ignore malformed entries, but since that would
         * change behavior, it should be done at a major or minor series
         * release.
         */
        pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                         "with invalid " XML_LRM_ATTR_RC " '%s'",
                         history->id, history->rsc->id,
                         pe__node_name(history->node),
                         pcmk__s(crm_element_value(history->xml,
                                                   XML_LRM_ATTR_RC),
                                 ""));
        return pcmk_rc_unpack_error;
#else
        history->exit_status = CRM_EX_ERROR;
#endif
    }

    history->exit_reason = crm_element_value(history->xml,
                                             XML_LRM_ATTR_EXIT_REASON);

    return pcmk_rc_ok;
}
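/* For reference, a history entry as unpacked above might look like this in
 * the CIB status section (attribute values here are hypothetical):
 *
 *   <lrm_rsc_op id="myrsc_last_failure_0" operation="monitor"
 *               interval="10000" op-status="0" rc-code="7"
 *               transition-key="3:7:0:4b9355ad-..." exit-reason=""
 *               last-rc-change="1712345678" ... />
 *
 * unpack_action_result() reads op-status into execution_status, rc-code into
 * exit_status, and exit-reason into exit_reason, rejecting entries whose
 * op-status is missing, out of range, or PCMK_EXEC_CANCELLED.
 */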
/*!
 * \internal
 * \brief Process an action history entry whose result expired
 *
 * \param[in,out] history           Parsed action history entry
 * \param[in]     orig_exit_status  Action exit status before remapping
 *
 * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
 *         entry needs no further processing)
 */
static int
process_expired_result(struct action_history *history, int orig_exit_status)
{
    if (!pe_rsc_is_bundled(history->rsc)
        && pcmk_xe_mask_probe_failure(history->xml)
        && (orig_exit_status != history->expected_exit_status)) {

        if (history->rsc->role <= pcmk_role_stopped) {
            history->rsc->role = pcmk_role_unknown;
        }
        crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
                  "Masked failure expired",
                  history->id, history->rsc->id,
                  pe__node_name(history->node));
        return pcmk_rc_ok;
    }

    if (history->exit_status == history->expected_exit_status) {
        return pcmk_rc_undetermined;    // Only failures expire
    }

    if (history->interval_ms == 0) {
        crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
                   "Expired failure",
                   history->id, history->task, history->rsc->id,
                   pe__node_name(history->node));
        return pcmk_rc_ok;
    }

    if (history->node->details->online && !history->node->details->unclean) {
        /* Reschedule the recurring action. schedule_cancel() won't work at
         * this stage, so as a hacky workaround, forcibly change the restart
         * digest so pcmk__check_action_config() does what we want later.
         *
         * @TODO We should skip this if there is a newer successful monitor.
         *       Also, this causes rescheduling only if the history entry
         *       has an op-digest (which the expire-non-blocked-failure
         *       scheduler regression test doesn't, but that may not be a
         *       realistic scenario in production).
         */
        crm_notice("Rescheduling %s-interval %s of %s on %s "
                   "after failure expired",
                   pcmk__readable_interval(history->interval_ms),
                   history->task, history->rsc->id,
                   pe__node_name(history->node));
        crm_xml_add(history->xml, XML_LRM_ATTR_RESTART_DIGEST,
                    "calculated-failure-timeout");
        return pcmk_rc_ok;
    }

    return pcmk_rc_undetermined;
}

/*!
 * \internal
 * \brief Process a masked probe failure
 *
 * \param[in,out] history           Parsed action history entry
 * \param[in]     orig_exit_status  Action exit status before remapping
 * \param[in]     last_failure      Resource's last_failure entry, if known
 * \param[in,out] on_fail           Resource's current failure handling
 */
static void
mask_probe_failure(struct action_history *history, int orig_exit_status,
                   const xmlNode *last_failure,
                   enum action_fail_response *on_fail)
{
    pcmk_resource_t *ban_rsc = history->rsc;

    if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
        ban_rsc = uber_parent(history->rsc);
    }

    crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
               services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
               pe__node_name(history->node));
    update_resource_state(history, history->expected_exit_status, last_failure,
                          on_fail);
    crm_xml_add(history->xml, XML_ATTR_UNAME, history->node->details->uname);

    record_failed_op(history);
    resource_location(ban_rsc, history->node, -INFINITY,
                      "masked-probe-failure", history->rsc->cluster);
}

/*!
 * \internal
 * \brief Check whether a given failure is for a given pending action
 *
 * \param[in] history       Parsed history entry for pending action
 * \param[in] last_failure  Resource's last_failure entry, if known
 *
 * \return true if \p last_failure is failure of pending action in \p history,
 *         otherwise false
 * \note Both \p history and \p last_failure must come from the same
 *       lrm_resource block, as node and resource are assumed to be the same.
 */
static bool
failure_is_newer(const struct action_history *history,
                 const xmlNode *last_failure)
{
    guint failure_interval_ms = 0U;
    long long failure_change = 0LL;
    long long this_change = 0LL;

    if (last_failure == NULL) {
        return false;   // Resource has no last_failure entry
    }

    if (!pcmk__str_eq(history->task,
                      crm_element_value(last_failure, XML_LRM_ATTR_TASK),
                      pcmk__str_none)) {
        return false;   // last_failure is for different action
    }

    if ((crm_element_value_ms(last_failure, XML_LRM_ATTR_INTERVAL_MS,
                              &failure_interval_ms) != pcmk_ok)
        || (history->interval_ms != failure_interval_ms)) {
        return false;   // last_failure is for action with different interval
    }

    if ((pcmk__scan_ll(crm_element_value(history->xml, XML_RSC_OP_LAST_CHANGE),
                       &this_change, 0LL) != pcmk_rc_ok)
        || (pcmk__scan_ll(crm_element_value(last_failure,
                                            XML_RSC_OP_LAST_CHANGE),
                          &failure_change, 0LL) != pcmk_rc_ok)
        || (failure_change < this_change)) {
        return false;   // Failure is not known to be newer
    }

    return true;
}
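/* For example (hypothetical timestamps): if a 10s-interval monitor has a
 * pending entry with last-rc-change=1700 and the resource's last_failure_0
 * entry is also a 10s monitor with last-rc-change=1750, the failure is at
 * least as new (1750 >= 1700), so the monitor is no longer considered
 * pending. If the failure's timestamp were 1650 instead, the pending entry
 * would stand.
 */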
/*!
 * \internal
 * \brief Update a resource's role etc. for a pending action
 *
 * \param[in,out] history       Parsed history entry for pending action
 * \param[in]     last_failure  Resource's last_failure entry, if known
 */
static void
process_pending_action(struct action_history *history,
                       const xmlNode *last_failure)
{
    /* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
     * and there might be a RSC_monitor_INTERVAL entry with the last successful
     * or pending result.
     *
     * If last_failure contains the failure of the pending recurring monitor
     * we're processing here, and is newer, the action is no longer pending.
     * (Pending results have call ID -1, which sorts last, so the last failure
     * if any should be known.)
     */
    if (failure_is_newer(history, last_failure)) {
        return;
    }

    if (strcmp(history->task, PCMK_ACTION_START) == 0) {
        pe__set_resource_flags(history->rsc, pcmk_rsc_start_pending);
        set_active(history->rsc);

    } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
        history->rsc->role = pcmk_role_promoted;

    } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
               && history->node->details->unclean) {
        /* A migrate_to action is pending on an unclean source, so force a
         * stop on the target.
         */
        const char *migrate_target = NULL;
        pcmk_node_t *target = NULL;

        migrate_target = crm_element_value(history->xml,
                                           XML_LRM_ATTR_MIGRATE_TARGET);
        target = pe_find_node(history->rsc->cluster->nodes, migrate_target);
        if (target != NULL) {
            stop_action(history->rsc, target, FALSE);
        }
    }

    if (history->rsc->pending_task != NULL) {
        /* There should never be multiple pending actions, but as a failsafe,
         * just remember the first one processed for display purposes.
         */
        return;
    }

    if (pcmk_is_probe(history->task, history->interval_ms)) {
        /* Pending probes are currently never displayed, even if pending
         * operations are requested. If we ever want to change that,
         * enable the below and the corresponding part of
         * native.c:native_pending_task().
         */
#if 0
        history->rsc->pending_task = strdup("probe");
        history->rsc->pending_node = history->node;
#endif
    } else {
        history->rsc->pending_task = strdup(history->task);
        history->rsc->pending_node = history->node;
    }
}

static void
unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
              xmlNode **last_failure, enum action_fail_response *on_fail)
{
    int old_rc = 0;
    bool expired = false;
    pcmk_resource_t *parent = rsc;
    enum rsc_role_e fail_role = pcmk_role_unknown;
    enum action_fail_response failure_strategy = pcmk_on_fail_restart;

    struct action_history history = {
        .rsc = rsc,
        .node = node,
        .xml = xml_op,
        .execution_status = PCMK_EXEC_UNKNOWN,
    };

    CRM_CHECK(rsc && node && xml_op, return);

    history.id = ID(xml_op);
    if (history.id == NULL) {
        pcmk__config_err("Ignoring resource history entry for %s on %s "
                         "without ID", rsc->id, pe__node_name(node));
        return;
    }

    // Task and interval
    history.task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
    if (history.task == NULL) {
        pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                         "without " XML_LRM_ATTR_TASK,
                         history.id, rsc->id, pe__node_name(node));
        return;
    }
    crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS,
                         &(history.interval_ms));
    if (!can_affect_state(&history)) {
        pcmk__rsc_trace(rsc,
                        "Ignoring resource history entry %s for %s on %s "
                        "with irrelevant action '%s'",
                        history.id, rsc->id, pe__node_name(node),
                        history.task);
        return;
    }

    if (unpack_action_result(&history) != pcmk_rc_ok) {
        return; // Error already logged
    }

    history.expected_exit_status = pe__target_rc_from_xml(xml_op);
    history.key = pe__xe_history_key(xml_op);
    crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &(history.call_id));

    pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
                    history.id, history.task, history.call_id,
                    pe__node_name(node),
                    pcmk_exec_status_str(history.execution_status),
                    crm_exit_str(history.exit_status));

    if (node->details->unclean) {
        pcmk__rsc_trace(rsc,
                        "%s is running on %s, which is unclean (further action "
                        "depends on value of stop's on-fail attribute)",
                        rsc->id, pe__node_name(node));
    }

    expired = check_operation_expiry(&history);
    old_rc = history.exit_status;

    remap_operation(&history, on_fail, expired);

    if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
        goto done;
    }

    if (!pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
        mask_probe_failure(&history, old_rc, *last_failure, on_fail);
        goto done;
    }

    if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
        parent = uber_parent(rsc);
    }

    switch (history.execution_status) {
        case PCMK_EXEC_PENDING:
            process_pending_action(&history, *last_failure);
            goto done;

        case PCMK_EXEC_DONE:
            update_resource_state(&history, history.exit_status, *last_failure,
                                  on_fail);
            goto done;

        case PCMK_EXEC_NOT_INSTALLED:
            unpack_failure_handling(&history, &failure_strategy, &fail_role);
            if (failure_strategy == pcmk_on_fail_ignore) {
                crm_warn("Cannot ignore failed %s of %s on %s: "
                         "Resource agent doesn't exist "
                         CRM_XS " status=%d rc=%d id=%s",
                         history.task, rsc->id, pe__node_name(node),
                         history.execution_status, history.exit_status,
                         history.id);
                /* Also for printing it as "FAILED" by marking it as
                 * pcmk_rsc_failed later
                 */
                *on_fail = pcmk_on_fail_ban;
            }
            resource_location(parent, node, -INFINITY, "hard-error",
                              rsc->cluster);
            unpack_rsc_op_failure(&history, failure_strategy, fail_role,
                                  last_failure, on_fail);
            goto done;

        case PCMK_EXEC_NOT_CONNECTED:
            if (pe__is_guest_or_remote_node(node)
                && pcmk_is_set(node->details->remote_rsc->flags,
                               pcmk_rsc_managed)) {
                /* We should never get into a
                 * situation where a managed remote connection resource is
                 * considered OK but a resource action behind the connection
                 * gets a "not connected" status. But as a fail-safe in case
                 * a bug or unusual circumstances do lead to that, ensure the
                 * remote connection is considered failed.
                 */
                pe__set_resource_flags(node->details->remote_rsc,
                                       pcmk_rsc_failed
                                       |pcmk_rsc_stop_if_failed);
            }
            break; // Not done, do error handling

        case PCMK_EXEC_ERROR:
        case PCMK_EXEC_ERROR_HARD:
        case PCMK_EXEC_ERROR_FATAL:
        case PCMK_EXEC_TIMEOUT:
        case PCMK_EXEC_NOT_SUPPORTED:
        case PCMK_EXEC_INVALID:
            break; // Not done, do error handling

        default: // No other value should be possible at this point
            break;
    }

    unpack_failure_handling(&history, &failure_strategy, &fail_role);
    if ((failure_strategy == pcmk_on_fail_ignore)
        || ((failure_strategy == pcmk_on_fail_restart_container)
            && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {

        char *last_change_s = last_change_str(xml_op);

        crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
                 CRM_XS " %s",
                 history.task, services_ocf_exitcode_str(history.exit_status),
                 (pcmk__str_empty(history.exit_reason)? "" : ": "),
                 pcmk__s(history.exit_reason, ""), rsc->id,
                 pe__node_name(node), last_change_s, history.id);
        free(last_change_s);

        update_resource_state(&history, history.expected_exit_status,
                              *last_failure, on_fail);
        crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
        pe__set_resource_flags(rsc, pcmk_rsc_ignore_failure);

        record_failed_op(&history);

        if ((failure_strategy == pcmk_on_fail_restart_container)
            && cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
            *on_fail = failure_strategy;
        }

    } else {
        unpack_rsc_op_failure(&history, failure_strategy, fail_role,
                              last_failure, on_fail);

        if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
            uint8_t log_level = LOG_ERR;

            if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
                log_level = LOG_NOTICE;
            }
            do_crm_log(log_level,
                       "Preventing %s from restarting on %s because "
                       "of hard failure (%s%s%s) " CRM_XS " %s",
                       parent->id, pe__node_name(node),
                       services_ocf_exitcode_str(history.exit_status),
                       (pcmk__str_empty(history.exit_reason)? "" : ": "),
                       pcmk__s(history.exit_reason, ""), history.id);
            resource_location(parent, node, -INFINITY, "hard-error",
                              rsc->cluster);

        } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
            pcmk__sched_err("Preventing %s from restarting anywhere because "
                            "of fatal failure (%s%s%s) " CRM_XS " %s",
                            parent->id,
                            services_ocf_exitcode_str(history.exit_status),
                            (pcmk__str_empty(history.exit_reason)? "" : ": "),
"" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, NULL, -INFINITY, "fatal-error", rsc->cluster); } } done: pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)", rsc->id, pe__node_name(node), history.id, role2text(rsc->role), role2text(rsc->next_role)); } static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler) { const char *cluster_name = NULL; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = pcmk_role_unknown, .now = scheduler->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID), strdup(node->details->id)); if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) { scheduler->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE)); } cluster_name = g_hash_table_lookup(scheduler->config_hash, PCMK_OPT_CLUSTER_NAME); if (cluster_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME), strdup(cluster_name)); } pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data, node->details->attrs, NULL, overwrite, scheduler); pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data, node->details->utilization, NULL, FALSE, scheduler); if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { const char *site_name = pe_node_attribute_raw(node, "site-name"); if (site_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(site_name)); } else if (cluster_name) { /* Default to cluster-name if unset */ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(cluster_name)); } } } static GList * extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GList *gIter = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", ID(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GList * find_operations(const char *rsc, const char *node, gboolean active_filter, pcmk_scheduler_t *scheduler) { GList *output = NULL; GList *intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status 
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
                pcmk_scheduler_t *scheduler)
{
    GList *output = NULL;
    GList *intermediate = NULL;

    xmlNode *tmp = NULL;
    xmlNode *status = find_xml_node(scheduler->input, XML_CIB_TAG_STATUS,
                                    TRUE);

    pcmk_node_t *this_node = NULL;

    xmlNode *node_state = NULL;

    for (node_state = pcmk__xe_first_child(status); node_state != NULL;
         node_state = pcmk__xe_next(node_state)) {

        if (pcmk__str_eq((const char *) node_state->name, XML_CIB_TAG_STATE,
                         pcmk__str_none)) {
            const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);

            if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
                continue;
            }

            this_node = pe_find_node(scheduler->nodes, uname);
            if (this_node == NULL) {
                CRM_LOG_ASSERT(this_node != NULL);
                continue;

            } else if (pe__is_guest_or_remote_node(this_node)) {
                determine_remote_online_status(scheduler, this_node);

            } else {
                determine_online_status(node_state, this_node, scheduler);
            }

            if (this_node->details->online
                || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
                /* offline nodes run no resources...
                 * unless stonith is enabled in which case we need to
                 * make sure rsc start events happen after the stonith
                 */
                xmlNode *lrm_rsc = NULL;

                tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
                tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);

                for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL;
                     lrm_rsc = pcmk__xe_next(lrm_rsc)) {

                    if (pcmk__str_eq((const char *) lrm_rsc->name,
                                     XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
                        const char *rsc_id = crm_element_value(lrm_rsc,
                                                               XML_ATTR_ID);

                        if (rsc != NULL
                            && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
                            continue;
                        }

                        intermediate = extract_operations(uname, rsc_id,
                                                          lrm_rsc,
                                                          active_filter);
                        output = g_list_concat(output, intermediate);
                    }
                }
            }
        }
    }

    return output;
}
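/* Minimal usage sketch (not compiled, following this file's #if 0
 * convention): how a caller might collect the active operation history for
 * one resource on one node. The resource and node names are hypothetical.
 */
#if 0
static void
show_active_ops_example(pcmk_scheduler_t *scheduler)
{
    GList *ops = find_operations("my_rsc", "node1", TRUE, scheduler);

    for (GList *iter = ops; iter != NULL; iter = iter->next) {
        xmlNode *op = iter->data;

        crm_info("Active op %s on %s", ID(op),
                 crm_element_value(op, XML_ATTR_UNAME));
    }
    g_list_free(ops);
}
#endif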