diff --git a/crmd/membership.c b/crmd/membership.c index c36dbedcf1..4f2fa8a81f 100644 --- a/crmd/membership.c +++ b/crmd/membership.c @@ -1,450 +1,462 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* put these first so that uuid_t is defined without conflicts */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean membership_flux_hack = FALSE; void post_cache_update(int instance); int last_peer_update = 0; guint highest_born_on = -1; extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); static void reap_dead_nodes(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; if (crm_is_peer_active(node) == FALSE) { crm_update_peer_join(__FUNCTION__, node, crm_join_none); if(node && node->uname) { election_remove(fsa_election, node->uname); if (safe_str_eq(fsa_our_uname, node->uname)) { crm_err("We're not part of the cluster anymore"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); } else if (AM_I_DC == FALSE && safe_str_eq(node->uname, fsa_our_dc)) { crm_warn("Our DC node (%s) left the cluster", node->uname); register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); } } if (fsa_state == S_INTEGRATION || fsa_state == S_FINALIZE_JOIN) { check_join_state(fsa_state, __FUNCTION__); } if(node && node->uuid) { fail_incompletable_actions(transition_graph, node->uuid); } } } gboolean ever_had_quorum = FALSE; void post_cache_update(int instance) { xmlNode *no_op = NULL; crm_peer_seq = instance; crm_debug("Updated cache after membership event %d.", instance); g_hash_table_foreach(crm_peer_cache, reap_dead_nodes, NULL); set_bit(fsa_input_register, R_MEMBERSHIP); if (AM_I_DC) { populate_cib_nodes(node_update_quick | node_update_cluster | node_update_peer | node_update_expected, __FUNCTION__); } /* * If we lost nodes, we should re-check the election status * Safe to call outside of an election */ register_fsa_action(A_ELECTION_CHECK); /* Membership changed, remind everyone we're here. * This will aid detection of duplicate DCs */ no_op = create_request(CRM_OP_NOOP, NULL, NULL, CRM_SYSTEM_CRMD, AM_I_DC ? 
CRM_SYSTEM_DC : CRM_SYSTEM_CRMD, NULL); send_cluster_message(NULL, crm_msg_crmd, no_op, FALSE); free_xml(no_op); } static void crmd_node_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; last_peer_update = 0; if (rc == pcmk_ok) { crm_trace("Node update %d complete", call_id); } else if(call_id < pcmk_ok) { crm_err("Node update failed: %s (%d)", pcmk_strerror(call_id), call_id); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else { crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } /*! * \internal * \brief Create an XML node state tag with updates * * \param[in,out] node Node whose state will be used for update * \param[in] flags Bitmask of node_update_flags indicating what to update * \param[in,out] parent XML node to contain update (or NULL) * \param[in] source Who requested the update (only used for logging) * * \return Pointer to created node state tag */ xmlNode * create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, const char *source) { const char *value = NULL; xmlNode *node_state; if (!node->state) { crm_info("Node update for %s cancelled: no state, not seen yet", node->uname); return NULL; } node_state = create_xml_node(parent, XML_CIB_TAG_STATE); if (is_set(node->flags, crm_remote_node)) { crm_xml_add(node_state, XML_NODE_IS_REMOTE, XML_BOOLEAN_TRUE); } set_uuid(node_state, XML_ATTR_UUID, node); if (crm_element_value(node_state, XML_ATTR_UUID) == NULL) { crm_info("Node update for %s cancelled: no id", node->uname); free_xml(node_state); return NULL; } crm_xml_add(node_state, XML_ATTR_UNAME, node->uname); if ((flags & node_update_cluster) && node->state) { crm_xml_add_boolean(node_state, XML_NODE_IN_CLUSTER, safe_str_eq(node->state, CRM_NODE_MEMBER)); } if (!is_set(node->flags, crm_remote_node)) { if (flags & node_update_peer) { value = OFFLINESTATUS; if (node->processes & proc_flags) { value = ONLINESTATUS; } crm_xml_add(node_state, XML_NODE_IS_PEER, value); } if (flags & node_update_join) { if (node->join <= crm_join_none) { value = CRMD_JOINSTATE_DOWN; } else { value = CRMD_JOINSTATE_MEMBER; } crm_xml_add(node_state, XML_NODE_JOIN_STATE, value); } if (flags & node_update_expected) { crm_xml_add(node_state, XML_NODE_EXPECTED, node->expected); } } crm_xml_add(node_state, XML_ATTR_ORIGIN, source); return node_state; } static void remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *node_uuid = user_data; do_crm_log_unlikely(rc == 0 ? 
LOG_DEBUG : LOG_NOTICE, "Deletion of the unknown conflicting node \"%s\": %s (rc=%d)", node_uuid, pcmk_strerror(rc), rc); } static void search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *new_node_uuid = user_data; xmlNode *node_xml = NULL; if (rc != pcmk_ok) { if (rc != -ENXIO) { crm_notice("Searching conflicting nodes for %s failed: %s (%d)", new_node_uuid, pcmk_strerror(rc), rc); } return; } else if (output == NULL) { return; } if (safe_str_eq(crm_element_name(output), XML_CIB_TAG_NODE)) { node_xml = output; } else { node_xml = __xml_first_child(output); } for (; node_xml != NULL; node_xml = __xml_next(node_xml)) { const char *node_uuid = NULL; const char *node_uname = NULL; GHashTableIter iter; crm_node_t *node = NULL; gboolean known = FALSE; if (safe_str_neq(crm_element_name(node_xml), XML_CIB_TAG_NODE)) { continue; } node_uuid = crm_element_value(node_xml, XML_ATTR_ID); node_uname = crm_element_value(node_xml, XML_ATTR_UNAME); if (node_uuid == NULL || node_uname == NULL) { continue; } g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if (node->uuid && safe_str_eq(node->uuid, node_uuid) && node->uname && safe_str_eq(node->uname, node_uname)) { known = TRUE; break; } } if (known == FALSE) { int delete_call_id = 0; xmlNode *node_state_xml = NULL; crm_notice("Deleting unknown node %s/%s which has conflicting uname with %s", node_uuid, node_uname, new_node_uuid); delete_call_id = fsa_cib_conn->cmds->delete(fsa_cib_conn, XML_CIB_TAG_NODES, node_xml, cib_scope_local | cib_quorum_override); fsa_register_cib_callback(delete_call_id, FALSE, strdup(node_uuid), remove_conflicting_node_callback); node_state_xml = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_xml_add(node_state_xml, XML_ATTR_ID, node_uuid); crm_xml_add(node_state_xml, XML_ATTR_UNAME, node_uname); delete_call_id = fsa_cib_conn->cmds->delete(fsa_cib_conn, XML_CIB_TAG_STATUS, node_state_xml, cib_scope_local | cib_quorum_override); fsa_register_cib_callback(delete_call_id, FALSE, strdup(node_uuid), remove_conflicting_node_callback); free_xml(node_state_xml); } } } static void node_list_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; if(call_id < pcmk_ok) { crm_err("Node list update failed: %s (%d)", pcmk_strerror(call_id), call_id); crm_log_xml_debug(msg, "update:failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else if(rc < pcmk_ok) { crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "update:failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } #define NODE_PATH_MAX 512 void populate_cib_nodes(enum node_update_flags flags, const char *source) { int call_id = 0; gboolean from_hashtable = TRUE; int call_options = cib_scope_local | cib_quorum_override; xmlNode *node_list = create_xml_node(NULL, XML_CIB_TAG_NODES); #if SUPPORT_HEARTBEAT if (is_not_set(flags, node_update_quick) && is_heartbeat_cluster()) { from_hashtable = heartbeat_initialize_nodelist(fsa_cluster_conn, FALSE, node_list); } #endif #if SUPPORT_COROSYNC # if !SUPPORT_PLUGIN if (is_not_set(flags, node_update_quick) && is_corosync_cluster()) { from_hashtable = corosync_initialize_nodelist(NULL, FALSE, node_list); } # endif #endif if (from_hashtable) { GHashTableIter iter; crm_node_t *node = NULL; g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { xmlNode 
*new_node = NULL; crm_trace("Creating node entry for %s/%s", node->uname, node->uuid); if(node->uuid && node->uname) { char xpath[NODE_PATH_MAX]; /* We need both to be valid */ new_node = create_xml_node(node_list, XML_CIB_TAG_NODE); crm_xml_add(new_node, XML_ATTR_ID, node->uuid); crm_xml_add(new_node, XML_ATTR_UNAME, node->uname); /* Search and remove unknown nodes with the conflicting uname from CIB */ snprintf(xpath, NODE_PATH_MAX, "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE "[@uname='%s'][@id!='%s']", node->uname, node->uuid); call_id = fsa_cib_conn->cmds->query(fsa_cib_conn, xpath, NULL, cib_scope_local | cib_xpath); fsa_register_cib_callback(call_id, FALSE, strdup(node->uuid), search_conflicting_node_callback); } } } crm_trace("Populating section from %s", from_hashtable ? "hashtable" : "cluster"); fsa_cib_update(XML_CIB_TAG_NODES, node_list, call_options, call_id, NULL); fsa_register_cib_callback(call_id, FALSE, NULL, node_list_update_callback); free_xml(node_list); if (call_id >= pcmk_ok && crm_peer_cache != NULL && AM_I_DC) { /* * There is no need to update the local CIB with our values if * we've not seen valid membership data */ GHashTableIter iter; crm_node_t *node = NULL; node_list = create_xml_node(NULL, XML_CIB_TAG_STATUS); g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { create_node_state_update(node, flags, node_list, source); } if (crm_remote_peer_cache) { g_hash_table_iter_init(&iter, crm_remote_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { create_node_state_update(node, flags, node_list, source); } } fsa_cib_update(XML_CIB_TAG_STATUS, node_list, call_options, call_id, NULL); fsa_register_cib_callback(call_id, FALSE, NULL, crmd_node_update_complete); last_peer_update = call_id; free_xml(node_list); } } static void cib_quorum_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; if (rc == pcmk_ok) { crm_trace("Quorum update %d complete", call_id); } else { crm_err("Quorum update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } void crm_update_quorum(gboolean quorum, gboolean force_update) { ever_had_quorum |= quorum; if(ever_had_quorum && quorum == FALSE && no_quorum_suicide_escalation) { pcmk_panic(__FUNCTION__); } if (AM_I_DC && (force_update || fsa_has_quorum != quorum)) { int call_id = 0; xmlNode *update = NULL; int call_options = cib_scope_local | cib_quorum_override; update = create_xml_node(NULL, XML_TAG_CIB); crm_xml_add_int(update, XML_ATTR_HAVE_QUORUM, quorum); crm_xml_add(update, XML_ATTR_DC_UUID, fsa_our_uuid); fsa_cib_update(XML_TAG_CIB, update, call_options, call_id, NULL); crm_debug("Updating quorum status to %s (call=%d)", quorum ? "true" : "false", call_id); fsa_register_cib_callback(call_id, FALSE, NULL, cib_quorum_update_complete); free_xml(update); - /* If a node not running any resources is cleanly shut down and drops us - * below quorum, we won't necessarily abort the transition, so abort it - * here to be safe. + /* Quorum changes usually cause a new transition via other activity: + * quorum gained via a node joining will abort via the node join, + * and quorum lost via a node leaving will usually abort via resource + * activity and/or fencing. + * + * However, it is possible that nothing else causes a transition (e.g. 
+         * someone forces quorum via corosync-cmapctl, or quorum is lost due to
+         * a node in standby shutting down cleanly), so here ensure a new
+         * transition is triggered.
+         */
-        if (quorum == FALSE) {
-            abort_transition(INFINITY, tg_restart, "Quorum loss", NULL);
+        if (quorum) {
+            /* If quorum was gained, abort after a short delay, in case multiple
+             * nodes are joining around the same time, so the one that brings us
+             * to quorum doesn't cause all the remaining ones to be fenced.
+             */
+            abort_after_delay(INFINITY, tg_restart, "Quorum gained", 5000);
+        } else {
+            abort_transition(INFINITY, tg_restart, "Quorum lost", NULL);
        }
    }
    fsa_has_quorum = quorum;
}

diff --git a/crmd/te_utils.c b/crmd/te_utils.c
index dab02d3608..8d105dc31b 100644
--- a/crmd/te_utils.c
+++ b/crmd/te_utils.c
@@ -1,682 +1,726 @@
/*
 * Copyright (C) 2004 Andrew Beekhof
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

crm_trigger_t *stonith_reconnect = NULL;

/*
 * stonith cleanup list
 *
 * If the DC is shot, proper notifications might not go out.
 * The stonith cleanup list allows the cluster to (re-)send
 * notifications once a new DC is elected.
 */

static GListPtr stonith_cleanup_list = NULL;

/*!
 * \internal
 * \brief Add a node to the stonith cleanup list
 *
 * \param[in] target Name of node to add
 */
void
add_stonith_cleanup(const char *target)
{
    stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target));
}

/*!
 * \internal
 * \brief Remove a node from the stonith cleanup list
 *
 * \param[in] target Name of node to remove
 */
void
remove_stonith_cleanup(const char *target)
{
    GListPtr iter = stonith_cleanup_list;

    while (iter != NULL) {
        GListPtr tmp = iter;
        char *iter_name = tmp->data;

        iter = iter->next;
        if (safe_str_eq(target, iter_name)) {
            crm_trace("Removing %s from the cleanup list", iter_name);
            stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp);
            free(iter_name);
        }
    }
}

/*!
 * \internal
 * \brief Purge all entries from the stonith cleanup list
 */
void
purge_stonith_cleanup()
{
    if (stonith_cleanup_list) {
        GListPtr iter = NULL;

        for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
            char *target = iter->data;

            crm_info("Purging %s from stonith cleanup list", target);
            free(target);
        }
        g_list_free(stonith_cleanup_list);
        stonith_cleanup_list = NULL;
    }
}
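/* The crm_update_quorum() hunk above defers its "Quorum gained" abort through
 * a one-shot timer instead of aborting immediately. A minimal standalone
 * sketch of that pattern (hypothetical names, plain GLib; not part of this
 * patch):
 *
 *     static gboolean aborted_early = FALSE;  // set by any earlier abort
 *
 *     static gboolean
 *     delayed_abort_cb(gpointer data)
 *     {
 *         if (!aborted_early) {
 *             do_abort(data);                 // hypothetical abort routine
 *         }
 *         return FALSE;                       // one-shot: remove the source
 *     }
 *
 *     guint timer_id = g_timeout_add(5000, delayed_abort_cb, NULL);
 *
 * Returning FALSE from a GSourceFunc removes the timeout source, which is
 * why abort_timer_popped() below clears abort_timer.id and returns FALSE,
 * and why abort_transition_graph() only needs to set abort_timer.aborted
 * to make a pending timer a no-op.
 */

/*!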
* \internal * \brief Send stonith updates for all entries in cleanup list, then purge it */ void execute_stonith_cleanup() { GListPtr iter; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_node_t *target_node = crm_get_peer(0, target); const char *uuid = crm_peer_uuid(target_node); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } /* end stonith cleanup list functions */ static gboolean fail_incompletable_stonith(crm_graph_t * graph) { GListPtr lpc = NULL; const char *task = NULL; xmlNode *last_action = NULL; if (graph == NULL) { return FALSE; } for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { GListPtr lpc2 = NULL; synapse_t *synapse = (synapse_t *) lpc->data; if (synapse->confirmed) { continue; } for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) { crm_action_t *action = (crm_action_t *) lpc2->data; if (action->type != action_type_crm || action->confirmed) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (task && safe_str_eq(task, CRM_OP_FENCE)) { action->failed = TRUE; last_action = action->xml; update_graph(graph, action); crm_notice("Failing action %d (%s): STONITHd terminated", action->id, ID(action->xml)); } } } if (last_action != NULL) { crm_warn("STONITHd failure resulted in un-runnable actions"); abort_for_stonith_failure(tg_restart, NULL, last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_crit("Fencing daemon connection failed"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencing daemon disconnected"); } /* cbchan will be garbage at this point, arrange for it to be reset */ if(stonith_api) { stonith_api->state = stonith_disconnected; } if (AM_I_DC) { fail_incompletable_stonith(transition_graph); trigger_graph(); } } #if SUPPORT_CMAN # include #endif char *te_client_id = NULL; #ifdef HAVE_SYS_REBOOT_H # include # include #endif static void tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event) { if(te_client_id == NULL) { te_client_id = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } if (st_event == NULL) { crm_err("Notify data not found"); return; } crmd_alert_fencing_op(st_event); if (st_event->result == pcmk_ok && safe_str_eq("on", st_event->action)) { crm_notice("%s was successfully unfenced by %s (at the request of %s)", st_event->target, st_event->executioner ? st_event->executioner : "", st_event->origin); /* TODO: Hook up st_event->device */ return; } else if (safe_str_eq("on", st_event->action)) { crm_err("Unfencing of %s by %s failed: %s (%d)", st_event->target, st_event->executioner ? st_event->executioner : "", pcmk_strerror(st_event->result), st_event->result); return; } else if (st_event->result == pcmk_ok && crm_str_eq(st_event->target, fsa_our_uname, TRUE)) { crm_crit("We were allegedly just fenced by %s for %s!", st_event->executioner ? st_event->executioner : "", st_event->origin); /* Dumps blackbox if enabled */ qb_log_fini(); /* Try to get the above log message to disk - somehow */ /* Get out ASAP and do not come back up. 
         *
         * Triggering a reboot is also not the worst idea either since
         * the rest of the cluster thinks we're safely down
         */
#ifdef RB_HALT_SYSTEM
        reboot(RB_HALT_SYSTEM);
#endif

        /*
         * If reboot() fails or is not supported, coming back up will
         * probably lead to a situation where the other nodes set our
         * status to 'lost' because of the fencing callback and will
         * discard subsequent election votes with:
         *
         * Election 87 (current: 5171, owner: 103): Processed vote from east-03 (Peer is not part of our cluster)
         *
         * So just stay dead, something is seriously messed up anyway.
         *
         */
        exit(100); /* None of our wrappers since we already called qb_log_fini() */
        return;
    }

    /* Update the count of stonith failures for this target, in case we become
     * DC later. The current DC has already updated its fail count in
     * tengine_stonith_callback().
     */
    if (!AM_I_DC && safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) {
        if (st_event->result == pcmk_ok) {
            st_fail_count_reset(st_event->target);
        } else {
            st_fail_count_increment(st_event->target);
        }
    }

    crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s: %s "
               CRM_XS " initiator=%s ref=%s",
               st_event->target,
               st_event->result == pcmk_ok ? "" : " not",
               st_event->action,
               st_event->executioner ? st_event->executioner : "",
               (st_event->client_origin? st_event->client_origin : ""),
               pcmk_strerror(st_event->result),
               st_event->origin, st_event->id);

#if SUPPORT_CMAN
    if (st_event->result == pcmk_ok && is_cman_cluster()) {
        int local_rc = 0;
        int confirm = 0;
        char *target_copy = strdup(st_event->target);

        /* In case fenced hasn't noticed yet
         *
         * Any fencing that has been initiated will be completed by way of the fence_pcmk redirect
         */
        local_rc = fenced_external(target_copy);
        if (local_rc != 0) {
            crm_err("Could not notify CMAN that '%s' is now fenced: %d", st_event->target, local_rc);
        } else {
            crm_notice("Notified CMAN that '%s' is now fenced", st_event->target);
        }

        /* In case fenced is already trying to shoot it */
        confirm = open("/var/run/cluster/fenced_override", O_NONBLOCK|O_WRONLY);
        if (confirm >= 0) {
            int ignore = 0;
            int len = strlen(target_copy);

            errno = 0;
            local_rc = write(confirm, target_copy, len);
            ignore = write(confirm, "\n", 1);

            if(ignore < 0 && errno == EBADF) {
                crm_trace("CMAN not expecting %s to be fenced (yet)", st_event->target);
            } else if (local_rc < len) {
                crm_perror(LOG_ERR, "Confirmation of CMAN fencing event for '%s' failed: %d", st_event->target, local_rc);
            } else {
                fsync(confirm);
                crm_notice("Confirmed CMAN fencing event for '%s'", st_event->target);
            }
            close(confirm);
        }
        free(target_copy);
    }
#endif

    if (st_event->result == pcmk_ok) {
        crm_node_t *peer = crm_find_peer_full(0, st_event->target, CRM_GET_PEER_ANY);
        const char *uuid = NULL;
        gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname);

        if (peer == NULL) {
            return;
        }

        uuid = crm_peer_uuid(peer);

        crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc);
        if(AM_I_DC) {
            /* The DC always sends updates */
            send_stonith_update(NULL, st_event->target, uuid);

            /* @TODO Ideally, at this point, we'd check whether the fenced node
             * hosted any guest nodes, and call remote_node_down() for them.
             * Unfortunately, the crmd doesn't have a simple, reliable way to
             * map hosts to guests. It might be possible to track this in the
             * peer cache via crm_remote_peer_cache_refresh(). For now, we rely
             * on the PE creating fence pseudo-events for the guests.
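             *
             * (Below, st_event->client_origin is compared against te_client_id --
             * the "<crm_system_name>.<pid>" string built at the top of this
             * function -- to distinguish fencing that this transition engine
             * requested from fencing requested externally, e.g. via
             * stonith_admin; only the latter needs to abort the current
             * transition.)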
*/ if (st_event->client_origin && safe_str_neq(st_event->client_origin, te_client_id)) { /* Abort the current transition graph if it wasn't us * that invoked stonith to fence someone */ crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target); abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL); } /* Assume it was our leader if we don't currently have one */ } else if (((fsa_our_dc == NULL) || safe_str_eq(fsa_our_dc, st_event->target)) && !is_set(peer->flags, crm_remote_node)) { crm_notice("Target %s our leader %s (recorded: %s)", fsa_our_dc ? "was" : "may have been", st_event->target, fsa_our_dc ? fsa_our_dc : ""); /* Given the CIB resyncing that occurs around elections, * have one node update the CIB now and, if the new DC is different, * have them do so too after the election */ if (we_are_executioner) { send_stonith_update(NULL, st_event->target, uuid); } add_stonith_cleanup(st_event->target); } /* If the target is a remote node, and we host its connection, * immediately fail all monitors so it can be recovered quickly. * The connection won't necessarily drop when a remote node is fenced, * so the failure might not otherwise be detected until the next poke. */ if (is_set(peer->flags, crm_remote_node)) { remote_ra_fail(st_event->target); } crmd_peer_down(peer, TRUE); } } gboolean te_connect_stonith(gpointer user_data) { int lpc = 0; int rc = pcmk_ok; if (stonith_api == NULL) { stonith_api = stonith_api_new(); } if (stonith_api->state != stonith_disconnected) { crm_trace("Still connected"); return TRUE; } for (lpc = 0; lpc < 30; lpc++) { crm_debug("Attempting connection to fencing daemon..."); sleep(1); rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); if (rc == pcmk_ok) { break; } if (user_data != NULL) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_err("Sign-in failed: triggered a retry"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Sign-in failed, but no longer required"); } return TRUE; } crm_err("Sign-in failed: pausing and trying again in 2s..."); sleep(1); } CRM_CHECK(rc == pcmk_ok, return TRUE); /* If not, we failed 30 times... 
just get out */ stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE, tengine_stonith_notify); crm_trace("Connected"); return TRUE; } gboolean stop_te_timer(crm_action_timer_t * timer) { const char *timer_desc = "action timer"; if (timer == NULL) { return FALSE; } if (timer->reason == timeout_abort) { timer_desc = "global timer"; crm_trace("Stopping %s", timer_desc); } if (timer->source_id != 0) { crm_trace("Stopping %s", timer_desc); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_trace("%s was already stopped", timer_desc); return FALSE; } return TRUE; } gboolean te_graph_trigger(gpointer user_data) { enum transition_status graph_rc = -1; if (transition_graph == NULL) { crm_debug("Nothing to do"); return TRUE; } crm_trace("Invoking graph %d in state %s", transition_graph->id, fsa_state2string(fsa_state)); switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: return TRUE; break; default: break; } if (transition_graph->complete == FALSE) { int limit = transition_graph->batch_limit; transition_graph->batch_limit = throttle_get_total_job_limit(limit); graph_rc = run_graph(transition_graph); transition_graph->batch_limit = limit; /* Restore the configured value */ /* significant overhead... */ /* print_graph(LOG_DEBUG_3, transition_graph); */ if (graph_rc == transition_active) { crm_trace("Transition not yet complete"); return TRUE; } else if (graph_rc == transition_pending) { crm_trace("Transition not yet complete - no actions fired"); return TRUE; } if (graph_rc != transition_complete) { crm_warn("Transition failed: %s", transition_status(graph_rc)); print_graph(LOG_NOTICE, transition_graph); } } crm_debug("Transition %d is now complete", transition_graph->id); transition_graph->complete = TRUE; notify_crmd(transition_graph); return TRUE; } void trigger_graph_processing(const char *fn, int line) { crm_trace("%s:%d - Triggered graph processing", fn, line); mainloop_set_trigger(transition_trigger); } +static struct abort_timer_s { + bool aborted; + guint id; + int priority; + enum transition_action action; + const char *text; +} abort_timer = { 0, }; + +static gboolean +abort_timer_popped(gpointer data) +{ + if (abort_timer.aborted == FALSE) { + abort_transition(abort_timer.priority, abort_timer.action, + abort_timer.text, NULL); + } + abort_timer.id = 0; + return FALSE; // do not immediately reschedule timer +} + +/*! 
+ * \internal + * \brief Abort transition after delay, if not already aborted in that time + * + * \param[in] abort_text Must be literal string + */ +void +abort_after_delay(int abort_priority, enum transition_action abort_action, + const char *abort_text, guint delay_ms) +{ + if (abort_timer.id) { + // Timer already in progress, stop and reschedule + g_source_remove(abort_timer.id); + } + abort_timer.aborted = FALSE; + abort_timer.priority = abort_priority; + abort_timer.action = abort_action; + abort_timer.text = abort_text; + abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, NULL); +} + void abort_transition_graph(int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode * reason, const char *fn, int line) { int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; int level = LOG_INFO; xmlNode *diff = NULL; xmlNode *change = NULL; CRM_CHECK(transition_graph != NULL, return); switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: crm_info("Abort %s suppressed: state=%s (complete=%d)", abort_text, fsa_state2string(fsa_state), transition_graph->complete); return; default: break; } + abort_timer.aborted = TRUE; + /* Make sure any queued calculations are discarded ASAP */ free(fsa_pe_ref); fsa_pe_ref = NULL; if (transition_graph->complete == FALSE) { if(update_abort_priority(transition_graph, abort_priority, abort_action, abort_text)) { level = LOG_NOTICE; } } if(reason) { xmlNode *search = NULL; for(search = reason; search; search = search->parent) { if (safe_str_eq(XML_TAG_DIFF, TYPE(search))) { diff = search; break; } } if(diff) { xml_patch_versions(diff, add, del); for(search = reason; search; search = search->parent) { if (safe_str_eq(XML_DIFF_CHANGE, TYPE(search))) { change = search; break; } } } } if(reason == NULL) { do_crm_log(level, "Transition aborted: %s "CRM_XS" source=%s:%d complete=%s", abort_text, fn, line, (transition_graph->complete? "true" : "false")); } else if(change == NULL) { char *local_path = xml_get_path(reason); do_crm_log(level, "Transition aborted by %s.%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", TYPE(reason), ID(reason), abort_text, add[0], add[1], add[2], fn, line, local_path, (transition_graph->complete? "true" : "false")); free(local_path); } else { const char *kind = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *path = crm_element_value(change, XML_DIFF_PATH); if(change == reason) { if(strcmp(op, "create") == 0) { reason = reason->children; } else if(strcmp(op, "modify") == 0) { reason = first_named_child(reason, XML_DIFF_RESULT); if(reason) { reason = reason->children; } } } kind = TYPE(reason); if(strcmp(op, "delete") == 0) { const char *shortpath = strrchr(path, '/'); do_crm_log(level, "Transition aborted by deletion of %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", (shortpath? (shortpath + 1) : path), abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? "true" : "false")); } else if (safe_str_eq(XML_CIB_TAG_NVPAIR, kind)) { do_crm_log(level, "Transition aborted by %s doing %s %s=%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", crm_element_value(reason, XML_ATTR_ID), op, crm_element_value(reason, XML_NVPAIR_ATTR_NAME), crm_element_value(reason, XML_NVPAIR_ATTR_VALUE), abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? 
"true" : "false")); } else if (safe_str_eq(XML_LRM_TAG_RSC_OP, kind)) { const char *magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC); do_crm_log(level, "Transition aborted by operation %s '%s' on %s: %s " CRM_XS " magic=%s cib=%d.%d.%d source=%s:%d complete=%s", crm_element_value(reason, XML_LRM_ATTR_TASK_KEY), op, crm_element_value(reason, XML_LRM_ATTR_TARGET), abort_text, magic, add[0], add[1], add[2], fn, line, (transition_graph->complete? "true" : "false")); } else if (safe_str_eq(XML_CIB_TAG_STATE, kind) || safe_str_eq(XML_CIB_TAG_NODE, kind)) { const char *uname = crm_peer_uname(ID(reason)); do_crm_log(level, "Transition aborted by %s '%s' on %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s", kind, op, (uname? uname : ID(reason)), abort_text, add[0], add[1], add[2], fn, line, (transition_graph->complete? "true" : "false")); } else { + const char *id = ID(reason); + do_crm_log(level, "Transition aborted by %s.%s '%s': %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", - TYPE(reason), ID(reason), (op? op : "change"), abort_text, - add[0], add[1], add[2], fn, line, path, + TYPE(reason), (id? id : ""), (op? op : "change"), + abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? "true" : "false")); } } if (transition_graph->complete) { if (transition_timer->period_ms > 0) { crm_timer_stop(transition_timer); crm_timer_start(transition_timer); } else { register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); } return; } mainloop_set_trigger(transition_trigger); } diff --git a/crmd/tengine.h b/crmd/tengine.h index 7205c16cc4..6a75a08c5c 100644 --- a/crmd/tengine.h +++ b/crmd/tengine.h @@ -1,83 +1,85 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef TENGINE__H # define TENGINE__H # include # include # include # include extern stonith_t *stonith_api; extern void send_stonith_update(crm_action_t * stonith_action, const char *target, const char *uuid); /* stonith cleanup list */ void add_stonith_cleanup(const char *target); void remove_stonith_cleanup(const char *target); void purge_stonith_cleanup(void); void execute_stonith_cleanup(void); /* tengine */ extern crm_action_t *match_down_event(const char *target, bool quiet); extern crm_action_t *get_cancel_action(const char *id, const char *node); extern gboolean cib_action_update(crm_action_t * action, int status, int op_rc); extern gboolean fail_incompletable_actions(crm_graph_t * graph, const char *down_node); extern gboolean process_graph_event(xmlNode * event, const char *event_node); /* utils */ extern crm_action_t *get_action(int id, gboolean confirmed); extern gboolean start_global_timer(crm_action_timer_t * timer, int timeout); extern gboolean stop_te_timer(crm_action_timer_t * timer); extern const char *get_rsc_state(const char *task, enum op_status status); /* unpack */ extern gboolean process_te_message(xmlNode * msg, xmlNode * xml_data); extern crm_graph_t *transition_graph; extern crm_trigger_t *transition_trigger; extern char *te_uuid; extern void notify_crmd(crm_graph_t * graph); # include extern void trigger_graph_processing(const char *fn, int line); +void abort_after_delay(int abort_priority, enum transition_action abort_action, + const char *abort_text, guint delay_ms); extern void abort_transition_graph(int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode * reason, const char *fn, int line); # define trigger_graph() trigger_graph_processing(__FUNCTION__, __LINE__) # define abort_transition(pri, action, text, reason) \ abort_transition_graph(pri, action, text, reason,__FUNCTION__,__LINE__); extern gboolean te_connect_stonith(gpointer user_data); extern crm_trigger_t *transition_trigger; extern crm_trigger_t *stonith_reconnect; extern char *failed_stop_offset; extern char *failed_start_offset; extern int active_timeout; extern int stonith_op_active; void te_action_confirmed(crm_action_t * action); void te_reset_job_counts(void); #endif diff --git a/pengine/container.c b/pengine/container.c index f5d916c805..15d094db60 100644 --- a/pengine/container.c +++ b/pengine/container.c @@ -1,971 +1,979 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #define VARIANT_CONTAINER 1 #include static bool is_child_container_node(container_variant_data_t *data, pe_node_t *node) { for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(node->details == tuple->node->details) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set); static GListPtr get_container_list(resource_t *rsc) { GListPtr containers = NULL; container_variant_data_t *data = NULL; if(rsc->variant == pe_container) { get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; containers = g_list_append(containers, tuple->docker); } } return containers; } static GListPtr get_containers_or_children(resource_t *rsc) { GListPtr containers = NULL; container_variant_data_t *data = NULL; if(rsc->variant == pe_container) { get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; containers = g_list_append(containers, tuple->docker); } return containers; } else { return rsc->children; } } static bool migration_threshold_reached(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return FALSE; } // If we're ignoring failures, also ignore the migration threshold if (is_set(rsc->flags, pe_rsc_failure_ignored)) { return FALSE; } /* If there are no failures, there's no need to force away */ fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL, data_set); if (fail_count <= 0) { return FALSE; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); if (countdown == 0) { crm_warn("Forcing %s away from %s after %d failures (max=%d)", rsc->id, node->details->uname, fail_count, rsc->migration_threshold); return TRUE; } crm_info("%s can fail %d more times on %s before being forced off", rsc->id, countdown, node->details->uname); return FALSE; } node_t * container_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr containers = NULL; GListPtr nodes = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return NULL); get_container_variant_data(container_data, rsc); set_bit(rsc->flags, pe_rsc_allocating); containers = get_container_list(rsc); dump_node_scores(show_scores ? 
0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); containers = g_list_sort_with_data(containers, sort_clone_instance, data_set); distribute_children(rsc, containers, nodes, container_data->replicas, container_data->replicas_per_host, data_set); g_list_free(nodes); g_list_free(containers); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; pe_node_t *docker_host = tuple->docker->allocated_to; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->allocate(tuple->ip, prefer, data_set); } if(tuple->remote && is_remote_node(docker_host)) { /* We need 'nested' connection resources to be on the same * host because pacemaker-remoted only supports a single * active connection */ rsc_colocation_new("child-remote-with-docker-remote", NULL, INFINITY, tuple->remote, docker_host->details->remote_rsc, NULL, NULL, data_set); } if(tuple->remote) { tuple->remote->cmds->allocate(tuple->remote, prefer, data_set); } // Explicitly allocate tuple->child before the container->child if(tuple->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, tuple->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(node->details != tuple->node->details) { node->weight = -INFINITY; } else if(migration_threshold_reached(tuple->child, node, data_set) == FALSE) { node->weight = INFINITY; } } set_bit(tuple->child->parent->flags, pe_rsc_allocating); tuple->child->cmds->allocate(tuple->child, tuple->node, data_set); clear_bit(tuple->child->parent->flags, pe_rsc_allocating); } } if(container_data->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, container_data->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(is_child_container_node(container_data, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } container_data->child->cmds->allocate(container_data->child, prefer, data_set); } clear_bit(rsc->flags, pe_rsc_allocating); clear_bit(rsc->flags, pe_rsc_provisional); return NULL; } void container_create_actions(resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *action = NULL; GListPtr containers = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); containers = get_container_list(rsc); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->create_actions(tuple->ip, data_set); } if(tuple->docker) { tuple->docker->cmds->create_actions(tuple->docker, data_set); } if(tuple->remote) { tuple->remote->cmds->create_actions(tuple->remote, data_set); } } clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set); if(container_data->child) { container_data->child->cmds->create_actions(container_data->child, data_set); if(container_data->child->variant == pe_master) { /* promote */ action = create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set); action->priority = INFINITY; /* demote */ action = create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set); 
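/* A bundle whose child is a master/slave resource gets promote/demote
 * pseudo-actions of its own, so that ordering constraints involving the
 * bundle can bracket the child's promotion and demotion; the RSC_PROMOTED
 * and RSC_DEMOTED pseudo-ops (given INFINITY priority, as on the next
 * statement) mark completion of each phase. */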
action->priority = INFINITY; } } g_list_free(containers); } void container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); if(container_data->child) { new_rsc_order(rsc, RSC_START, container_data->child, RSC_START, pe_order_implies_first_printed, data_set); new_rsc_order(rsc, RSC_STOP, container_data->child, RSC_STOP, pe_order_implies_first_printed, data_set); if(container_data->child->children) { new_rsc_order(container_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } else { new_rsc_order(container_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); CRM_ASSERT(tuple->docker); tuple->docker->cmds->internal_constraints(tuple->docker, data_set); order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed); if(tuple->child) { order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed); } order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed); new_rsc_order(tuple->docker, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(tuple->docker, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if(tuple->ip) { tuple->ip->cmds->internal_constraints(tuple->ip, data_set); // Start ip then docker new_rsc_order(tuple->ip, RSC_START, tuple->docker, RSC_START, pe_order_runnable_left|pe_order_preserve, data_set); new_rsc_order(tuple->docker, RSC_STOP, tuple->ip, RSC_STOP, pe_order_implies_first|pe_order_preserve, data_set); rsc_colocation_new("ip-with-docker", NULL, INFINITY, tuple->ip, tuple->docker, NULL, NULL, data_set); } if(tuple->remote) { /* This handles ordering and colocating remote relative to docker * (via "resource-with-container"). Since IP is also ordered and * colocated relative to docker, we don't need to do anything * explicit here with IP. 
*/ tuple->remote->cmds->internal_constraints(tuple->remote, data_set); } if(tuple->child) { CRM_ASSERT(tuple->remote); // Start of the remote then child is implicit in the PE's remote logic } } if(container_data->child) { container_data->child->cmds->internal_constraints(container_data->child, data_set); if(container_data->child->variant == pe_master) { master_promotion_constraints(rsc, data_set); /* child demoted before global demoted */ new_rsc_order(container_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ new_rsc_order(rsc, RSC_DEMOTE, container_data->child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); /* child promoted before global promoted */ new_rsc_order(container_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed, data_set); /* global promote before child promote */ new_rsc_order(rsc, RSC_PROMOTE, container_data->child, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } } else { // int type = pe_order_optional | pe_order_implies_then | pe_order_restart; // custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, // rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); } } static resource_t * find_compatible_tuple_by_node(resource_t * rsc_lh, node_t * candidate, resource_t * rsc, enum rsc_role_e filter, gboolean current) { container_variant_data_t *container_data = NULL; CRM_CHECK(candidate != NULL, return NULL); get_container_variant_data(container_data, rsc); crm_trace("Looking for compatible child from %s for %s on %s", rsc_lh->id, rsc->id, candidate->details->uname); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(is_child_compatible(tuple->docker, candidate, filter, current)) { crm_trace("Pairing %s with %s on %s", rsc_lh->id, tuple->docker->id, candidate->details->uname); return tuple->docker; } } crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id); return NULL; } static resource_t * find_compatible_tuple(resource_t *rsc_lh, resource_t * rsc, enum rsc_role_e filter, gboolean current) { GListPtr scratch = NULL; resource_t *pair = NULL; node_t *active_node_lh = NULL; active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current); if (active_node_lh) { return find_compatible_tuple_by_node(rsc_lh, active_node_lh, rsc, filter, current); } scratch = g_hash_table_get_values(rsc_lh->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_tuple_by_node(rsc_lh, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, rsc->id); done: g_list_free(scratch); return pair; } void container_rsc_colocation_lh(resource_t * rsc, resource_t * rsc_rh, rsc_colocation_t * constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } int copies_per_node(resource_t * rsc) { /* Strictly speaking, there should be a 'copies_per_node' addition * to the resource function table and each case would be a * function. However that would be serious overkill to return an * int. 
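(For example, per the switch below: primitives and groups yield 1, clones and masters yield their clone-node-max, and bundles yield their replicas-per-host setting.)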
In fact, it seems to me that both function tables * could/should be replaced by resources.{c,h} full of * rsc_{some_operation} functions containing a switch as below * which calls out to functions named {variant}_{some_operation} * as needed. */ switch(rsc->variant) { case pe_unknown: return 0; case pe_native: case pe_group: return 1; case pe_clone: case pe_master: { const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); return crm_parse_int(max_clones_node, "1"); } case pe_container: { container_variant_data_t *data = NULL; get_container_variant_data(data, rsc); return data->replicas_per_host; } } return 0; } void container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocation_t * constraint) { GListPtr allocated_rhs = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return); CRM_ASSERT(rsc_lh->variant == pe_native); if (is_set(rsc->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc, "%s is still provisional", rsc->id); return; } else if(constraint->rsc_lh->variant > pe_group) { resource_t *rh_child = find_compatible_tuple(rsc_lh, rsc, RSC_ROLE_UNKNOWN, FALSE); if (rh_child) { pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); } return; } get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc->id, constraint->score); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (constraint->score < INFINITY) { tuple->docker->cmds->rsc_colocation_rh(rsc_lh, tuple->docker, constraint); } else { node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE); - if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) { - pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); - allocated_rhs = g_list_prepend(allocated_rhs, chosen); + if (chosen == NULL || is_set_recursive(tuple->docker, pe_rsc_block, TRUE)) { + continue; + } + if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child == NULL) { + continue; } + if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child->next_role < RSC_ROLE_MASTER) { + continue; + } + + pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); + allocated_rhs = g_list_prepend(allocated_rhs, chosen); } } if (constraint->score >= INFINITY) { node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE); } g_list_free(allocated_rhs); } enum pe_action_flags container_action_flags(action_t * action, node_t * node) { GListPtr containers = NULL; enum pe_action_flags flags = 0; container_variant_data_t *data = NULL; get_container_variant_data(data, action->rsc); if(data->child) { enum action_tasks task = get_complex_task(data->child, action->task, TRUE); switch(task) { case no_action: case action_notify: case action_notified: case action_promote: case action_promoted: case action_demote: case action_demoted: return 
summary_action_flags(action, data->child->children, node);
            default:
                break;
        }
    }

    containers = get_container_list(action->rsc);
    flags = summary_action_flags(action, containers, node);
    g_list_free(containers);
    return flags;
}

resource_t *
find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc,
                              enum rsc_role_e filter, gboolean current)
{
    GListPtr gIter = NULL;
    GListPtr children = NULL;

    if (local_node == NULL) {
        crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
        return NULL;
    }

    crm_trace("Looking for compatible child from %s for %s on %s",
              local_child->id, rsc->id, local_node->details->uname);

    children = get_containers_or_children(rsc);
    for (gIter = children; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        if(is_child_compatible(child_rsc, local_node, filter, current)) {
            crm_trace("Pairing %s with %s on %s",
                      local_child->id, child_rsc->id, local_node->details->uname);
            return child_rsc;
        }
    }

    crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
    if(children != rsc->children) {
        g_list_free(children);
    }
    return NULL;
}

static container_grouping_t *
tuple_for_docker(resource_t *rsc, resource_t *docker, node_t *node)
{
    if(rsc->variant == pe_container) {
        container_variant_data_t *data = NULL;

        get_container_variant_data(data, rsc);
        for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) {
            container_grouping_t *tuple = (container_grouping_t *)gIter->data;

            if(tuple->child
               && docker == tuple->docker
               && node->details == tuple->node->details) {
                return tuple;
            }
        }
    }
    return NULL;
}

static enum pe_graph_flags
container_update_interleave_actions(action_t * first, action_t * then, node_t * node,
                                    enum pe_action_flags flags, enum pe_action_flags filter,
                                    enum pe_ordering type)
{
    GListPtr gIter = NULL;
    GListPtr children = NULL;
    gboolean current = FALSE;
    enum pe_graph_flags changed = pe_graph_none;

    /* Fix this - lazy */
    if (crm_ends_with(first->uuid, "_stopped_0")
        || crm_ends_with(first->uuid, "_demoted_0")) {
        current = TRUE;
    }

    children = get_containers_or_children(then->rsc);
    for (gIter = children; gIter != NULL; gIter = gIter->next) {
        resource_t *then_child = (resource_t *) gIter->data;
        resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current);

        if (first_child == NULL && current) {
            crm_trace("Ignore");

        } else if (first_child == NULL) {
            crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);

            /* Me no like this hack - but what else can we do?
             *
             * If there is no-one active or about to be active
             * on the same node as then_child, then they must
             * not be allowed to start
             */
            if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
                pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                if(assign_node(then_child, NULL, TRUE)) {
                    changed |= pe_graph_updated_then;
                }
            }

        } else {
            pe_action_t *first_action = NULL;
            pe_action_t *then_action = NULL;

            enum action_tasks task = clone_child_action(first);
            const char *first_task = task2text(task);

            container_grouping_t *first_tuple = tuple_for_docker(first->rsc, first_child, node);
            container_grouping_t *then_tuple = tuple_for_docker(then->rsc, then_child, node);

            if(strstr(first->task, "stop") && first_tuple && first_tuple->child) {
                /* Except for 'stopped' we should be looking at the
                 * in-container resource, actions for the child will
                 * happen later and are therefore more likely to align
                 * with the user's intent.
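                 *
                 * (Hence the lookup below: when 'first' is a stop-family
                 * action and the matching tuple has an in-container child,
                 * find_first_action() searches first_tuple->child->actions;
                 * otherwise it falls back to the matched container child's
                 * own action list.)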
*/ first_action = find_first_action(first_tuple->child->actions, NULL, task2text(task), node); } else { first_action = find_first_action(first_child->actions, NULL, task2text(task), node); } if(strstr(then->task, "mote") && then_tuple && then_tuple->child) { /* Promote/demote actions will never be found for the * docker resource, look in the child instead * * Alternatively treat: * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY' */ then_action = find_first_action(then_tuple->child->actions, NULL, then->task, node); } else { then_action = find_first_action(then_child->actions, NULL, then->task, node); } if (first_action == NULL) { if (is_not_set(first_child->flags, pe_rsc_orphan) && crm_str_eq(first_task, RSC_STOP, TRUE) == FALSE && crm_str_eq(first_task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (first)", first_task, first_child->id); } else { crm_trace("No action found for %s in %s%s (first)", first_task, first_child->id, is_set(first_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } /* We're only interested if 'then' is neither stopping nor being demoted */ if (then_action == NULL) { if (is_not_set(then_child->flags, pe_rsc_orphan) && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE && crm_str_eq(then->task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } else { crm_trace("No action found for %s in %s%s (then)", then->task, then_child->id, is_set(then_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x", first_action->uuid, is_set(first_action->flags, pe_action_optional), then_action->uuid, is_set(then_action->flags, pe_action_optional), type); changed |= (pe_graph_updated_first | pe_graph_updated_then); } if(first_action && then_action) { changed |= then_child->cmds->update_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type); } else { crm_err("Nothing found either for %s (%p) or %s (%p) %s", first_child->id, first_action, then_child->id, then_action, task2text(task)); } } } if(children != then->rsc->children) { g_list_free(children); } return changed; } bool can_interleave_actions(pe_action_t *first, pe_action_t *then) { bool interleave = FALSE; resource_t *rsc = NULL; const char *interleave_s = NULL; if(first->rsc == NULL || then->rsc == NULL) { crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc == then->rsc) { crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) { crm_trace("Not interleaving %s with %s (both sides must be clones, masters, or bundles)", first->uuid, then->uuid); return FALSE; } if (crm_ends_with(then->uuid, "_stop_0") || crm_ends_with(then->uuid, "_demote_0")) { rsc = first->rsc; } else { rsc = then->rsc; } interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? 
"yes" : "no", rsc->id); return interleave; } enum pe_graph_flags container_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; crm_trace("%s -> %s", first->uuid, then->uuid); if(can_interleave_actions(first, then)) { changed = container_update_interleave_actions(first, then, node, flags, filter, type); } else if(then->rsc) { GListPtr gIter = NULL; GListPtr children = NULL; // Handle the 'primitive' ordering case changed |= native_update_actions(first, then, node, flags, filter, type); // Now any children (or containers in the case of a bundle) children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; enum pe_graph_flags then_child_changed = pe_graph_none; action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node); if (then_child_action) { enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node); if (is_set(then_child_flags, pe_action_runnable)) { then_child_changed |= then_child->cmds->update_actions(first, then_child_action, node, flags, filter, type); } changed |= then_child_changed; if (then_child_changed & pe_graph_updated_then) { for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *next = (action_wrapper_t *) lpc->data; update_action(next->action); } } } } if(children != then->rsc->children) { g_list_free(children); } } return changed; } void container_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { container_variant_data_t *container_data = NULL; get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (tuple->docker) { tuple->docker->cmds->rsc_location(tuple->docker, constraint); } if(tuple->ip) { tuple->ip->cmds->rsc_location(tuple->ip, constraint); } } if(container_data->child && (constraint->role_filter == RSC_ROLE_SLAVE || constraint->role_filter == RSC_ROLE_MASTER)) { container_data->child->cmds->rsc_location(container_data->child, constraint); container_data->child->rsc_location = g_list_prepend(container_data->child->rsc_location, constraint); } } void container_expand(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); if(container_data->child) { container_data->child->cmds->expand(container_data->child, data_set); } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if (tuple->remote && tuple->docker && container_fix_remote_addr(tuple->remote)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR); const char *calculated_addr = container_fix_remote_addr_in(tuple->remote, nvpair, "value"); if (calculated_addr) { crm_trace("Fixed addr for %s on %s", tuple->remote->id, calculated_addr); g_hash_table_replace(tuple->remote->parameters, strdup("addr"), 
strdup(calculated_addr)); } else { crm_err("Could not fix addr for %s", tuple->remote->id); } } if(tuple->ip) { tuple->ip->cmds->expand(tuple->ip, data_set); } if(tuple->docker) { tuple->docker->cmds->expand(tuple->docker, data_set); } if(tuple->remote) { tuple->remote->cmds->expand(tuple->remote, data_set); } } } gboolean container_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { bool any_created = FALSE; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return FALSE); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { any_created |= tuple->ip->cmds->create_probe(tuple->ip, node, complete, force, data_set); } if(tuple->child && node->details == tuple->node->details) { any_created |= tuple->child->cmds->create_probe(tuple->child, node, complete, force, data_set); } if(tuple->docker) { bool created = tuple->docker->cmds->create_probe(tuple->docker, node, complete, force, data_set); if(created) { any_created = TRUE; /* If we're limited to one replica per host (due to * the lack of an IP range probably), then we don't * want any of our peer containers starting until * we've established that no other copies are already * running. * * Partly this is to ensure that replicas_per_host is * observed, but also to ensure that the containers * don't fail to start because the necessary port * mappings (which won't include an IP for uniqueness) * are already taken */ for (GListPtr tIter = container_data->tuples; tIter != NULL && container_data->replicas_per_host == 1; tIter = tIter->next) { container_grouping_t *other = (container_grouping_t *)tIter->data; if ((other != tuple) && (other != NULL) && (other->docker != NULL)) { custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_STATUS, 0), NULL, other->docker, generate_op_key(other->docker->id, RSC_START, 0), NULL, pe_order_optional|pe_order_same_node, data_set); } } } } if (tuple->docker && tuple->remote && tuple->remote->cmds->create_probe(tuple->remote, node, complete, force, data_set)) { /* Do not probe the remote resource until we know where docker is running * Required for REMOTE_CONTAINER_HACK to correctly probe remote resources */ char *probe_uuid = generate_op_key(tuple->remote->id, RSC_STATUS, 0); action_t *probe = find_first_action(tuple->remote->actions, probe_uuid, NULL, node); free(probe_uuid); if (probe) { any_created = TRUE; crm_trace("Ordering %s probe on %s", tuple->remote->id, node->details->uname); custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_START, 0), NULL, tuple->remote, NULL, probe, pe_order_probe, data_set); } } } return any_created; } void container_append_meta(resource_t * rsc, xmlNode * xml) { } GHashTable * container_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } void container_LogActions( resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { 
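
The probe-before-peer-start ordering below leans on the operation-key convention used throughout the scheduler: generate_op_key() produces "<resource>_<task>_<interval>", and a probe is a monitor with interval 0. A small sketch of the keys involved, with made-up replica names (a simplified stand-in for generate_op_key(), not the real helper):

    #include <stdio.h>

    /* Simplified stand-in: "<rsc>_<task>_<interval>". */
    static void
    op_key(char *buf, size_t len, const char *rsc, const char *task, int interval)
    {
        snprintf(buf, len, "%s_%s_%d", rsc, task, interval);
    }

    int main(void)
    {
        const char *replicas[] = { "httpd-docker-0", "httpd-docker-1", "httpd-docker-2" };
        char probe[64], start[64];

        /* With replicas_per_host == 1, each replica's probe must precede
         * the start of every other replica. Shown for replica 0 only: */
        op_key(probe, sizeof(probe), replicas[0], "monitor", 0);
        for (int i = 1; i < 3; i++) {
            op_key(start, sizeof(start), replicas[i], "start", 0);
            printf("order: %s -> %s\n", probe, start);
        }
        return 0;
    }
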
LogActions(tuple->ip, data_set, terminal); } if(tuple->docker) { LogActions(tuple->docker, data_set, terminal); } if(tuple->remote) { LogActions(tuple->remote, data_set, terminal); } if(tuple->child) { LogActions(tuple->child, data_set, terminal); } } } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index f3eb190c3f..c1b8892d50 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,1254 +1,1261 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include bool BE_QUIET = FALSE; bool scope_master = FALSE; int cib_options = cib_sync_call; GMainLoop *mainloop = NULL; #define message_timeout_ms 60*1000 static gboolean resource_ipc_timeout(gpointer data) { fprintf(stderr, "No messages received in %d seconds.. aborting\n", (int)message_timeout_ms / 1000); crm_err("No messages received in %d seconds", (int)message_timeout_ms / 1000); return crm_exit(-1); } static void resource_ipc_connection_destroy(gpointer user_data) { crm_info("Connection to CRMd was terminated"); crm_exit(1); } static void start_mainloop(void) { if (crmd_replies_needed == 0) { return; } mainloop = g_main_new(FALSE); fprintf(stderr, "Waiting for %d replies from the CRMd", crmd_replies_needed); crm_debug("Waiting for %d replies from the CRMd", crmd_replies_needed); g_timeout_add(message_timeout_ms, resource_ipc_timeout, NULL); g_main_run(mainloop); } static int resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata) { xmlNode *msg = string2xml(buffer); fprintf(stderr, "."); crm_log_xml_trace(msg, "[inbound]"); crmd_replies_needed--; if ((crmd_replies_needed == 0) && mainloop && g_main_loop_is_running(mainloop)) { fprintf(stderr, " OK\n"); crm_debug("Got all the replies we expected"); return crm_exit(pcmk_ok); } free_xml(msg); return 0; } struct ipc_client_callbacks crm_callbacks = { .dispatch = resource_ipc_callback, .destroy = resource_ipc_connection_destroy, }; /* short option letters still available: eEJkKXyYZ */ /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ { "help", no_argument, NULL, '?', "\t\tDisplay this text and exit" }, { "version", no_argument, NULL, '$', "\t\tDisplay version information and exit" }, { "verbose", no_argument, NULL, 'V', "\t\tIncrease debug output (may be specified multiple times)" }, { "quiet", no_argument, NULL, 'Q', "\t\tBe less descriptive in results" }, { "resource", required_argument, NULL, 'r', "\tResource ID" }, { "-spacer-", no_argument, NULL, '-', "\nQueries:" }, { "list", no_argument, NULL, 'L', "\t\tList all cluster resources with status"}, { "list-raw", no_argument, NULL, 'l', "\t\tList IDs of all instantiated resources (individual members rather than groups etc.)" }, { "list-cts", no_argument, NULL, 'c', NULL, pcmk_option_hidden }, { 
"list-operations", no_argument, NULL, 'O', "\tList active resource operations, optionally filtered by --resource and/or --node" }, { "list-all-operations", no_argument, NULL, 'o', "List all resource operations, optionally filtered by --resource and/or --node" }, { "pending", no_argument, NULL, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden }, { "list-standards", no_argument, NULL, 0, "\tList supported standards" }, { "list-ocf-providers", no_argument, NULL, 0, "List all available OCF providers" }, { "list-agents", required_argument, NULL, 0, "List all agents available for the named standard and/or provider." }, { "list-ocf-alternatives", required_argument, NULL, 0, "List all available providers for the named OCF agent" }, { "show-metadata", required_argument, NULL, 0, "Show the metadata for the named class:provider:agent" }, { "query-xml", no_argument, NULL, 'q', "\tShow XML configuration of resource (after any template expansion)" }, { "query-xml-raw", no_argument, NULL, 'w', "\tShow XML configuration of resource (before any template expansion)" }, { "get-parameter", required_argument, NULL, 'g', "Display named parameter for resource.\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified" }, { "get-property", required_argument, NULL, 'G', "Display named property of resource ('class', 'type', or 'provider') (requires --resource)", pcmk_option_hidden }, { "locate", no_argument, NULL, 'W', "\t\tShow node(s) currently running resource" }, { "stack", no_argument, NULL, 'A', "\t\tDisplay the prerequisites and dependents of a resource" }, { "constraints", no_argument, NULL, 'a', "\tDisplay the (co)location constraints that apply to a resource" }, { "why", no_argument, NULL, 'Y', "\t\tShow why resources are not running, optionally filtered by --resource and/or --node" }, { "-spacer-", no_argument, NULL, '-', "\nCommands:" }, { "validate", no_argument, NULL, 0, "\t\tCall the validate-all action of the local given resource" }, { "cleanup", no_argument, NULL, 'C', - "\t\tDelete failed operations from a resource's history allowing its current state to be rechecked.\n" + "\t\tIf resource has any past failures, clear its history and fail count.\n" "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" + "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n" + "\t\t\t\tto allow current state to be rechecked.\n" }, { "refresh", no_argument, NULL, 'R', "\t\tDelete resource's history (including failures) so its current state is rechecked.\n" - "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" - "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned" + "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n" + "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed." }, { "set-parameter", required_argument, NULL, 'p', "Set named parameter for resource (requires -v).\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified." }, { "delete-parameter", required_argument, NULL, 'd', "Delete named parameter for resource.\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified." 
}, { "set-property", required_argument, NULL, 'S', "Set named property of resource ('class', 'type', or 'provider') (requires -r, -t, -v)", pcmk_option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nResource location:" }, { "move", no_argument, NULL, 'M', "\t\tCreate a constraint to move resource. If --node is specified, the constraint\n" "\t\t\t\twill be to move to that node, otherwise it will be to ban the current node.\n" "\t\t\t\tUnless --force is specified, this will return an error if the resource is\n" "\t\t\t\talready running on the specified node. If --force is specified, this will\n" "\t\t\t\talways ban the current node. Optional: --lifetime, --master.\n" "\t\t\t\tNOTE: This may prevent the resource from running on its previous location\n" "\t\t\t\tuntil the implicit constraint expires or is removed with --clear." }, { "ban", no_argument, NULL, 'B', "\t\tCreate a constraint to keep resource off a node. Optional: --node, --lifetime, --master.\n" "\t\t\t\tNOTE: This will prevent the resource from running on the affected node\n" "\t\t\t\tuntil the implicit constraint expires or is removed with --clear.\n" "\t\t\t\tIf --node is not specified, it defaults to the node currently running the resource\n" "\t\t\t\tfor primitives and groups, or the master for master/slave clones with master-max=1\n" "\t\t\t\t(all other situations result in an error as there is no sane default).\n" }, { "clear", no_argument, NULL, 'U', "\t\tRemove all constraints created by the --ban and/or --move commands.\n" "\t\t\t\tRequires: --resource. Optional: --node, --master.\n" "\t\t\t\tIf --node is not specified, all constraints created by --ban and --move\n" "\t\t\t\twill be removed for the named resource. If --node and --force are specified,\n" "\t\t\t\tany constraint created by --move will be cleared, even if it is not for the specified node." }, { "lifetime", required_argument, NULL, 'u', "\tLifespan (as ISO 8601 duration) of created constraints (with -B, -M)\n" "\t\t\t\t(see https://en.wikipedia.org/wiki/ISO_8601#Durations)" }, { "master", no_argument, NULL, 0, "\t\tLimit scope of command to the Master role (with -B, -M, -U).\n" "\t\t\t\tFor -B and -M, the previous master may remain active in the Slave role." }, { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:" }, { "delete", no_argument, NULL, 'D', "\t\t(Advanced) Delete a resource from the CIB. Required: -t" }, { "fail", no_argument, NULL, 'F', "\t\t(Advanced) Tell the cluster this resource has failed" }, { "restart", no_argument, NULL, 0, "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it" }, { "wait", no_argument, NULL, 0, "\t\t(Advanced) Wait until the cluster settles into a stable state" }, { "force-demote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and demote a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." }, { "force-stop", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and stop a resource on the local node." }, { "force-start", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and start a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." 
}, { "force-promote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and promote a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." }, { "force-check", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and check the state of a resource on the local node." }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:" }, { "node", required_argument, NULL, 'N', "\tNode name" }, { "recursive", no_argument, NULL, 0, "\tFollow colocation chains when using --set-parameter" }, { "resource-type", required_argument, NULL, 't', "Resource XML element (primitive, group, etc.) (with -D)" }, { "parameter-value", required_argument, NULL, 'v', "Value to use with -p" }, { "meta", no_argument, NULL, 'm', "\t\tUse resource meta-attribute instead of instance attribute (with -p, -g, -d)" }, { "utilization", no_argument, NULL, 'z', "\tUse resource utilization attribute instead of instance attribute (with -p, -g, -d)" }, { "operation", required_argument, NULL, 'n', "\tOperation to clear instead of all (with -C -r)" }, { "interval", required_argument, NULL, 'I', "\tInterval of operation to clear (default 0) (with -C -r -n)" }, { "set-name", required_argument, NULL, 's', "\t(Advanced) XML ID of attributes element to use (with -p, -d)" }, { "nvpair", required_argument, NULL, 'i', "\t(Advanced) XML ID of nvpair element to use (with -p, -d)" }, { "timeout", required_argument, NULL, 'T', "\t(Advanced) Abort if command does not finish in this time (with --restart, --wait, --force-*)" }, { "force", no_argument, NULL, 'f', "\t\tIf making CIB changes, do so regardless of quorum.\n" "\t\t\t\tSee help for individual commands for additional behavior.\n" }, { "xml-file", required_argument, NULL, 'x', NULL, pcmk_option_hidden }, /* legacy options */ {"host-uname", required_argument, NULL, 'H', NULL, pcmk_option_hidden}, {"migrate", no_argument, NULL, 'M', NULL, pcmk_option_hidden}, {"un-migrate", no_argument, NULL, 'U', NULL, pcmk_option_hidden}, {"un-move", no_argument, NULL, 'U', NULL, pcmk_option_hidden}, {"reprobe", no_argument, NULL, 'P', NULL, pcmk_option_hidden}, {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "The cluster will not attempt to start or stop the resource under any 
circumstances."}, {"-spacer-", 1, NULL, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."}, {"-spacer-", 1, NULL, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example}, {0, 0, 0, 0} }; /* *INDENT-ON* */ int main(int argc, char **argv) { char rsc_cmd = 'L'; const char *rsc_id = NULL; const char *host_uname = NULL; const char *prop_name = NULL; const char *prop_value = NULL; const char *rsc_type = NULL; const char *prop_id = NULL; const char *prop_set = NULL; const char *rsc_long_cmd = NULL; const char *longname = NULL; const char *operation = NULL; const char *interval = NULL; const char *cib_file = getenv("CIB_file"); GHashTable *override_params = NULL; char *xml_file = NULL; crm_ipc_t *crmd_channel = NULL; pe_working_set_t data_set = { 0, }; cib_t *cib_conn = NULL; resource_t *rsc = NULL; bool recursive = FALSE; char *our_pid = NULL; bool require_resource = TRUE; /* whether command requires that resource be specified */ bool require_dataset = TRUE; /* whether command requires populated dataset instance */ bool require_crmd = FALSE; /* whether command requires connection to CRMd */ - bool just_errors = TRUE; /* whether cleanup command deletes all history or just errors */ int rc = pcmk_ok; int is_ocf_rc = 0; int option_index = 0; int timeout_ms = 0; int argerr = 0; int flag; int find_flags = 0; // Flags to use when searching for resource crm_log_cli_init("crm_resource"); crm_set_options(NULL, "(query|command) [options]", long_options, "Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n"); while (1) { flag = crm_get_option_long(argc, argv, &option_index, &longname); if (flag == -1) break; switch (flag) { case 0: /* long options with no short equivalent */ if (safe_str_eq("master", longname)) { scope_master = TRUE; } else if(safe_str_eq(longname, "recursive")) { recursive = TRUE; } else if (safe_str_eq("wait", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; require_resource = FALSE; require_dataset = FALSE; } else if ( safe_str_eq("validate", longname) || safe_str_eq("restart", longname) || safe_str_eq("force-demote", longname) || safe_str_eq("force-stop", longname) || safe_str_eq("force-start", longname) || safe_str_eq("force-promote", longname) || safe_str_eq("force-check", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; find_flags = pe_find_renamed|pe_find_anon; crm_log_args(argc, argv); } else if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname) || safe_str_eq("list-standards", longname)) { const char *text = NULL; lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname)) { rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list); text = "OCF providers"; } else if (safe_str_eq("list-standards", 
longname)) { rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); text = "standards"; } if (rc > 0) { rc = 0; for (iter = list; iter != NULL; iter = iter->next) { rc++; printf("%s\n", iter->val); } lrmd_list_freeall(list); } else if (optarg) { fprintf(stderr, "No %s found for %s\n", text, optarg); } else { fprintf(stderr, "No %s found\n", text); } lrmd_api_delete(lrmd_conn); return crm_exit(rc); } else if (safe_str_eq("show-metadata", longname)) { char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(optarg, &standard, &provider, &type); if (rc == pcmk_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); } else { fprintf(stderr, "'%s' is not a valid agent specification\n", optarg); } if (metadata) { printf("%s\n", metadata); } else { fprintf(stderr, "Metadata query for %s failed: %s\n", optarg, pcmk_strerror(rc)); } lrmd_api_delete(lrmd_conn); return crm_exit(rc); } else if (safe_str_eq("list-agents", longname)) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; char *provider = strchr (optarg, ':'); lrmd_t *lrmd_conn = lrmd_api_new(); if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider); if (rc > 0) { rc = 0; for (iter = list; iter != NULL; iter = iter->next) { printf("%s\n", iter->val); rc++; } lrmd_list_freeall(list); rc = 0; } else { fprintf(stderr, "No agents found for standard=%s, provider=%s\n", optarg, (provider? provider : "*")); rc = -1; } lrmd_api_delete(lrmd_conn); return crm_exit(rc); } else { crm_err("Unhandled long option: %s", longname); } break; case 'V': resource_verbose++; crm_bump_log_level(argc, argv); break; case '$': case '?': crm_help(flag, EX_OK); break; case 'x': xml_file = strdup(optarg); break; case 'Q': BE_QUIET = TRUE; break; case 'm': attr_set_type = XML_TAG_META_SETS; break; case 'z': attr_set_type = XML_TAG_UTILIZATION; break; case 'u': move_lifetime = strdup(optarg); break; case 'f': do_force = TRUE; crm_log_args(argc, argv); break; case 'i': prop_id = optarg; break; case 's': prop_set = optarg; break; case 'r': rsc_id = optarg; break; case 'v': prop_value = optarg; break; case 't': rsc_type = optarg; break; case 'T': timeout_ms = crm_get_msec(optarg); break; case 'R': case 'P': crm_log_args(argc, argv); require_resource = FALSE; if (cib_file == NULL) { require_crmd = TRUE; } - just_errors = FALSE; - rsc_cmd = 'C'; + rsc_cmd = 'R'; find_flags = pe_find_renamed|pe_find_anon; break; case 'C': crm_log_args(argc, argv); require_resource = FALSE; if (cib_file == NULL) { require_crmd = TRUE; } - just_errors = TRUE; rsc_cmd = 'C'; find_flags = pe_find_renamed|pe_find_anon; break; case 'n': operation = optarg; break; case 'I': interval = optarg; break; case 'D': require_dataset = FALSE; crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'F': require_crmd = TRUE; crm_log_args(argc, argv); rsc_cmd = flag; break; case 'U': case 'B': case 'M': crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'c': case 'L': case 'l': case 'O': case 'o': require_resource = FALSE; rsc_cmd = flag; break; case 'Y': require_resource = FALSE; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'q': case 'w': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'W': case 'A': case 'a': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'j': print_pending = 
TRUE; break; case 'S': require_dataset = FALSE; crm_log_args(argc, argv); prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'p': case 'd': crm_log_args(argc, argv); prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'G': case 'g': prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'h': case 'H': case 'N': crm_trace("Option %c => %s", flag, optarg); host_uname = optarg; break; default: CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag); ++argerr; break; } } // Catch the case where the user didn't specify a command if (rsc_cmd == 'L') { require_resource = FALSE; } if (optind < argc && argv[optind] != NULL && rsc_cmd == 0 && rsc_long_cmd) { override_params = crm_str_table_new(); while (optind < argc && argv[optind] != NULL) { char *name = calloc(1, strlen(argv[optind]) + 1); /* +1: %[^=] may consume the whole argument, plus NUL */ char *value = calloc(1, strlen(argv[optind]) + 1); int rc = sscanf(argv[optind], "%[^=]=%s", name, value); if(rc == 2) { g_hash_table_replace(override_params, name, value); } else { CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd); free(value); free(name); argerr++; } optind++; } } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) { CMD_ERR("non-option ARGV-elements: "); while (optind < argc && argv[optind] != NULL) { CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]); optind++; argerr++; } } if (optind > argc) { ++argerr; } if (argerr) { CMD_ERR("Invalid option(s) supplied, use --help for valid usage"); return crm_exit(EX_USAGE); } our_pid = crm_getpid_s(); if (do_force) { crm_debug("Forcing..."); cib_options |= cib_quorum_override; } data_set.input = NULL; /* make clean-up easier */ if (require_resource && !rsc_id) { CMD_ERR("Must supply a resource id with -r"); rc = -ENXIO; goto bail; } if (find_flags && rsc_id) { require_dataset = TRUE; } /* Establish a connection to the CIB */ cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc != pcmk_ok) { CMD_ERR("Error signing on to the CIB service: %s", pcmk_strerror(rc)); goto bail; } /* Populate working set from XML file if specified or CIB query otherwise */ if (require_dataset) { xmlNode *cib_xml_copy = NULL; if (xml_file != NULL) { cib_xml_copy = filename2xml(xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); } if(rc != pcmk_ok) { goto bail; } /* Populate the working set instance */ set_working_set_defaults(&data_set); rc = update_working_set_xml(&data_set, &cib_xml_copy); if (rc != pcmk_ok) { goto bail; } cluster_status(&data_set); } // If command requires that resource exist if specified, find it if (find_flags && rsc_id) { rsc = pe_find_resource_with_flags(data_set.resources, rsc_id, find_flags); if (rsc == NULL) { CMD_ERR("Resource '%s' not found", rsc_id); rc = -ENXIO; goto bail; } } /* Establish a connection to the CRMd if needed */ if (require_crmd) { xmlNode *xml = NULL; mainloop_io_t *source = mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks); crmd_channel = mainloop_get_ipc_client(source); if (crmd_channel == NULL) { CMD_ERR("Error signing on to the CRMd service"); rc = -ENOTCONN; goto bail; } xml = create_hello_message(our_pid, crm_system_name, "0", "1"); crm_ipc_send(crmd_channel, xml, 0, 0, NULL); free_xml(xml); } /* Handle rsc_cmd appropriately */ if (rsc_cmd == 'L') { rc = pcmk_ok; cli_resource_print_list(&data_set, FALSE); } else if 
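
A note on the override parsing earlier in main(): the buffers must hold strlen(argv[optind]) + 1 bytes, because with no '=' present the "%[^=]" conversion consumes the entire argument before sscanf() reports the mismatch (fixed above). An equivalent split on the first '=' avoids sscanf() altogether (a sketch, not the tool's actual helper):

    #include <stdlib.h>
    #include <string.h>

    /* Split "name=value" into two freshly allocated strings.
     * Returns 0 on success, -1 on a missing separator or empty name. */
    static int
    split_nvpair(const char *arg, char **name, char **value)
    {
        const char *eq = strchr(arg, '=');

        if ((eq == NULL) || (eq == arg)) {
            return -1;
        }
        *name = strndup(arg, (size_t) (eq - arg));
        *value = strdup(eq + 1);
        if ((*name == NULL) || (*value == NULL)) {
            free(*name);
            free(*value);
            return -1;
        }
        return 0;
    }
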
(rsc_cmd == 'l') { int found = 0; GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { rsc = (resource_t *) lpc->data; found++; cli_resource_print_raw(rsc); } if (found == 0) { printf("NO resources configured\n"); rc = -ENXIO; } } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) { rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) { rc = wait_till_stable(timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd) { // validate, force-(stop|start|demote|promote|check) rc = cli_resource_execute(rsc, rsc_id, rsc_long_cmd, override_params, timeout_ms, cib_conn, &data_set); if (rc >= 0) { is_ocf_rc = 1; } } else if (rsc_cmd == 'A' || rsc_cmd == 'a') { GListPtr lpc = NULL; xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input); unpack_constraints(cib_constraints, &data_set); // Constraints apply to group/clone, not member/instance rsc = uber_parent(rsc); for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1); fprintf(stdout, "* %s\n", rsc->id); cli_resource_print_location(rsc, NULL); for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1); } else if (rsc_cmd == 'c') { GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { rsc = (resource_t *) lpc->data; cli_resource_print_cts(rsc); } cli_resource_print_cts_constraints(&data_set); } else if (rsc_cmd == 'F') { rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, &data_set); if (rc == pcmk_ok) { start_mainloop(); } } else if (rsc_cmd == 'O') { rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, &data_set); } else if (rsc_cmd == 'o') { rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, &data_set); } else if (rsc_cmd == 'W') { rc = cli_resource_search(rsc, rsc_id, &data_set); if (rc >= 0) { rc = pcmk_ok; } } else if (rsc_cmd == 'q') { rc = cli_resource_print(rsc, &data_set, TRUE); } else if (rsc_cmd == 'w') { rc = cli_resource_print(rsc, &data_set, FALSE); } else if (rsc_cmd == 'Y') { node_t *dest = NULL; if (host_uname) { dest = pe_find_node(data_set.nodes, host_uname); if (dest == NULL) { CMD_ERR("Unknown node: %s", host_uname); rc = -ENXIO; goto bail; } } cli_resource_why(cib_conn, data_set.resources, rsc, dest); rc = pcmk_ok; } else if (rsc_cmd == 'U') { node_t *dest = NULL; if (host_uname) { dest = pe_find_node(data_set.nodes, host_uname); if (dest == NULL) { CMD_ERR("Unknown node: %s", host_uname); rc = -ENXIO; goto bail; } rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn); } else { rc = cli_resource_clear(rsc_id, NULL, data_set.nodes, cib_conn); } } else if (rsc_cmd == 'M' && host_uname) { rc = cli_resource_move(rsc, rsc_id, host_uname, cib_conn, &data_set); } else if (rsc_cmd == 'B' && host_uname) { node_t *dest = pe_find_node(data_set.nodes, host_uname); if (dest == NULL) { CMD_ERR("Error performing operation: node '%s' is unknown", host_uname); rc = -ENXIO; goto bail; } rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn); } else if (rsc_cmd == 'B' || rsc_cmd == 'M') { rc = -EINVAL; if (g_list_length(rsc->running_on) == 1) { node_t *current = 
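
For reference, the implicit-target selection that the --ban/--move branch here implements: with no --node given, a target is only unambiguous when exactly one instance is running, or, for master/slave clones, when exactly one instance is promoted. Reduced to a sketch (illustrative types, not the scheduler's):

    /* Where is a resource instance active, and is it promoted there? */
    typedef struct {
        const char *uname;
        int is_master;
    } where_t;

    static const char *
    implicit_ban_target(const where_t *active, int n_active)
    {
        const char *master = NULL;
        int masters = 0;

        if (n_active == 1) {
            return active[0].uname;    /* exactly one instance: ban there */
        }
        for (int i = 0; i < n_active; i++) {
            if (active[i].is_master) {
                masters++;
                master = active[i].uname;
            }
        }
        /* NULL means the caller must report "no sane default" */
        return (masters == 1) ? master : NULL;
    }
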
rsc->running_on->data; rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else if(rsc->variant == pe_master) { int count = 0; GListPtr iter = NULL; node_t *current = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = child->running_on->data; } } if(count == 1 && current) { rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else { CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, g_list_length(rsc->running_on), count); CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id); CMD_ERR("You can prevent '%s' from being promoted at a specific location with:" " --ban --master --node <name>", rsc_id); } } else { CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, g_list_length(rsc->running_on)); CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id); } } else if (rsc_cmd == 'G') { rc = cli_resource_print_property(rsc, prop_name, &data_set); } else if (rsc_cmd == 'S') { xmlNode *msg_data = NULL; if ((rsc_type == NULL) || !strlen(rsc_type)) { CMD_ERR("Must specify -t with resource type"); rc = -ENXIO; goto bail; } else if ((prop_value == NULL) || !strlen(prop_value)) { CMD_ERR("Must supply -v with new value"); rc = -EINVAL; goto bail; } CRM_LOG_ASSERT(prop_name != NULL); msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); crm_xml_add(msg_data, prop_name, prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else if (rsc_cmd == 'g') { rc = cli_resource_print_attribute(rsc, prop_name, &data_set); } else if (rsc_cmd == 'p') { if (prop_value == NULL || strlen(prop_value) == 0) { CMD_ERR("You need to supply a value with the -v option"); rc = -EINVAL; goto bail; } /* coverity[var_deref_model] False positive */ rc = cli_resource_update_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, prop_value, recursive, cib_conn, &data_set); } else if (rsc_cmd == 'd') { /* coverity[var_deref_model] False positive */ rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, cib_conn, &data_set); - } else if (rsc_cmd == 'C' && just_errors) { + } else if (rsc_cmd == 'C') { crmd_replies_needed = 0; for (xmlNode *xml_op = __xml_first_child(data_set.failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); - if(resource_name == NULL) { + if (resource_name == NULL) { continue; } else if(host_uname && safe_str_neq(host_uname, node)) { continue; - } else if(rsc_id && safe_str_neq(rsc_id, resource_name)) { - continue; } else if(operation && safe_str_neq(operation, task)) { continue; } else if(interval && safe_str_neq(interval, task_interval)) { continue; } + if (rsc_id) { + resource_t *fail_rsc = pe_find_resource_with_flags(data_set.resources, + resource_name, + find_flags); + + if (!fail_rsc || safe_str_neq(rsc->id, fail_rsc->id)) { + continue; + } + } + crm_debug("Erasing %s failure for %s (%s detected) on %s", task, rsc->id, resource_name, node); rc = cli_resource_delete(crmd_channel, node, rsc, 
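
The hunk above replaces a literal string comparison against rsc_id with a configuration lookup, so a failure recorded under a clone instance name (for example "myClone:0") still matches when the user asked for "myClone". The idea in miniature (a hypothetical string-level approximation; the real code resolves both names through pe_find_resource_with_flags() instead):

    #include <stdio.h>
    #include <string.h>

    /* Does a failed op's recorded resource name refer to the requested one?
     * Approximated by stripping a ":<instance>" suffix before comparing. */
    static int
    fail_entry_matches(const char *requested_id, const char *recorded_name)
    {
        size_t n = strcspn(recorded_name, ":");

        return (strlen(requested_id) == n)
               && (strncmp(requested_id, recorded_name, n) == 0);
    }

    int main(void)
    {
        printf("%d\n", fail_entry_matches("myClone", "myClone:0")); /* 1 */
        printf("%d\n", fail_entry_matches("other", "myClone:0"));   /* 0 */
        return 0;
    }
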
task, task_interval, &data_set); } if(rsc && (rc == pcmk_ok) && (BE_QUIET == FALSE)) { /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(); } - } else if ((rsc_cmd == 'C') && rsc) { + } else if ((rsc_cmd == 'R') && rsc) { if(do_force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, rsc_id, host_uname); crmd_replies_needed = 0; - rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation, - interval, &data_set); + rc = cli_resource_delete(crmd_channel, host_uname, rsc, NULL, 0, + &data_set); if(rc == pcmk_ok && BE_QUIET == FALSE) { /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(); } - } else if (rsc_cmd == 'C') { + } else if (rsc_cmd == 'R') { #if HAVE_ATOMIC_ATTRD const char *router_node = host_uname; xmlNode *msg_data = NULL; xmlNode *cmd = NULL; int attr_options = attrd_opt_none; if (host_uname) { node_t *node = pe_find_node(data_set.nodes, host_uname); if (node && is_remote_node(node)) { if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) { CMD_ERR("No lrmd connection detected to remote node %s", host_uname); rc = -ENXIO; goto bail; } node = node->details->remote_rsc->running_on->data; router_node = node->details->uname; attr_options |= attrd_opt_remote; } } if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s due to CIB_file\n", host_uname? host_uname : "all nodes"); rc = pcmk_ok; goto bail; } msg_data = create_xml_node(NULL, "crm-resource-reprobe-op"); crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname); if (safe_str_neq(router_node, host_uname)) { crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); } cmd = create_request(CRM_OP_REPROBE, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); free_xml(msg_data); crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes"); rc = attrd_clear_delegate(NULL, host_uname, NULL, NULL, NULL, NULL, attr_options); if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { start_mainloop(); } free_xml(cmd); #else GListPtr rIter = NULL; crmd_replies_needed = 0; for (rIter = data_set.resources; rIter; rIter = rIter->next) { rsc = rIter->data; cli_resource_delete(crmd_channel, host_uname, rsc, NULL, NULL, &data_set); } start_mainloop(); #endif } else if (rsc_cmd == 'D') { xmlNode *msg_data = NULL; if (rsc_type == NULL) { CMD_ERR("You need to specify a resource type with -t"); rc = -ENXIO; goto bail; } msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); rc = cib_conn->cmds->delete(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else { CMD_ERR("Unknown command: %c", rsc_cmd); } bail: free(our_pid); if (data_set.input != NULL) { cleanup_alloc_calculations(&data_set); } if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } if (rc == -pcmk_err_no_quorum) { CMD_ERR("Error performing operation: %s", pcmk_strerror(rc)); CMD_ERR("Try using -f"); } else if (rc != pcmk_ok && !is_ocf_rc) { CMD_ERR("Error performing operation: %s", pcmk_strerror(rc)); } return crm_exit(rc); } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index 22bdebf8e3..189d1b3e07 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,1814 +1,1825 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This 
program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include int resource_verbose = 0; bool do_force = FALSE; int crmd_replies_needed = 1; /* The welcome message */ const char *attr_set_type = XML_TAG_ATTR_SETS; static int do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set) { int found = 0; GListPtr lpc = NULL; for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { node_t *node = (node_t *) lpc->data; if (BE_QUIET) { fprintf(stdout, "%s\n", node->details->uname); } else { const char *state = ""; if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) { state = "Master"; } fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state); } found++; } if (BE_QUIET == FALSE && found == 0) { fprintf(stderr, "resource %s is NOT running\n", rsc); } return found; } int cli_resource_search(resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { int found = 0; resource_t *parent = uber_parent(rsc); if (pe_rsc_is_clone(rsc)) { for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) && is_not_set(rsc->flags, pe_rsc_unique) && rsc->clone_name && safe_str_eq(requested_name, rsc->clone_name) && safe_str_neq(requested_name, rsc->id)) { for (GListPtr iter = parent->children; iter; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } } else { found += do_find_resource(requested_name, rsc, data_set); } return found; } #define XPATH_MAX 1024 static int find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; int rc = pcmk_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return -ENOTCONN; } xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc); if (set_type) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type); if (set_name) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair["); if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id); } if (attr_name) { if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and "); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]"); 
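
For a typical query, the xpath_string assembled above comes out like this (example IDs and attribute names; this assumes get_object_path("resources") resolves to the CIB's resources section):

    /cib/configuration/resources//*[@id="myRsc"]/meta_attributes//nvpair[@name="target-role"]

or, with no set type given:

    /cib/configuration/resources//*[@id="myRsc"]//nvpair[@name="is-managed"]
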
CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); if (rc != pcmk_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { xmlNode *child = NULL; rc = -EINVAL; printf("Multiple attributes match name=%s\n", attr_name); for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { printf(" Value: %s \t(id=%s)\n", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else if(value) { const char *tmp = crm_element_value(xml_search, attr); if (tmp) { *value = strdup(tmp); } } bail: free(xpath_string); free_xml(xml_search); return rc; } static resource_t * find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd) { int rc = pcmk_ok; char *lookup_id = NULL; char *local_attr_id = NULL; if(do_force == TRUE) { return rsc; } else if(rsc->parent) { switch(rsc->parent->variant) { case pe_group: if (BE_QUIET == FALSE) { printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); } break; case pe_master: case pe_clone: rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); free(local_attr_id); if(rc != pcmk_ok) { rsc = rsc->parent; if (BE_QUIET == FALSE) { printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); } } break; default: break; } } else if (rsc->parent && BE_QUIET == FALSE) { printf("Forcing %s of '%s' for '%s' instead of '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); } else if(rsc->parent == NULL && rsc->children) { resource_t *child = rsc->children->data; if(child->variant == pe_native) { lookup_id = clone_strip(child->id); /* Could be a cloned group! 
*/ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == pcmk_ok) { rsc = child; if (BE_QUIET == FALSE) { printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); } } free(local_attr_id); free(lookup_id); } } return rsc; } int cli_resource_update_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, bool recursive, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; static bool need_init = TRUE; char *lookup_id = NULL; char *local_attr_id = NULL; char *local_attr_set = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; bool use_attributes_tag = FALSE; if(attr_id == NULL && do_force == FALSE && pcmk_ok != find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL)) { printf("\n"); } if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { if (do_force == FALSE) { rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", uber_parent(rsc)->id, attr_name, local_attr_id); printf(" Delete '%s' first or use --force to override\n", local_attr_id); } free(local_attr_id); if (rc == pcmk_ok) { return -ENOTUNIQ; } } } else { rsc = find_matching_attr_resource(rsc, requested_name, attr_set, attr_id, attr_name, cib, "update"); } lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if (rc != -ENXIO) { free(lookup_id); free(local_attr_id); return rc; } else { const char *value = NULL; xmlNode *cib_top = NULL; const char *tag = crm_element_name(rsc->xml); cib->cmds->query(cib, "/cib", &cib_top, cib_sync_call | cib_scope_local | cib_xpath | cib_no_children); value = crm_element_value(cib_top, "ignore_dtd"); if (value != NULL) { use_attributes_tag = TRUE; } else { value = crm_element_value(cib_top, XML_ATTR_VALIDATION); if (crm_ends_with_ext(value, "-0.6")) { use_attributes_tag = TRUE; } } free_xml(cib_top); if (attr_set == NULL) { local_attr_set = crm_concat(lookup_id, attr_set_type, '-'); attr_set = local_attr_set; } if (attr_id == NULL) { local_attr_id = crm_concat(attr_set, attr_name, '-'); attr_id = local_attr_id; } if (use_attributes_tag && safe_str_eq(tag, XML_CIB_TAG_MASTER)) { tag = "master_slave"; /* use the old name */ } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); if (use_attributes_tag) { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTRS); } } xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Set '%s' option: id=%s%s%s%s%s=%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
attr_name : "", attr_value); } free_xml(xml_top); free(lookup_id); free(local_attr_id); free(local_attr_set); if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { GListPtr lpc = NULL; if(need_init) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); need_init = FALSE; unpack_constraints(cib_constraints, data_set); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } } crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); set_bit(rsc->flags, pe_rsc_allocating); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; resource_t *peer = cons->rsc_lh; crm_debug("Checking %s %d", cons->id, cons->score); if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) { /* Don't get into colocation loops */ crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id); cli_resource_update_attribute(peer, peer->id, NULL, NULL, attr_name, attr_value, recursive, cib, data_set); } } } return rc; } int cli_resource_delete_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; int rc = pcmk_ok; char *lookup_id = NULL; char *local_attr_id = NULL; if(attr_id == NULL && do_force == FALSE && find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) != pcmk_ok) { printf("\n"); } if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { rsc = find_matching_attr_resource(rsc, requested_name, attr_set, attr_id, attr_name, cib, "delete"); } lookup_id = clone_strip(rsc->id); rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == -ENXIO) { free(lookup_id); return pcmk_ok; } else if (rc != pcmk_ok) { free(lookup_id); return rc; } if (attr_id == NULL) { attr_id = local_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
attr_name : ""); } free(lookup_id); free_xml(xml_obj); free(local_attr_id); return rc; } static int send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, const char *host_uname, const char *rsc_id, bool only_failed, pe_working_set_t * data_set) { char *our_pid = NULL; char *key = NULL; int rc = -ECOMM; xmlNode *cmd = NULL; xmlNode *xml_rsc = NULL; const char *value = NULL; const char *router_node = host_uname; xmlNode *params = NULL; xmlNode *msg_data = NULL; resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { CMD_ERR("Resource %s not found", rsc_id); return -ENXIO; } else if (rsc->variant != pe_native) { CMD_ERR("We can only process primitive resources, not %s", rsc_id); return -EINVAL; } else if (host_uname == NULL) { CMD_ERR("Please supply a node name with --node"); return -EINVAL; } else { node_t *node = pe_find_node(data_set->nodes, host_uname); if (node && is_remote_node(node)) { if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) { CMD_ERR("No lrmd connection detected to remote node %s", host_uname); return -ENXIO; } node = node->details->remote_rsc->running_on->data; router_node = node->details->uname; } } key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); free(key); crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname); if (safe_str_neq(router_node, host_uname)) { crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); } xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); if (rsc->clone_name) { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id); } else { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE); if (value == NULL) { CMD_ERR("%s has no type! Aborting...", rsc_id); return -ENXIO; } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS); if (value == NULL) { CMD_ERR("%s has no class! Aborting...", rsc_id); return -ENXIO; } crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER); params = create_xml_node(msg_data, XML_TAG_ATTRS); crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); key = crm_meta_name(XML_LRM_ATTR_INTERVAL); crm_xml_add(params, key, "60000"); /* 1 minute */ free(key); our_pid = crm_getpid_s(); cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ free_xml(msg_data); if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { rc = 0; } else { crm_debug("Could not send %s op to the crmd", op); rc = -ENOTCONN; } free_xml(cmd); return rc; } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); return is_set(rsc->flags, pe_rsc_unique)? 
strdup(name) : clone_strip(name); } int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, resource_t *rsc, const char *operation, const char *interval, pe_working_set_t *data_set) { int rc = pcmk_ok; node_t *node = NULL; char *rsc_name = NULL; int attr_options = attrd_opt_none; if (rsc == NULL) { return -ENXIO; } else if (rsc->children) { GListPtr lpc = NULL; for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t *) lpc->data; rc = cli_resource_delete(crmd_channel, host_uname, child, operation, interval, data_set); if(rc != pcmk_ok) { return rc; } } return pcmk_ok; } else if (host_uname == NULL) { GListPtr lpc = NULL; GListPtr nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && do_force) { nodes = node_list_dup(data_set->nodes, FALSE, FALSE); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (node_t *) lpc->data; if (node->details->online) { cli_resource_delete(crmd_channel, node->details->uname, rsc, operation, interval, data_set); } } g_list_free(nodes); return pcmk_ok; } node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { printf("Unable to clean up %s because node %s not found\n", rsc->id, host_uname); return -ENODEV; } if (!node->details->rsc_discovery_enabled) { printf("Unable to clean up %s because resource discovery disabled on %s\n", rsc->id, host_uname); return -EOPNOTSUPP; } if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n", rsc->id, host_uname); return rc; } /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. 
*/ rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id, TRUE, data_set); if (rc != pcmk_ok) { printf("Unable to clean up %s history on %s: %s\n", rsc->id, host_uname, pcmk_strerror(rc)); return rc; } crmd_replies_needed++; crm_trace("Processing %d mainloop inputs", crmd_replies_needed); while(g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", crmd_replies_needed); } if(crmd_replies_needed < 0) { crmd_replies_needed = 0; } rsc_name = rsc_fail_name(rsc); if (is_remote_node(node)) { attr_options |= attrd_opt_remote; } rc = attrd_clear_delegate(NULL, host_uname, rsc_name, operation, interval, NULL, attr_options); if (rc != pcmk_ok) { printf("Cleaned %s history on %s, but unable to clear failures: %s\n", rsc->id, host_uname, pcmk_strerror(rc)); } else { printf("Cleaned up %s on %s\n", rsc->id, host_uname); } free(rsc_name); return rc; } void cli_resource_check(cib_t * cib_conn, resource_t *rsc) { int need_nl = 0; char *role_s = NULL; char *managed = NULL; resource_t *parent = uber_parent(rsc); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); if(role_s) { enum rsc_role_e role = text2role(role_s); if(role == RSC_ROLE_UNKNOWN) { // Treated as if unset } else if(role == RSC_ROLE_STOPPED) { printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id); need_nl++; } else if(parent->variant == pe_master && role == RSC_ROLE_SLAVE) { printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id); need_nl++; } } if(managed && crm_is_true(managed) == FALSE) { printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id); need_nl++; } if(need_nl) { printf("\n"); } } int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set) { crm_warn("Failing: %s", rsc_id); return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set); } static GHashTable * generate_resource_params(resource_t * rsc, pe_working_set_t * data_set) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; if (!rsc) { crm_err("Resource does not exist in config"); return NULL; } params = crm_str_table_new(); meta = crm_str_table_new(); combined = crm_str_table_new(); get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set); get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set); if (params) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { g_hash_table_insert(combined, strdup(key), strdup(value)); } g_hash_table_destroy(params); } if (meta) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } static bool resource_is_running_on(resource_t *rsc, const char *host) { bool found = TRUE; GListPtr hIter = NULL; GListPtr hosts = NULL; if(rsc == NULL) { return FALSE; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && 
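/* the scan below runs only when a specific host was requested; the checks after the loop handle the host == NULL case */ 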
hIter != NULL; hIter = hIter->next) { pe_node_t *node = (pe_node_t *) hIter->data; if(strcmp(host, node->details->uname) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } else if(strcmp(host, node->details->id) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if(host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = FALSE; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = FALSE; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { resource_t *rsc = (resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (rsc->variant == pe_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static GList* subtract_lists(GList *from, GList *items) { GList *item = NULL; GList *result = g_list_copy(from); for (item = items; item != NULL; item = item->next) { GList *candidate = NULL; for (candidate = from; candidate != NULL; candidate = candidate->next) { crm_info("Comparing %s with %s", candidate->data, item->data); if(strcmp(candidate->data, item->data) == 0) { result = g_list_remove(result, candidate->data); break; } } } return result; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { fprintf(stdout, "%s%s\n", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as working set input * * This also updates the working set timestamp to the current time. * * \param[in] data_set Working set instance to update * \param[in] xml XML to use as input * * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML * \note On success, caller is responsible for freeing memory allocated for * data_set->now. * \todo This follows the example of other callers of cli_config_update() * and returns -ENOKEY ("Required key not available") if that fails, * but perhaps -pcmk_err_schema_validation would be better in that case. */ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return -ENOKEY; } data_set->input = *xml; data_set->now = crm_time_new(NULL); return pcmk_ok; } /*! * \internal * \brief Update a working set's XML input based on a CIB query * * \param[in] data_set Data set instance to initialize * \param[in] cib Connection to the CIB * * \return pcmk_ok on success, -errno on failure * \note On success, caller is responsible for freeing memory allocated for * data_set->input and data_set->now. 
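* Typical usage (sketch only; error handling elided, mirrors wait_till_stable() below):
*   pe_working_set_t data_set;
*   set_working_set_defaults(&data_set);
*   if (update_working_set_from_cib(&data_set, cib) == pcmk_ok) {
*       do_calculations(&data_set, data_set.input, NULL);
*       cleanup_alloc_calculations(&data_set);
*   }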
*/ static int update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc); return rc; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_ok) { fprintf(stderr, "Could not upgrade the current CIB XML\n"); free_xml(cib_xml_copy); return rc; } return pcmk_ok; } static int update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc; cleanup_alloc_calculations(data_set); rc = update_working_set_from_cib(data_set, cib); if (rc != pcmk_ok) { return rc; } if(simulate) { pid = crm_getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { fprintf(stderr, "Could not create shadow cib: '%s'\n", pid); rc = -ENXIO; goto cleanup; } rc = write_xml_file(data_set->input, shadow_file, FALSE); if (rc < 0) { fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); if(rc != pcmk_ok) { fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } do_calculations(data_set, data_set->input, NULL); run_simulation(data_set, shadow_cib, NULL, TRUE); rc = update_dataset(shadow_cib, data_set, FALSE); } else { cluster_status(data_set); } cleanup: /* Do not free data_set->input here, we need rsc->xml to be valid later on */ cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } static int max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) { int delay = 0; int max_delay = 0; if(rsc && rsc->children) { GList *iter = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; delay = max_delay_for_resource(data_set, child); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id); max_delay = delay; } } } else if(rsc) { char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP); action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set); const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT); max_delay = crm_int_helper(value, NULL); pe_free_action(stop); } return max_delay; } static int max_delay_in(pe_working_set_t * data_set, GList *resources) { int max_delay = 0; GList *item = NULL; for (item = resources; item != NULL; item = item->next) { int delay = 0; resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data); delay = max_delay_for_resource(data_set, rsc); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id); max_delay = delay; } } return 5 + (max_delay / 1000); } #define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \ (resource_is_running_on((r), (h)) == FALSE)) /*! * \internal * \brief Restart a resource (on a particular host if requested). 
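* * The implementation stops the resource (via target-role, or via a ban for clone instances on a specific host), polls the CIB until the affected resources stop, then reverts the change and polls again until they start. 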
* * \param[in] rsc The resource to restart * \param[in] host The host to restart the resource on (or NULL for all) * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but a two-second * granularity is actually used; if 0, a timeout will be * calculated based on the resource timeout) * \param[in] cib Connection to the CIB for modifying/checking resource * * \return pcmk_ok on success, -errno on failure (exits on certain failures) */ int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib) { int rc = 0; int lpc = 0; int before = 0; int step_timeout_s = 0; int sleep_interval = 2; int timeout = timeout_ms / 1000; bool stop_via_ban = FALSE; char *rsc_id = NULL; char *orig_target_role = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pe_working_set_t data_set; if(resource_is_running_on(rsc, host) == FALSE) { const char *id = rsc->clone_name?rsc->clone_name:rsc->id; if(host) { printf("%s is not running on %s and so cannot be restarted\n", id, host); } else { printf("%s is not running anywhere and so cannot be restarted\n", id); } return -ENXIO; } /* We might set the target-role meta-attribute */ attr_set_type = XML_TAG_META_SETS; rsc_id = strdup(rsc->id); if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) { stop_via_ban = TRUE; }
/* Plan:
 * - grab the full cib
 * - determine the originally active resources
 * - disable or ban the resource
 * - poll the cib and watch for affected resources to get stopped
 *   (without --timeout, calculate the stop timeout for each step and wait for that)
 *   - if we hit --timeout or the service timeout, re-enable or un-ban, report
 *     failure and indicate which resources we couldn't take down
 * - if everything stopped, re-enable or un-ban
 * - poll the cib and watch for affected resources to get started
 *   (without --timeout, calculate the start timeout for each step and wait for that)
 *   - if we hit --timeout or the service timeout, report a (different) failure
 *     and indicate which resources we couldn't bring back up
 * - report success
 *
 * Optimizations:
 * - use constraints to determine an ordered list of affected resources
 * - allow a --no-deps option (aka. --force-restart)
 */
set_working_set_defaults(&data_set); rc = update_dataset(cib, &data_set, FALSE); if(rc != pcmk_ok) { fprintf(stderr, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc); free(rsc_id); return rc; } restart_target_active = get_active_resources(host, data_set.resources); current_active = get_active_resources(host, data_set.resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ BE_QUIET = TRUE; rc = cli_resource_ban(rsc_id, host, NULL, cib); } else { /* Stop the resource by setting target-role to Stopped. * Remember any existing target-role so we can restore it later * (though it only makes any difference if it's Slave). 
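* Roughly the CLI equivalent (shown for illustration only, not invoked here) is: crm_resource --meta -r <rsc> -p target-role -v Stopped 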
*/ char *lookup_id = clone_strip(rsc->id); find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); free(lookup_id); rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, RSC_STOPPED, FALSE, cib, &data_set); } if(rc != pcmk_ok) { fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc); if (current_active) { g_list_free_full(current_active, free); } if (restart_target_active) { g_list_free_full(restart_target_active, free); } free(rsc_id); return crm_exit(rc); } rc = update_dataset(cib, &data_set, TRUE); if(rc != pcmk_ok) { fprintf(stderr, "Could not determine which resources would be stopped\n"); goto failure; } target_active = get_active_resources(host, data_set.resources); dump_list(target_active, "Target"); list_delta = subtract_lists(current_active, target_active); fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta)); display_list(list_delta, " * "); step_timeout_s = timeout / sleep_interval; while(g_list_length(list_delta) > 0) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, &data_set, FALSE); if(rc != pcmk_ok) { fprintf(stderr, "Could not determine which resources were stopped\n"); goto failure; } if (current_active) { g_list_free_full(current_active, free); } current_active = get_active_resources(host, data_set.resources); g_list_free(list_delta); list_delta = subtract_lists(current_active, target_active); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); if(before == g_list_length(list_delta)) { /* aborted during stop phase, print the contents of list_delta */ fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); display_list(list_delta, " * "); rc = -ETIME; goto failure; } } if (stop_via_ban) { rc = cli_resource_clear(rsc_id, host, NULL, cib); } else if (orig_target_role) { rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, &data_set); free(orig_target_role); orig_target_role = NULL; } else { rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set); } if(rc != pcmk_ok) { fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc); free(rsc_id); return crm_exit(rc); } if (target_active) { g_list_free_full(target_active, free); } target_active = restart_target_active; if (list_delta) { g_list_free(list_delta); } list_delta = subtract_lists(target_active, current_active); fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta)); display_list(list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (waiting_for_starts(list_delta, rsc, host)) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { sleep(sleep_interval); 
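/* Re-query the CIB after each sleep so list_delta shrinks to only the resources that have still not started. */ 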
if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, &data_set, FALSE); if(rc != pcmk_ok) { fprintf(stderr, "Could not determine which resources were started\n"); goto failure; } if (current_active) { g_list_free_full(current_active, free); } /* It's OK if dependent resources moved to a different node, * so we check active resources on all nodes. */ current_active = get_active_resources(NULL, data_set.resources); g_list_free(list_delta); list_delta = subtract_lists(target_active, current_active); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } if(before == g_list_length(list_delta)) { /* aborted during start phase, print the contents of list_delta */ fprintf(stderr, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); display_list(list_delta, " * "); rc = -ETIME; goto failure; } } rc = pcmk_ok; goto done; failure: if (stop_via_ban) { cli_resource_clear(rsc_id, host, NULL, cib); } else if (orig_target_role) { cli_resource_update_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, &data_set); free(orig_target_role); } else { cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set); } done: if (list_delta) { g_list_free(list_delta); } if (current_active) { g_list_free_full(current_active, free); } if (target_active && (target_active != restart_target_active)) { g_list_free_full(target_active, free); } if (restart_target_active) { g_list_free_full(restart_target_active, free); } cleanup_alloc_calculations(&data_set); free(rsc_id); return rc; }
-#define action_is_pending(action) \
-    ((is_set((action)->flags, pe_action_optional) == FALSE) \
-     && (is_set((action)->flags, pe_action_runnable) == TRUE) \
-     && (is_set((action)->flags, pe_action_pseudo) == FALSE))
+static inline int action_is_pending(action_t *action)
+{
+    if(is_set(action->flags, pe_action_optional)) {
+        return FALSE;
+    } else if(is_set(action->flags, pe_action_runnable) == FALSE) {
+        return FALSE;
+    } else if(is_set(action->flags, pe_action_pseudo)) {
+        return FALSE;
+    } else if(safe_str_eq("notify", action->task)) {
+        return FALSE;
+    }
+    return TRUE;
+}
/*! * \internal * \brief Return TRUE if any actions in a list are pending * * \param[in] actions List of actions to check * * \return TRUE if any actions in the list are pending, FALSE otherwise */ static bool actions_are_pending(GListPtr actions) { GListPtr action; for (action = actions; action != NULL; action = action->next) {
-        if (action_is_pending((action_t *) action->data)) {
+        action_t *a = (action_t *)action->data;
+        if (action_is_pending(a)) {
+            crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
return TRUE; } } return FALSE; } /*! * \internal * \brief Print pending actions to stderr * * \param[in] actions List of actions to check * * \return void */ static void print_pending_actions(GListPtr actions) { GListPtr action; fprintf(stderr, "Pending actions:\n"); for (action = actions; action != NULL; action = action->next) { action_t *a = (action_t *) action->data; if (action_is_pending(a)) { fprintf(stderr, "\tAction %d: %s", a->id, a->uuid); if (a->node) { fprintf(stderr, "\ton %s", a->node->details->uname); } fprintf(stderr, "\n"); } } } /* For --wait, timeout (in seconds) to use if caller doesn't specify one */ #define WAIT_DEFAULT_TIMEOUT_S (60 * 60) /* For --wait, how long to sleep between cluster state checks */ #define WAIT_SLEEP_S (2) /*! 
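* Backs crm_resource --wait; the two defines above supply its default timeout and poll interval. 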
* \internal * \brief Wait until all pending cluster actions are complete * * This waits until either the CIB's transition graph is idle or a timeout is * reached. * * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but one-second granularity * is actually used; if 0, a default will be used) * \param[in] cib Connection to the CIB * * \return pcmk_ok on success, -errno on failure */ int wait_till_stable(int timeout_ms, cib_t * cib) { pe_working_set_t data_set; int rc = -1; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; set_working_set_defaults(&data_set); do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %ld seconds for cluster actions to complete", (long) time_diff); } else { print_pending_actions(data_set.actions); cleanup_alloc_calculations(&data_set); return -ETIME; } if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ cleanup_alloc_calculations(&data_set); rc = update_working_set_from_cib(&data_set, cib); if (rc != pcmk_ok) { cleanup_alloc_calculations(&data_set); return rc; } do_calculations(&data_set, data_set.input, NULL); } while (actions_are_pending(data_set.actions)); return pcmk_ok; } int cli_resource_execute(resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, int timeout_ms, cib_t * cib, pe_working_set_t *data_set) { int rc = pcmk_ok; svc_action_t *op = NULL; const char *rid = NULL; const char *rtype = NULL; const char *rprov = NULL; const char *rclass = NULL; const char *action = NULL; GHashTable *params = NULL; if (safe_str_eq(rsc_action, "validate")) { action = "validate-all"; } else if (safe_str_eq(rsc_action, "force-check")) { action = "monitor"; } else if (safe_str_eq(rsc_action, "force-stop")) { action = rsc_action+6; } else if (safe_str_eq(rsc_action, "force-start") || safe_str_eq(rsc_action, "force-demote") || safe_str_eq(rsc_action, "force-promote")) { action = rsc_action+6; if(pe_rsc_is_clone(rsc)) { rc = cli_resource_search(rsc, requested_name, data_set); if(rc > 0 && do_force == FALSE) { CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", action, rsc->id); CMD_ERR("Try setting target-role=Stopped first or specifying --force"); crm_exit(EPERM); } } } if(pe_rsc_is_clone(rsc)) { /* Grab the first child resource in the hope it's not a group */ rsc = rsc->children->data; } if(rsc->variant == pe_group) { CMD_ERR("Sorry, --%s doesn't support group resources", rsc_action); crm_exit(EOPNOTSUPP); } rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) { CMD_ERR("Sorry, --%s doesn't support %s resources yet", rsc_action, rclass); crm_exit(EOPNOTSUPP); } params = generate_resource_params(rsc, data_set); /* add meta_timeout env needed by some resource agents */ if (timeout_ms == 0) { timeout_ms = pe_get_configured_timeout(rsc, action, data_set); } g_hash_table_insert(params, strdup("CRM_meta_timeout"), crm_strdup_printf("%d", timeout_ms)); /* add crm_feature_set env needed by some resource agents */ g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); rid = pe_rsc_is_anon_clone(rsc->parent)? 
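/* for anonymous clone instances, run the agent under the name the user asked for */ 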
requested_name : rsc->id; op = resources_action_create(rid, rclass, rprov, rtype, action, 0, timeout_ms, params, 0); if (op == NULL) { /* Re-run with stderr enabled so we can display a sane error message */ crm_enable_stderr(TRUE); op = resources_action_create(rid, rclass, rprov, rtype, action, 0, timeout_ms, params, 0); /* We know op will be NULL, but this makes static analysis happy */ services_action_free(op); return crm_exit(EINVAL); } setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1); if(resource_verbose > 1) { setenv("OCF_TRACE_RA", "1", 1); } if (override_hash) { GHashTableIter iter; char *name = NULL; char *value = NULL; g_hash_table_iter_init(&iter, override_hash); while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n", rsc->id, name, value); g_hash_table_replace(op->params, strdup(name), strdup(value)); } } if (services_action_sync(op)) { int more, lpc, last; char *local_copy = NULL; if (op->status == PCMK_LRM_OP_DONE) { printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n", action, rsc->id, rclass, rprov ? rprov : "", rtype, services_ocf_exitcode_str(op->rc), op->rc); } else { printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n", action, rsc->id, rclass, rprov ? rprov : "", rtype, services_lrm_status_str(op->status), op->status); } /* hide output for validate-all if not in verbose */ if (resource_verbose == 0 && safe_str_eq(action, "validate-all")) goto done; if (op->stdout_data) { local_copy = strdup(op->stdout_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stdout: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } if (op->stderr_data) { local_copy = strdup(op->stderr_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stderr: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } } done: rc = op->rc; services_action_free(op); return rc; } int cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name, cib_t *cib, pe_working_set_t *data_set) { int rc = -EINVAL; int count = 0; node_t *current = NULL; node_t *dest = pe_find_node(data_set->nodes, host_name); bool cur_is_dest = FALSE; if (scope_master && rsc->variant != pe_master) { resource_t *p = uber_parent(rsc); if(p->variant == pe_master) { CMD_ERR("Using parent '%s' for --move command instead of '%s'.", rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { CMD_ERR("Ignoring '--master' option: not valid for %s resources.", get_resource_typename(rsc->variant)); scope_master = FALSE; } } if(rsc->variant == pe_master) { GListPtr iter = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { rsc = child; count++; } } if(scope_master == FALSE && count == 0) { count = g_list_length(rsc->running_on); } } else if (pe_rsc_is_clone(rsc)) { count = g_list_length(rsc->running_on); } else if (g_list_length(rsc->running_on) > 1) { CMD_ERR("Resource '%s' not moved: active on multiple nodes", rsc_id); return rc; } if(dest == NULL) { CMD_ERR("Error performing operation: node '%s' is unknown", host_name); return -ENXIO; } if(g_list_length(rsc->running_on) == 1) { current = rsc->running_on->data; } if(current 
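/* current was set above only when the resource is active on exactly one node */ 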
== NULL) { /* Nothing to check */ } else if(scope_master && rsc->fns->state(rsc, TRUE) != RSC_ROLE_MASTER) { crm_trace("%s is already active on %s but not in correct state", rsc_id, dest->details->uname); } else if (safe_str_eq(current->details->uname, dest->details->uname)) { cur_is_dest = TRUE; if (do_force) { crm_info("%s is already %s on %s, reinforcing placement with location constraint.", rsc_id, scope_master?"promoted":"active", dest->details->uname); } else { CMD_ERR("Error performing operation: %s is already %s on %s", rsc_id, scope_master?"promoted":"active", dest->details->uname); return rc; } } /* Clear any previous constraints for 'dest' */ cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(rsc_id, dest->details->uname, cib); crm_trace("%s%s now prefers node %s%s", rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?" (forced)":""); /* only ban the previous location if the current location differs from the destination; * it is possible to use -M to enforce a location without regard to where the * resource is currently located */ if(do_force && (cur_is_dest == FALSE)) { /* Ban the original location if possible */ if(current) { (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib); } else if(count > 1) { CMD_ERR("Resource '%s' is currently %s in %d locations. One of them may now be moved to %s", rsc_id, scope_master?"promoted":"active", count, dest->details->uname); CMD_ERR("You can prevent '%s' from being %s at a specific location with:" " --ban %s--host <name>", rsc_id, scope_master?"promoted":"active", scope_master?"--master ":""); } else { crm_trace("Not banning %s from its current location: not active", rsc_id); } } return rc; } static void cli_resource_why_without_rsc_and_host(cib_t *cib_conn, GListPtr resources) { GListPtr lpc = NULL; GListPtr hosts = NULL; for (lpc = resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc->fns->location(rsc, &hosts, TRUE); if (hosts == NULL) { printf("Resource %s is not running\n", rsc->id); } else { printf("Resource %s is running\n", rsc->id); } cli_resource_check(cib_conn, rsc); g_list_free(hosts); hosts = NULL; } } static void cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources, resource_t *rsc, const char *host_uname) { if (resource_is_running_on(rsc, host_uname)) { printf("Resource %s is running on host %s\n", rsc->id, host_uname); } else { printf("Resource %s is not running on host %s\n", rsc->id, host_uname); } cli_resource_check(cib_conn, rsc); } static void cli_resource_why_without_rsc_with_host(cib_t *cib_conn, GListPtr resources, node_t *node) { const char* host_uname = node->details->uname; GListPtr allResources = node->details->allocated_rsc; GListPtr activeResources = node->details->running_rsc; GListPtr inactiveResources = subtract_lists(allResources, activeResources); GListPtr lpc = NULL; for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is running on host %s\n", rsc->id, host_uname); cli_resource_check(cib_conn, rsc); } for(lpc = inactiveResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is assigned to host %s but not running\n", rsc->id, host_uname); cli_resource_check(cib_conn, rsc); } g_list_free(allResources); g_list_free(activeResources); g_list_free(inactiveResources); } static void cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr 
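/* resources is unused here; the parameter is kept so all the why() helpers share a similar shape */ 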
resources, resource_t *rsc) { GListPtr hosts = NULL; rsc->fns->location(rsc, &hosts, TRUE); printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not ")); cli_resource_check(cib_conn, rsc); g_list_free(hosts); } void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc, node_t *node) { const char *host_uname = (node == NULL)? NULL : node->details->uname; if ((rsc == NULL) && (host_uname == NULL)) { cli_resource_why_without_rsc_and_host(cib_conn, resources); } else if ((rsc != NULL) && (host_uname != NULL)) { cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc, host_uname); } else if ((rsc == NULL) && (host_uname != NULL)) { cli_resource_why_without_rsc_with_host(cib_conn, resources, node); } else if ((rsc != NULL) && (host_uname == NULL)) { cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc); } }
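/* For reference, these four branches line up with crm_resource invocations such as
 * the following (sketch; option spellings assumed, resource/node names are placeholders):
 *   crm_resource --why                                 (no resource, no node)
 *   crm_resource --why --resource my_rsc               (resource only)
 *   crm_resource --why --node node1                    (node only)
 *   crm_resource --why --resource my_rsc --node node1  (both)
 */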