diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c index b985fa950a..bd732bcd3b 100644 --- a/daemons/controld/controld_fsa.c +++ b/daemons/controld/controld_fsa.c @@ -1,660 +1,661 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include char *fsa_our_dc = NULL; cib_t *fsa_cib_conn = NULL; char *fsa_our_dc_version = NULL; char *fsa_our_uuid = NULL; char *fsa_our_uname = NULL; char *fsa_cluster_name = NULL; gboolean do_fsa_stall = FALSE; long long fsa_input_register = 0; long long fsa_actions = A_NOTHING; enum crmd_fsa_state fsa_state = S_STARTING; extern uint highest_born_on; extern uint num_join_invites; extern void initialize_join(gboolean before); #define DOT_PREFIX "actions:trace: " #define do_dot_log(fmt, args...) crm_trace( fmt, ##args) long long do_state_transition(long long actions, enum crmd_fsa_state cur_state, enum crmd_fsa_state next_state, fsa_data_t * msg_data); void s_crmd_fsa_actions(fsa_data_t * fsa_data); void log_fsa_input(fsa_data_t * stored_msg); void init_dotfile(void); void init_dotfile(void) { do_dot_log(DOT_PREFIX "digraph \"g\" {"); do_dot_log(DOT_PREFIX " size = \"30,30\""); do_dot_log(DOT_PREFIX " graph ["); do_dot_log(DOT_PREFIX " fontsize = \"12\""); do_dot_log(DOT_PREFIX " fontname = \"Times-Roman\""); do_dot_log(DOT_PREFIX " fontcolor = \"black\""); do_dot_log(DOT_PREFIX " bb = \"0,0,398.922306,478.927856\""); do_dot_log(DOT_PREFIX " color = \"black\""); do_dot_log(DOT_PREFIX " ]"); do_dot_log(DOT_PREFIX " node ["); do_dot_log(DOT_PREFIX " fontsize = \"12\""); do_dot_log(DOT_PREFIX " fontname = \"Times-Roman\""); do_dot_log(DOT_PREFIX " fontcolor = \"black\""); do_dot_log(DOT_PREFIX " shape = \"ellipse\""); do_dot_log(DOT_PREFIX " color = \"black\""); do_dot_log(DOT_PREFIX " ]"); do_dot_log(DOT_PREFIX " edge ["); do_dot_log(DOT_PREFIX " fontsize = \"12\""); do_dot_log(DOT_PREFIX " fontname = \"Times-Roman\""); do_dot_log(DOT_PREFIX " fontcolor = \"black\""); do_dot_log(DOT_PREFIX " color = \"black\""); do_dot_log(DOT_PREFIX " ]"); do_dot_log(DOT_PREFIX "// special nodes"); do_dot_log(DOT_PREFIX " \"S_PENDING\" "); do_dot_log(DOT_PREFIX " ["); do_dot_log(DOT_PREFIX " color = \"blue\""); do_dot_log(DOT_PREFIX " fontcolor = \"blue\""); do_dot_log(DOT_PREFIX " ]"); do_dot_log(DOT_PREFIX " \"S_TERMINATE\" "); do_dot_log(DOT_PREFIX " ["); do_dot_log(DOT_PREFIX " color = \"red\""); do_dot_log(DOT_PREFIX " fontcolor = \"red\""); do_dot_log(DOT_PREFIX " ]"); do_dot_log(DOT_PREFIX "// DC only nodes"); do_dot_log(DOT_PREFIX " \"S_INTEGRATION\" [ fontcolor = \"green\" ]"); do_dot_log(DOT_PREFIX " \"S_POLICY_ENGINE\" [ fontcolor = \"green\" ]"); do_dot_log(DOT_PREFIX " \"S_TRANSITION_ENGINE\" [ fontcolor = \"green\" ]"); do_dot_log(DOT_PREFIX " \"S_RELEASE_DC\" [ fontcolor = \"green\" ]"); do_dot_log(DOT_PREFIX " \"S_IDLE\" [ fontcolor = \"green\" ]"); } static void do_fsa_action(fsa_data_t * fsa_data, long long an_action, void (*function) (long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data)) { fsa_actions &= ~an_action; crm_trace(DOT_PREFIX "\t// %s", fsa_action2string(an_action)); function(an_action, fsa_data->fsa_cause, fsa_state, 
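/* Reviewer note (illustrative sketch, not part of the patch): each A_* action
 * is one bit in the long long fsa_actions mask, and do_fsa_action() above
 * clears that bit *before* invoking the handler, so a handler can safely
 * re-queue its own action by setting the bit again. The pattern, with
 * hypothetical names:
 *
 *     long long mask = 0;
 *     mask |= A_LOG;                      // schedule the action
 *     if (mask & A_LOG) {
 *         mask &= ~A_LOG;                 // consume the bit first
 *         do_log(A_LOG, ...);             // then run the handler
 *     }
 */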
fsa_data->fsa_input, fsa_data); } static long long startup_actions = A_STARTUP | A_CIB_START | A_LRM_CONNECT | A_HA_CONNECT | A_READCONFIG | A_STARTED | A_CL_JOIN_QUERY; // A_LOG, A_WARN, A_ERROR void do_log(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { unsigned log_type = LOG_TRACE; if (action & A_LOG) { log_type = LOG_INFO; } else if (action & A_WARN) { log_type = LOG_WARNING; } else if (action & A_ERROR) { log_type = LOG_ERR; } do_crm_log(log_type, "Input %s received in state %s from %s", fsa_input2string(msg_data->fsa_input), fsa_state2string(cur_state), msg_data->origin); if (msg_data->data_type == fsa_dt_ha_msg) { ha_msg_input_t *input = fsa_typed_data(msg_data->data_type); crm_log_xml_debug(input->msg, __FUNCTION__); } else if (msg_data->data_type == fsa_dt_xml) { xmlNode *input = fsa_typed_data(msg_data->data_type); crm_log_xml_debug(input, __FUNCTION__); } else if (msg_data->data_type == fsa_dt_lrm) { lrmd_event_data_t *input = fsa_typed_data(msg_data->data_type); do_crm_log(log_type, "Resource %s: Call ID %d returned %d (%d)." " New status if rc=0: %s", input->rsc_id, input->call_id, input->rc, input->op_status, (char *)input->user_data); } } enum crmd_fsa_state s_crmd_fsa(enum crmd_fsa_cause cause) { fsa_data_t *fsa_data = NULL; long long register_copy = fsa_input_register; long long new_actions = A_NOTHING; enum crmd_fsa_state last_state; crm_trace("FSA invoked with Cause: %s\tState: %s", fsa_cause2string(cause), fsa_state2string(fsa_state)); fsa_dump_actions(fsa_actions, "Initial"); do_fsa_stall = FALSE; if (is_message() == FALSE && fsa_actions != A_NOTHING) { /* fake the first message so we can get into the loop */ fsa_data = calloc(1, sizeof(fsa_data_t)); fsa_data->fsa_input = I_NULL; fsa_data->fsa_cause = C_FSA_INTERNAL; fsa_data->origin = __FUNCTION__; fsa_data->data_type = fsa_dt_none; fsa_message_queue = g_list_append(fsa_message_queue, fsa_data); fsa_data = NULL; } while (is_message() && do_fsa_stall == FALSE) { crm_trace("Checking messages (%d remaining)", g_list_length(fsa_message_queue)); fsa_data = get_message(); if(fsa_data == NULL) { continue; } log_fsa_input(fsa_data); /* add any actions back to the queue */ fsa_actions |= fsa_data->actions; fsa_dump_actions(fsa_data->actions, "Restored actions"); /* get the next batch of actions */ new_actions = crmd_fsa_actions[fsa_data->fsa_input][fsa_state]; fsa_actions |= new_actions; fsa_dump_actions(new_actions, "New actions"); if (fsa_data->fsa_input != I_NULL && fsa_data->fsa_input != I_ROUTER) { crm_debug("Processing %s: [ state=%s cause=%s origin=%s ]", fsa_input2string(fsa_data->fsa_input), fsa_state2string(fsa_state), fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); } /* logging : *before* the state is changed */ if (is_set(fsa_actions, A_ERROR)) { do_fsa_action(fsa_data, A_ERROR, do_log); } if (is_set(fsa_actions, A_WARN)) { do_fsa_action(fsa_data, A_WARN, do_log); } if (is_set(fsa_actions, A_LOG)) { do_fsa_action(fsa_data, A_LOG, do_log); } /* update state variables */ last_state = fsa_state; fsa_state = crmd_fsa_state[fsa_data->fsa_input][fsa_state]; /* * Remove certain actions during shutdown */ if (fsa_state == S_STOPPING || ((fsa_input_register & R_SHUTDOWN) == R_SHUTDOWN)) { clear_bit(fsa_actions, startup_actions); } /* * Hook for change of state. 
* Allows actions to be added or removed when entering a state */ if (last_state != fsa_state) { fsa_actions = do_state_transition(fsa_actions, last_state, fsa_state, fsa_data); } else { do_dot_log(DOT_PREFIX "\t// FSA input: State=%s \tCause=%s" " \tInput=%s \tOrigin=%s() \tid=%d", fsa_state2string(fsa_state), fsa_cause2string(fsa_data->fsa_cause), fsa_input2string(fsa_data->fsa_input), fsa_data->origin, fsa_data->id); } /* start doing things... */ s_crmd_fsa_actions(fsa_data); delete_fsa_input(fsa_data); fsa_data = NULL; } - if (g_list_length(fsa_message_queue) > 0 || fsa_actions != A_NOTHING || do_fsa_stall) { + if ((fsa_message_queue != NULL) || (fsa_actions != A_NOTHING) + || do_fsa_stall) { crm_debug("Exiting the FSA: queue=%d, fsa_actions=0x%llx, stalled=%s", g_list_length(fsa_message_queue), fsa_actions, do_fsa_stall ? "true" : "false"); } else { crm_trace("Exiting the FSA"); } /* cleanup inputs? */ if (register_copy != fsa_input_register) { long long same = register_copy & fsa_input_register; fsa_dump_inputs(LOG_DEBUG, "Added", fsa_input_register ^ same); fsa_dump_inputs(LOG_DEBUG, "Removed", register_copy ^ same); } fsa_dump_actions(fsa_actions, "Remaining"); fsa_dump_queue(LOG_DEBUG); return fsa_state; } void s_crmd_fsa_actions(fsa_data_t * fsa_data) { /* * Process actions in order of priority but do only one * action at a time to avoid complicating the ordering. */ CRM_CHECK(fsa_data != NULL, return); while (fsa_actions != A_NOTHING && do_fsa_stall == FALSE) { /* regular action processing in order of action priority * * Make sure all actions that connect to required systems * are performed first */ if (fsa_actions & A_ERROR) { do_fsa_action(fsa_data, A_ERROR, do_log); } else if (fsa_actions & A_WARN) { do_fsa_action(fsa_data, A_WARN, do_log); } else if (fsa_actions & A_LOG) { do_fsa_action(fsa_data, A_LOG, do_log); /* get out of here NOW! 
before anything worse happens */ } else if (fsa_actions & A_EXIT_1) { do_fsa_action(fsa_data, A_EXIT_1, do_exit); /* sub-system restart */ } else if ((fsa_actions & O_LRM_RECONNECT) == O_LRM_RECONNECT) { do_fsa_action(fsa_data, O_LRM_RECONNECT, do_lrm_control); } else if ((fsa_actions & O_CIB_RESTART) == O_CIB_RESTART) { do_fsa_action(fsa_data, O_CIB_RESTART, do_cib_control); } else if ((fsa_actions & O_PE_RESTART) == O_PE_RESTART) { do_fsa_action(fsa_data, O_PE_RESTART, do_pe_control); } else if ((fsa_actions & O_TE_RESTART) == O_TE_RESTART) { do_fsa_action(fsa_data, O_TE_RESTART, do_te_control); /* essential start tasks */ } else if (fsa_actions & A_STARTUP) { do_fsa_action(fsa_data, A_STARTUP, do_startup); } else if (fsa_actions & A_CIB_START) { do_fsa_action(fsa_data, A_CIB_START, do_cib_control); } else if (fsa_actions & A_HA_CONNECT) { do_fsa_action(fsa_data, A_HA_CONNECT, do_ha_control); } else if (fsa_actions & A_READCONFIG) { do_fsa_action(fsa_data, A_READCONFIG, do_read_config); /* sub-system start/connect */ } else if (fsa_actions & A_LRM_CONNECT) { do_fsa_action(fsa_data, A_LRM_CONNECT, do_lrm_control); } else if (fsa_actions & A_TE_START) { do_fsa_action(fsa_data, A_TE_START, do_te_control); } else if (fsa_actions & A_PE_START) { do_fsa_action(fsa_data, A_PE_START, do_pe_control); /* Timers */ /* else if(fsa_actions & O_DC_TIMER_RESTART) { do_fsa_action(fsa_data, O_DC_TIMER_RESTART, do_timer_control) */ ; } else if (fsa_actions & A_DC_TIMER_STOP) { do_fsa_action(fsa_data, A_DC_TIMER_STOP, do_timer_control); } else if (fsa_actions & A_INTEGRATE_TIMER_STOP) { do_fsa_action(fsa_data, A_INTEGRATE_TIMER_STOP, do_timer_control); } else if (fsa_actions & A_INTEGRATE_TIMER_START) { do_fsa_action(fsa_data, A_INTEGRATE_TIMER_START, do_timer_control); } else if (fsa_actions & A_FINALIZE_TIMER_STOP) { do_fsa_action(fsa_data, A_FINALIZE_TIMER_STOP, do_timer_control); } else if (fsa_actions & A_FINALIZE_TIMER_START) { do_fsa_action(fsa_data, A_FINALIZE_TIMER_START, do_timer_control); /* * Highest priority actions */ } else if (fsa_actions & A_MSG_ROUTE) { do_fsa_action(fsa_data, A_MSG_ROUTE, do_msg_route); } else if (fsa_actions & A_RECOVER) { do_fsa_action(fsa_data, A_RECOVER, do_recover); } else if (fsa_actions & A_CL_JOIN_RESULT) { do_fsa_action(fsa_data, A_CL_JOIN_RESULT, do_cl_join_finalize_respond); } else if (fsa_actions & A_CL_JOIN_REQUEST) { do_fsa_action(fsa_data, A_CL_JOIN_REQUEST, do_cl_join_offer_respond); } else if (fsa_actions & A_SHUTDOWN_REQ) { do_fsa_action(fsa_data, A_SHUTDOWN_REQ, do_shutdown_req); } else if (fsa_actions & A_ELECTION_VOTE) { do_fsa_action(fsa_data, A_ELECTION_VOTE, do_election_vote); } else if (fsa_actions & A_ELECTION_COUNT) { do_fsa_action(fsa_data, A_ELECTION_COUNT, do_election_count_vote); } else if (fsa_actions & A_LRM_EVENT) { do_fsa_action(fsa_data, A_LRM_EVENT, do_lrm_event); /* * High priority actions */ } else if (fsa_actions & A_STARTED) { do_fsa_action(fsa_data, A_STARTED, do_started); } else if (fsa_actions & A_CL_JOIN_QUERY) { do_fsa_action(fsa_data, A_CL_JOIN_QUERY, do_cl_join_query); } else if (fsa_actions & A_DC_TIMER_START) { do_fsa_action(fsa_data, A_DC_TIMER_START, do_timer_control); /* * Medium priority actions * - Membership */ } else if (fsa_actions & A_DC_TAKEOVER) { do_fsa_action(fsa_data, A_DC_TAKEOVER, do_dc_takeover); } else if (fsa_actions & A_DC_RELEASE) { do_fsa_action(fsa_data, A_DC_RELEASE, do_dc_release); } else if (fsa_actions & A_DC_JOIN_FINAL) { do_fsa_action(fsa_data, A_DC_JOIN_FINAL, do_dc_join_final); } else if 
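/* Reviewer note (sketch, assuming the O_* values are composite masks built
 * from several A_* bits, as in fsa_defines.h): composites are tested with
 * full-mask equality rather than a plain bitwise AND, so the branch fires
 * only when *every* constituent bit is pending:
 *
 *     enum { A_FOO = 1 << 0, A_BAR = 1 << 1 };
 *     #define O_BOTH (A_FOO | A_BAR)
 *     if ((fsa_actions & O_BOTH) == O_BOTH) { ... }   // all bits set
 *
 * A simple (fsa_actions & O_BOTH) test would also fire when only one of the
 * two bits is set.
 */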
(fsa_actions & A_ELECTION_CHECK) { do_fsa_action(fsa_data, A_ELECTION_CHECK, do_election_check); } else if (fsa_actions & A_ELECTION_START) { do_fsa_action(fsa_data, A_ELECTION_START, do_election_vote); } else if (fsa_actions & A_DC_JOIN_OFFER_ALL) { do_fsa_action(fsa_data, A_DC_JOIN_OFFER_ALL, do_dc_join_offer_all); } else if (fsa_actions & A_DC_JOIN_OFFER_ONE) { do_fsa_action(fsa_data, A_DC_JOIN_OFFER_ONE, do_dc_join_offer_one); } else if (fsa_actions & A_DC_JOIN_PROCESS_REQ) { do_fsa_action(fsa_data, A_DC_JOIN_PROCESS_REQ, do_dc_join_filter_offer); } else if (fsa_actions & A_DC_JOIN_PROCESS_ACK) { do_fsa_action(fsa_data, A_DC_JOIN_PROCESS_ACK, do_dc_join_ack); } else if (fsa_actions & A_DC_JOIN_FINALIZE) { do_fsa_action(fsa_data, A_DC_JOIN_FINALIZE, do_dc_join_finalize); } else if (fsa_actions & A_CL_JOIN_ANNOUNCE) { do_fsa_action(fsa_data, A_CL_JOIN_ANNOUNCE, do_cl_join_announce); /* * Low(er) priority actions * Make sure the CIB is always updated before invoking the * scheduler, and the scheduler before the transition engine. */ } else if (fsa_actions & A_TE_HALT) { do_fsa_action(fsa_data, A_TE_HALT, do_te_invoke); } else if (fsa_actions & A_TE_CANCEL) { do_fsa_action(fsa_data, A_TE_CANCEL, do_te_invoke); } else if (fsa_actions & A_LRM_INVOKE) { do_fsa_action(fsa_data, A_LRM_INVOKE, do_lrm_invoke); } else if (fsa_actions & A_PE_INVOKE) { do_fsa_action(fsa_data, A_PE_INVOKE, do_pe_invoke); } else if (fsa_actions & A_TE_INVOKE) { do_fsa_action(fsa_data, A_TE_INVOKE, do_te_invoke); /* Shutdown actions */ } else if (fsa_actions & A_DC_RELEASED) { do_fsa_action(fsa_data, A_DC_RELEASED, do_dc_release); } else if (fsa_actions & A_PE_STOP) { do_fsa_action(fsa_data, A_PE_STOP, do_pe_control); } else if (fsa_actions & A_TE_STOP) { do_fsa_action(fsa_data, A_TE_STOP, do_te_control); } else if (fsa_actions & A_SHUTDOWN) { do_fsa_action(fsa_data, A_SHUTDOWN, do_shutdown); } else if (fsa_actions & A_LRM_DISCONNECT) { do_fsa_action(fsa_data, A_LRM_DISCONNECT, do_lrm_control); } else if (fsa_actions & A_HA_DISCONNECT) { do_fsa_action(fsa_data, A_HA_DISCONNECT, do_ha_control); } else if (fsa_actions & A_CIB_STOP) { do_fsa_action(fsa_data, A_CIB_STOP, do_cib_control); } else if (fsa_actions & A_STOP) { do_fsa_action(fsa_data, A_STOP, do_stop); /* exit gracefully */ } else if (fsa_actions & A_EXIT_0) { do_fsa_action(fsa_data, A_EXIT_0, do_exit); /* Error checking and reporting */ } else { crm_err("Action %s not supported "CRM_XS" 0x%llx", fsa_action2string(fsa_actions), fsa_actions); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, fsa_data, NULL, __FUNCTION__); } } } void log_fsa_input(fsa_data_t * stored_msg) { CRM_ASSERT(stored_msg); crm_trace("Processing queued input %d", stored_msg->id); if (stored_msg->fsa_cause == C_LRM_OP_CALLBACK) { crm_trace("FSA processing LRM callback from %s", stored_msg->origin); } else if (stored_msg->data == NULL) { crm_trace("FSA processing input from %s", stored_msg->origin); } else { ha_msg_input_t *ha_input = fsa_typed_data_adv(stored_msg, fsa_dt_ha_msg, __FUNCTION__); crm_trace("FSA processing XML message from %s", stored_msg->origin); crm_log_xml_trace(ha_input->xml, "FSA message data"); } } static void check_join_counts(fsa_data_t *msg_data) { int count; guint npeers; count = crmd_join_phase_count(crm_join_finalized); if (count > 0) { crm_err("%d cluster node%s failed to confirm join", count, pcmk__plural_s(count)); crmd_join_phase_log(LOG_NOTICE); return; } npeers = crm_active_peers(); count = crmd_join_phase_count(crm_join_confirmed); if (count == npeers) { if 
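/* Reviewer note (summary of the surrounding check_join_counts() logic): any
 * node stuck in crm_join_finalized is reported as an error; a confirmed count
 * equal to the active peer count means the join is complete; more confirmed
 * nodes than members forces a new election (I_ELECTION); a changed membership
 * sequence (checked when fewer nodes confirmed) triggers a new join
 * (I_NODE_JOIN); otherwise the shortfall is only warned about. For example,
 * with npeers = 3, confirmed = 2 and one node still in crm_join_welcomed,
 * only the final crm_warn() branch fires.
 */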
(npeers == 1) { crm_debug("Sole active cluster node is fully joined"); } else { crm_debug("All %d active cluster nodes are fully joined", count); } } else if (count > npeers) { crm_err("New election needed because more nodes confirmed join " "than are in membership (%d > %u)", count, npeers); register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); } else if (saved_ccm_membership_id != crm_peer_seq) { crm_info("New join needed because membership changed (%llu -> %llu)", saved_ccm_membership_id, crm_peer_seq); register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); } else { crm_warn("Only %d of %u active cluster nodes fully joined " "(%d did not respond to offer)", count, npeers, crmd_join_phase_count(crm_join_welcomed)); } } long long do_state_transition(long long actions, enum crmd_fsa_state cur_state, enum crmd_fsa_state next_state, fsa_data_t * msg_data) { int level = LOG_INFO; int count = 0; long long tmp = actions; gboolean clear_recovery_bit = TRUE; enum crmd_fsa_cause cause = msg_data->fsa_cause; enum crmd_fsa_input current_input = msg_data->fsa_input; const char *state_from = fsa_state2string(cur_state); const char *state_to = fsa_state2string(next_state); const char *input = fsa_input2string(current_input); CRM_LOG_ASSERT(cur_state != next_state); do_dot_log(DOT_PREFIX "\t%s -> %s [ label=%s cause=%s origin=%s ]", state_from, state_to, input, fsa_cause2string(cause), msg_data->origin); if (cur_state == S_IDLE || next_state == S_IDLE) { level = LOG_NOTICE; } else if (cur_state == S_NOT_DC || next_state == S_NOT_DC) { level = LOG_NOTICE; } else if (cur_state == S_ELECTION) { level = LOG_NOTICE; } else if (cur_state == S_STARTING) { level = LOG_NOTICE; } else if (next_state == S_RECOVERY) { level = LOG_WARNING; } do_crm_log(level, "State transition %s -> %s " CRM_XS " input=%s cause=%s origin=%s", state_from, state_to, input, fsa_cause2string(cause), msg_data->origin); if (next_state != S_ELECTION && cur_state != S_RELEASE_DC) { controld_stop_election_timer(); } #if 0 if ((fsa_input_register & R_SHUTDOWN)) { set_bit(tmp, A_DC_TIMER_STOP); } #endif if (next_state == S_INTEGRATION) { set_bit(tmp, A_INTEGRATE_TIMER_START); } else { set_bit(tmp, A_INTEGRATE_TIMER_STOP); } if (next_state == S_FINALIZE_JOIN) { set_bit(tmp, A_FINALIZE_TIMER_START); } else { set_bit(tmp, A_FINALIZE_TIMER_STOP); } if (next_state != S_PENDING) { set_bit(tmp, A_DC_TIMER_STOP); } if (next_state != S_ELECTION) { highest_born_on = 0; } if (next_state != S_IDLE) { controld_stop_timer(recheck_timer); } if (cur_state == S_FINALIZE_JOIN && next_state == S_POLICY_ENGINE) { populate_cib_nodes(node_update_quick|node_update_all, __FUNCTION__); } switch (next_state) { case S_PENDING: fsa_cib_conn->cmds->set_slave(fsa_cib_conn, cib_scope_local); /* fall through */ case S_ELECTION: crm_trace("Resetting our DC to NULL on transition to %s", fsa_state2string(next_state)); update_dc(NULL); break; case S_NOT_DC: election_trigger->counter = 0; purge_stonith_cleanup(); if (is_set(fsa_input_register, R_SHUTDOWN)) { crm_info("(Re)Issuing shutdown request now" " that we have a new DC"); set_bit(tmp, A_SHUTDOWN_REQ); } CRM_LOG_ASSERT(fsa_our_dc != NULL); if (fsa_our_dc == NULL) { crm_err("Reached S_NOT_DC without a DC" " being recorded"); } break; case S_RECOVERY: clear_recovery_bit = FALSE; break; case S_FINALIZE_JOIN: CRM_LOG_ASSERT(AM_I_DC); if (cause == C_TIMER_POPPED) { crm_warn("Progressed to state %s after %s", fsa_state2string(next_state), fsa_cause2string(cause)); } count = crmd_join_phase_count(crm_join_welcomed); if 
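/* Reviewer note (sketch): earlier in do_state_transition(), the pending-action
 * mask `tmp` is adjusted as a state is entered: the integration timer is
 * started only when entering S_INTEGRATION and stopped otherwise, the
 * finalization timer likewise for S_FINALIZE_JOIN, and the DC timer is stopped
 * for any next state other than S_PENDING. Assuming set_bit() is the usual
 * OR-in helper, the pattern is equivalent to:
 *
 *     if (next_state == S_INTEGRATION) tmp |= A_INTEGRATE_TIMER_START;
 *     else                             tmp |= A_INTEGRATE_TIMER_STOP;
 */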
(count > 0) { crm_warn("%d cluster node%s failed to respond to join offer", count, pcmk__plural_s(count)); crmd_join_phase_log(LOG_NOTICE); } else { crm_debug("All cluster nodes (%d) responded to join offer", crmd_join_phase_count(crm_join_integrated)); } break; case S_POLICY_ENGINE: election_trigger->counter = 0; CRM_LOG_ASSERT(AM_I_DC); if (cause == C_TIMER_POPPED) { crm_info("Progressed to state %s after %s", fsa_state2string(next_state), fsa_cause2string(cause)); } check_join_counts(msg_data); break; case S_STOPPING: case S_TERMINATE: /* possibly redundant */ set_bit(fsa_input_register, R_SHUTDOWN); break; case S_IDLE: CRM_LOG_ASSERT(AM_I_DC); if (is_set(fsa_input_register, R_SHUTDOWN)) { crm_info("(Re)Issuing shutdown request now" " that we are the DC"); set_bit(tmp, A_SHUTDOWN_REQ); } controld_start_recheck_timer(); break; default: break; } if (clear_recovery_bit && next_state != S_PENDING) { tmp &= ~A_RECOVER; } else if (clear_recovery_bit == FALSE) { tmp |= A_RECOVER; } if (tmp != actions) { /* fsa_dump_actions(actions ^ tmp, "New actions"); */ actions = tmp; } return actions; } diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c index 9af1d09202..466c64c778 100644 --- a/daemons/controld/controld_messages.c +++ b/daemons/controld/controld_messages.c @@ -1,1167 +1,1167 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include GListPtr fsa_message_queue = NULL; extern void crm_shutdown(int nsig); void handle_response(xmlNode * stored_msg); enum crmd_fsa_input handle_request(xmlNode * stored_msg, enum crmd_fsa_cause cause); enum crmd_fsa_input handle_shutdown_request(xmlNode * stored_msg); #define ROUTER_RESULT(x) crm_trace("Router result: %s", x) /* debug only, can wrap all it likes */ int last_data_id = 0; void register_fsa_error_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input, fsa_data_t * cur_data, void *new_data, const char *raised_from) { /* save the current actions if any */ if (fsa_actions != A_NOTHING) { register_fsa_input_adv(cur_data ? cur_data->fsa_cause : C_FSA_INTERNAL, I_NULL, cur_data ? 
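/* Reviewer note: before queueing the error input, register_fsa_error_adv()
 * stashes any still-pending actions in a synthetic I_NULL input (the
 * register_fsa_input_adv() call this comment sits in), so resetting
 * fsa_actions to A_NOTHING just below does not lose them; they are restored
 * when the queued input is processed (fsa_actions |= fsa_data->actions in
 * s_crmd_fsa()).
 */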
cur_data->data : NULL, fsa_actions, TRUE, __FUNCTION__); } /* reset the action list */ crm_info("Resetting the current action list"); fsa_dump_actions(fsa_actions, "Drop"); fsa_actions = A_NOTHING; /* register the error */ register_fsa_input_adv(cause, input, new_data, A_NOTHING, TRUE, raised_from); } int register_fsa_input_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input, void *data, long long with_actions, gboolean prepend, const char *raised_from) { unsigned old_len = g_list_length(fsa_message_queue); fsa_data_t *fsa_data = NULL; if (raised_from == NULL) { raised_from = ""; } if (input == I_NULL && with_actions == A_NOTHING /* && data == NULL */ ) { /* no point doing anything */ crm_err("Cannot add entry to queue: no input and no action"); return 0; } if (input == I_WAIT_FOR_EVENT) { do_fsa_stall = TRUE; crm_debug("Stalling the FSA pending further input: source=%s cause=%s data=%p queue=%d", raised_from, fsa_cause2string(cause), data, old_len); if (old_len > 0) { fsa_dump_queue(LOG_TRACE); prepend = FALSE; } if (data == NULL) { fsa_actions |= with_actions; fsa_dump_actions(with_actions, "Restored"); return 0; } /* Store everything in the new event and reset fsa_actions */ with_actions |= fsa_actions; fsa_actions = A_NOTHING; } last_data_id++; crm_trace("%s %s FSA input %d (%s) (cause=%s) %s data", raised_from, prepend ? "prepended" : "appended", last_data_id, fsa_input2string(input), fsa_cause2string(cause), data ? "with" : "without"); fsa_data = calloc(1, sizeof(fsa_data_t)); fsa_data->id = last_data_id; fsa_data->fsa_input = input; fsa_data->fsa_cause = cause; fsa_data->origin = raised_from; fsa_data->data = NULL; fsa_data->data_type = fsa_dt_none; fsa_data->actions = with_actions; if (with_actions != A_NOTHING) { crm_trace("Adding actions %.16llx to input", with_actions); } if (data != NULL) { switch (cause) { case C_FSA_INTERNAL: case C_CRMD_STATUS_CALLBACK: case C_IPC_MESSAGE: case C_HA_MESSAGE: crm_trace("Copying %s data from %s as a HA msg", fsa_cause2string(cause), raised_from); CRM_CHECK(((ha_msg_input_t *) data)->msg != NULL, crm_err("Bogus data from %s", raised_from)); fsa_data->data = copy_ha_msg_input(data); fsa_data->data_type = fsa_dt_ha_msg; break; case C_LRM_OP_CALLBACK: crm_trace("Copying %s data from %s as lrmd_event_data_t", fsa_cause2string(cause), raised_from); fsa_data->data = lrmd_copy_event((lrmd_event_data_t *) data); fsa_data->data_type = fsa_dt_lrm; break; case C_TIMER_POPPED: case C_SHUTDOWN: case C_UNKNOWN: case C_STARTUP: crm_err("Copying %s data (from %s)" " not yet implemented", fsa_cause2string(cause), raised_from); crmd_exit(CRM_EX_SOFTWARE); break; } crm_trace("%s data copied", fsa_cause2string(fsa_data->fsa_cause)); } /* make sure to free it properly later */ if (prepend) { crm_trace("Prepending input"); fsa_message_queue = g_list_prepend(fsa_message_queue, fsa_data); } else { fsa_message_queue = g_list_append(fsa_message_queue, fsa_data); } crm_trace("Queue len: %d", g_list_length(fsa_message_queue)); /* fsa_dump_queue(LOG_TRACE); */ if (old_len == g_list_length(fsa_message_queue)) { crm_err("Couldn't add message to the queue"); } if (fsa_source && input != I_WAIT_FOR_EVENT) { crm_trace("Triggering FSA: %s", __FUNCTION__); mainloop_set_trigger(fsa_source); } return last_data_id; } void fsa_dump_queue(int log_level) { int offset = 0; GListPtr lpc = NULL; for (lpc = fsa_message_queue; lpc != NULL; lpc = lpc->next) { fsa_data_t *data = (fsa_data_t *) lpc->data; do_crm_log_unlikely(log_level, "queue[%d.%d]: input %s raised by 
%s(%p.%d)\t(cause=%s)", offset++, data->id, fsa_input2string(data->fsa_input), data->origin, data->data, data->data_type, fsa_cause2string(data->fsa_cause)); } } ha_msg_input_t * copy_ha_msg_input(ha_msg_input_t * orig) { ha_msg_input_t *copy = NULL; xmlNodePtr data = NULL; if (orig != NULL) { crm_trace("Copy msg"); data = copy_xml(orig->msg); } else { crm_trace("No message to copy"); } copy = new_ha_msg_input(data); if (orig && orig->msg != NULL) { CRM_CHECK(copy->msg != NULL, crm_err("copy failed")); } return copy; } void delete_fsa_input(fsa_data_t * fsa_data) { lrmd_event_data_t *op = NULL; xmlNode *foo = NULL; if (fsa_data == NULL) { return; } crm_trace("About to free %s data", fsa_cause2string(fsa_data->fsa_cause)); if (fsa_data->data != NULL) { switch (fsa_data->data_type) { case fsa_dt_ha_msg: delete_ha_msg_input(fsa_data->data); break; case fsa_dt_xml: foo = fsa_data->data; free_xml(foo); break; case fsa_dt_lrm: op = (lrmd_event_data_t *) fsa_data->data; lrmd_free_event(op); break; case fsa_dt_none: if (fsa_data->data != NULL) { crm_err("Don't know how to free %s data from %s", fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); crmd_exit(CRM_EX_SOFTWARE); } break; } crm_trace("%s data freed", fsa_cause2string(fsa_data->fsa_cause)); } free(fsa_data); } /* returns the next message */ fsa_data_t * get_message(void) { fsa_data_t *message = g_list_nth_data(fsa_message_queue, 0); fsa_message_queue = g_list_remove(fsa_message_queue, message); crm_trace("Processing input %d", message->id); return message; } /* returns the current head of the FIFO queue */ gboolean is_message(void) { - return (g_list_length(fsa_message_queue) > 0); + return fsa_message_queue != NULL; } void * fsa_typed_data_adv(fsa_data_t * fsa_data, enum fsa_data_type a_type, const char *caller) { void *ret_val = NULL; if (fsa_data == NULL) { crm_err("%s: No FSA data available", caller); } else if (fsa_data->data == NULL) { crm_err("%s: No message data available. Origin: %s", caller, fsa_data->origin); } else if (fsa_data->data_type != a_type) { crm_crit("%s: Message data was the wrong type! %d vs. requested=%d. Origin: %s", caller, fsa_data->data_type, a_type, fsa_data->origin); CRM_ASSERT(fsa_data->data_type == a_type); } else { ret_val = fsa_data->data; } return ret_val; } /* A_MSG_ROUTE */ void do_msg_route(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); route_message(msg_data->fsa_cause, input->msg); } void route_message(enum crmd_fsa_cause cause, xmlNode * input) { ha_msg_input_t fsa_input; enum crmd_fsa_input result = I_NULL; fsa_input.msg = input; CRM_CHECK(cause == C_IPC_MESSAGE || cause == C_HA_MESSAGE, return); /* try passing the buck first */ if (relay_message(input, cause == C_IPC_MESSAGE)) { return; } /* handle locally */ result = handle_message(input, cause); /* done or process later? 
*/ switch (result) { case I_NULL: case I_CIB_OP: case I_ROUTER: case I_NODE_JOIN: case I_JOIN_REQUEST: case I_JOIN_RESULT: break; default: /* Deferring local processing of message */ register_fsa_input_later(cause, result, &fsa_input); return; } if (result != I_NULL) { /* add to the front of the queue */ register_fsa_input(cause, result, &fsa_input); } } gboolean relay_message(xmlNode * msg, gboolean originated_locally) { int dest = 1; int is_for_dc = 0; int is_for_dcib = 0; int is_for_te = 0; int is_for_crm = 0; int is_for_cib = 0; int is_local = 0; gboolean processing_complete = FALSE; const char *host_to = crm_element_value(msg, F_CRM_HOST_TO); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *type = crm_element_value(msg, F_TYPE); const char *task = crm_element_value(msg, F_CRM_TASK); const char *msg_error = NULL; crm_trace("Routing message %s", crm_element_value(msg, XML_ATTR_REFERENCE)); if (msg == NULL) { msg_error = "Cannot route empty message"; } else if (safe_str_eq(task, CRM_OP_HELLO)) { /* quietly ignore */ processing_complete = TRUE; } else if (safe_str_neq(type, T_CRM)) { msg_error = "Bad message type"; } else if (sys_to == NULL) { msg_error = "Bad message destination: no subsystem"; } if (msg_error != NULL) { processing_complete = TRUE; crm_err("%s", msg_error); crm_log_xml_warn(msg, "bad msg"); } if (processing_complete) { return TRUE; } processing_complete = TRUE; is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0); is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0); is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0); is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0); is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0); is_local = 0; if (host_to == NULL || strlen(host_to) == 0) { if (is_for_dc || is_for_te) { is_local = 0; } else if (is_for_crm) { if (safe_str_eq(task, CRM_OP_NODE_INFO)) { /* Node info requests do not specify a host, which is normally * treated as "all hosts", because the whole point is that the * client doesn't know the local node name. Always handle these * requests locally. */ is_local = 1; } else { is_local = !originated_locally; } } else { is_local = 1; } } else if (safe_str_eq(fsa_our_uname, host_to)) { is_local = 1; } if (is_for_dc || is_for_dcib || is_for_te) { if (AM_I_DC && is_for_te) { ROUTER_RESULT("Message result: Local relay"); send_msg_via_ipc(msg, sys_to); } else if (AM_I_DC) { ROUTER_RESULT("Message result: DC/controller process"); processing_complete = FALSE; /* more to be done by caller */ } else if (originated_locally && safe_str_neq(sys_from, CRM_SYSTEM_PENGINE) && safe_str_neq(sys_from, CRM_SYSTEM_TENGINE)) { /* Neither the TE nor the scheduler should be sending messages * to DCs on other nodes. By definition, if we are no longer the DC, * then the scheduler's or TE's data should be discarded. */ #if SUPPORT_COROSYNC if (is_corosync_cluster()) { dest = text2msg_type(sys_to); } #endif ROUTER_RESULT("Message result: External relay to DC"); send_cluster_message(host_to ?
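/* Reviewer note on the two functional hunks in this patch (the exit check in
 * s_crmd_fsa() and is_message()): GLib represents an empty GList as NULL and
 * g_list_length() walks every element, so replacing `g_list_length(q) > 0`
 * with `q != NULL` turns an O(n) emptiness test into an O(1) one with
 * identical semantics. A minimal self-contained sketch:
 *
 *     #include <glib.h>
 *     static gboolean queue_has_input(GList *q) {
 *         return q != NULL;           // an empty GList is NULL by definition
 *     }
 *     int main(void) {
 *         GList *q = NULL;
 *         g_assert(!queue_has_input(q));
 *         q = g_list_append(q, "msg");
 *         g_assert(queue_has_input(q));
 *         g_list_free(q);
 *         return 0;
 *     }
 */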
crm_get_peer(0, host_to) : NULL, dest, msg, TRUE); } else { /* discard */ ROUTER_RESULT("Message result: Discard, not DC"); } } else if (is_local && (is_for_crm || is_for_cib)) { ROUTER_RESULT("Message result: controller process"); processing_complete = FALSE; /* more to be done by caller */ } else if (is_local) { ROUTER_RESULT("Message result: Local relay"); send_msg_via_ipc(msg, sys_to); } else { crm_node_t *node_to = NULL; #if SUPPORT_COROSYNC if (is_corosync_cluster()) { dest = text2msg_type(sys_to); if (dest == crm_msg_none || dest > crm_msg_stonith_ng) { dest = crm_msg_crmd; } } #endif if (host_to) { node_to = crm_find_peer(0, host_to); if (node_to == NULL) { crm_err("Cannot route message to unknown node %s", host_to); return TRUE; } } ROUTER_RESULT("Message result: External relay"); send_cluster_message(host_to ? node_to : NULL, dest, msg, TRUE); } return processing_complete; } static gboolean process_hello_message(xmlNode * hello, char **client_name, char **major_version, char **minor_version) { const char *local_client_name; const char *local_major_version; const char *local_minor_version; *client_name = NULL; *major_version = NULL; *minor_version = NULL; if (hello == NULL) { return FALSE; } local_client_name = crm_element_value(hello, "client_name"); local_major_version = crm_element_value(hello, "major_version"); local_minor_version = crm_element_value(hello, "minor_version"); if (local_client_name == NULL || strlen(local_client_name) == 0) { crm_err("Hello message was not valid (field %s not found)", "client name"); return FALSE; } else if (local_major_version == NULL || strlen(local_major_version) == 0) { crm_err("Hello message was not valid (field %s not found)", "major version"); return FALSE; } else if (local_minor_version == NULL || strlen(local_minor_version) == 0) { crm_err("Hello message was not valid (field %s not found)", "minor version"); return FALSE; } *client_name = strdup(local_client_name); *major_version = strdup(local_major_version); *minor_version = strdup(local_minor_version); crm_trace("Hello message ok"); return TRUE; } gboolean crmd_authorize_message(xmlNode * client_msg, crm_client_t * curr_client, const char *proxy_session) { char *client_name = NULL; char *major_version = NULL; char *minor_version = NULL; gboolean auth_result = FALSE; xmlNode *xml = NULL; const char *op = crm_element_value(client_msg, F_CRM_TASK); const char *uuid = curr_client ? 
curr_client->id : proxy_session; if (uuid == NULL) { crm_warn("Message [%s] not authorized", crm_element_value(client_msg, XML_ATTR_REFERENCE)); return FALSE; } else if (safe_str_neq(CRM_OP_HELLO, op)) { return TRUE; } xml = get_message_xml(client_msg, F_CRM_DATA); auth_result = process_hello_message(xml, &client_name, &major_version, &minor_version); if (auth_result == TRUE) { if (client_name == NULL) { crm_err("Bad client details (client_name=%s, uuid=%s)", crm_str(client_name), uuid); auth_result = FALSE; } } if (auth_result == TRUE) { /* check version */ int mav = atoi(major_version); int miv = atoi(minor_version); crm_trace("Checking client version number"); if (mav < 0 || miv < 0) { crm_err("Client version (%d:%d) is not acceptable", mav, miv); auth_result = FALSE; } } if (auth_result == TRUE) { crm_trace("Accepted client %s", client_name); if (curr_client) { curr_client->userdata = strdup(client_name); } crm_trace("Triggering FSA: %s", __FUNCTION__); mainloop_set_trigger(fsa_source); } else { crm_warn("Rejected client logon request"); if (curr_client) { qb_ipcs_disconnect(curr_client->ipcs); } } free(minor_version); free(major_version); free(client_name); /* hello messages should never be processed further */ return FALSE; } enum crmd_fsa_input handle_message(xmlNode * msg, enum crmd_fsa_cause cause) { const char *type = NULL; CRM_CHECK(msg != NULL, return I_NULL); type = crm_element_value(msg, F_CRM_MSG_TYPE); if (crm_str_eq(type, XML_ATTR_REQUEST, TRUE)) { return handle_request(msg, cause); } else if (crm_str_eq(type, XML_ATTR_RESPONSE, TRUE)) { handle_response(msg); return I_NULL; } crm_err("Unknown message type: %s", type); return I_NULL; } static enum crmd_fsa_input handle_failcount_op(xmlNode * stored_msg) { const char *rsc = NULL; const char *uname = NULL; const char *op = NULL; const char *interval_ms_s = NULL; char *interval_spec = NULL; guint interval_ms = 0; gboolean is_remote_node = FALSE; xmlNode *xml_op = get_message_xml(stored_msg, F_CRM_DATA); if (xml_op) { xmlNode *xml_rsc = first_named_child(xml_op, XML_CIB_TAG_RESOURCE); xmlNode *xml_attrs = first_named_child(xml_op, XML_TAG_ATTRS); if (xml_rsc) { rsc = ID(xml_rsc); } if (xml_attrs) { op = crm_element_value(xml_attrs, CRM_META "_" XML_RSC_ATTR_CLEAR_OP); interval_ms_s = crm_element_value(xml_attrs, CRM_META "_" XML_RSC_ATTR_CLEAR_INTERVAL); interval_ms = crm_parse_ms(interval_ms_s); } } uname = crm_element_value(xml_op, XML_LRM_ATTR_TARGET); if ((rsc == NULL) || (uname == NULL)) { crm_log_xml_warn(stored_msg, "invalid failcount op"); return I_NULL; } if (crm_element_value(xml_op, XML_LRM_ATTR_ROUTER_NODE)) { is_remote_node = TRUE; } if (interval_ms) { interval_spec = crm_strdup_printf("%ums", interval_ms); } update_attrd_clear_failures(uname, rsc, op, interval_spec, is_remote_node); free(interval_spec); lrm_clear_last_failure(rsc, uname, op, interval_ms); return I_NULL; } /*! * \brief Handle a CRM_OP_REMOTE_STATE message by updating remote peer cache * * \param[in] msg Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_remote_state(xmlNode *msg) { const char *remote_uname = ID(msg); const char *remote_is_up = crm_element_value(msg, XML_NODE_IN_CLUSTER); crm_node_t *remote_peer; CRM_CHECK(remote_uname && remote_is_up, return I_NULL); remote_peer = crm_remote_peer_get(remote_uname); CRM_CHECK(remote_peer, return I_NULL); crm_update_peer_state(__FUNCTION__, remote_peer, crm_is_true(remote_is_up)? CRM_NODE_MEMBER : CRM_NODE_LOST, 0); return I_NULL; } /*! 
* \brief Handle a CRM_OP_PING message * * \param[in] msg Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_ping(xmlNode *msg) { const char *value = NULL; xmlNode *ping = NULL; // Build reply ping = create_xml_node(NULL, XML_CRM_TAG_PING); value = crm_element_value(msg, F_CRM_SYS_TO); crm_xml_add(ping, XML_PING_ATTR_SYSFROM, value); // Add controller state value = fsa_state2string(fsa_state); crm_xml_add(ping, XML_PING_ATTR_CRMDSTATE, value); crm_notice("Current ping state: %s", value); // CTS needs this // Add controller health // @TODO maybe do some checks to determine meaningful status crm_xml_add(ping, XML_PING_ATTR_STATUS, "ok"); // Send reply msg = create_reply(msg, ping); free_xml(ping); if (msg) { (void) relay_message(msg, TRUE); free_xml(msg); } // Nothing further to do return I_NULL; } /*! * \brief Handle a CRM_OP_NODE_INFO request * * \param[in] msg Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_node_info_request(xmlNode *msg) { const char *value = NULL; crm_node_t *node = NULL; int node_id = 0; xmlNode *reply = NULL; // Build reply reply = create_xml_node(NULL, XML_CIB_TAG_NODE); crm_xml_add(reply, XML_PING_ATTR_SYSFROM, CRM_SYSTEM_CRMD); // Add whether current partition has quorum crm_xml_add_boolean(reply, XML_ATTR_HAVE_QUORUM, fsa_has_quorum); // Check whether client requested node info by ID and/or name crm_element_value_int(msg, XML_ATTR_ID, &node_id); if (node_id < 0) { node_id = 0; } value = crm_element_value(msg, XML_ATTR_UNAME); // Default to local node if none given if ((node_id == 0) && (value == NULL)) { value = fsa_our_uname; } node = crm_find_peer_full(node_id, value, CRM_GET_PEER_ANY); if (node) { crm_xml_add_int(reply, XML_ATTR_ID, node->id); crm_xml_add(reply, XML_ATTR_UUID, node->uuid); crm_xml_add(reply, XML_ATTR_UNAME, node->uname); crm_xml_add(reply, XML_NODE_IS_PEER, node->state); crm_xml_add_boolean(reply, XML_NODE_IS_REMOTE, node->flags & crm_remote_node); } // Send reply msg = create_reply(msg, reply); free_xml(reply); if (msg) { (void) relay_message(msg, TRUE); free_xml(msg); } // Nothing further to do return I_NULL; } static void verify_feature_set(xmlNode *msg) { const char *dc_version = crm_element_value(msg, XML_ATTR_CRM_VERSION); if (dc_version == NULL) { /* All we really know is that the DC feature set is older than 3.1.0, * but that's also all that really matters. 
*/ dc_version = "3.0.14"; } if (feature_set_compatible(dc_version, CRM_FEATURE_SET)) { crm_trace("Local feature set (%s) is compatible with DC's (%s)", CRM_FEATURE_SET, dc_version); } else { crm_err("Local feature set (%s) is incompatible with DC's (%s)", CRM_FEATURE_SET, dc_version); // Nothing is likely to improve without administrator involvement set_bit(fsa_input_register, R_STAYDOWN); crmd_exit(CRM_EX_FATAL); } } enum crmd_fsa_input handle_request(xmlNode * stored_msg, enum crmd_fsa_cause cause) { xmlNode *msg = NULL; const char *op = crm_element_value(stored_msg, F_CRM_TASK); /* Optimize this for the DC - it has the most to do */ if (op == NULL) { crm_log_xml_err(stored_msg, "Bad message"); return I_NULL; } if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { const char *from = crm_element_value(stored_msg, F_CRM_HOST_FROM); crm_node_t *node = crm_find_peer(0, from); crm_update_peer_expected(__FUNCTION__, node, CRMD_JOINSTATE_DOWN); if(AM_I_DC == FALSE) { return I_NULL; /* Done */ } } /*========== DC-Only Actions ==========*/ if (AM_I_DC) { if (strcmp(op, CRM_OP_JOIN_ANNOUNCE) == 0) { return I_NODE_JOIN; } else if (strcmp(op, CRM_OP_JOIN_REQUEST) == 0) { return I_JOIN_REQUEST; } else if (strcmp(op, CRM_OP_JOIN_CONFIRM) == 0) { return I_JOIN_RESULT; } else if (strcmp(op, CRM_OP_SHUTDOWN) == 0) { const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); gboolean dc_match = safe_str_eq(host_from, fsa_our_dc); if (is_set(fsa_input_register, R_SHUTDOWN)) { crm_info("Shutting ourselves down (DC)"); return I_STOP; } else if (dc_match) { crm_err("We didn't ask to be shut down, yet our" " TE is telling us to. Better get out now!"); return I_TERMINATE; } else if (fsa_state != S_STOPPING) { crm_err("Another node is asking us to shutdown" " but we think we're ok."); return I_ELECTION; } } else if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { /* a slave wants to shut down */ /* create cib fragment and add to message */ return handle_shutdown_request(stored_msg); } else if (strcmp(op, CRM_OP_REMOTE_STATE) == 0) { /* a remote connection host is letting us know the node state */ return handle_remote_state(stored_msg); } } /*========== common actions ==========*/ if (strcmp(op, CRM_OP_NOVOTE) == 0) { ha_msg_input_t fsa_input; fsa_input.msg = stored_msg; register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input, A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE, __FUNCTION__); } else if (strcmp(op, CRM_OP_THROTTLE) == 0) { throttle_update(stored_msg); if (AM_I_DC && transition_graph != NULL) { if (transition_graph->complete == FALSE) { crm_debug("The throttle changed. Trigger a graph."); trigger_graph(); } } return I_NULL; } else if (strcmp(op, CRM_OP_CLEAR_FAILCOUNT) == 0) { return handle_failcount_op(stored_msg); } else if (strcmp(op, CRM_OP_VOTE) == 0) { /* count the vote and decide what to do after that */ ha_msg_input_t fsa_input; fsa_input.msg = stored_msg; register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input, A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE, __FUNCTION__); /* Sometimes we _must_ go into S_ELECTION */ if (fsa_state == S_HALT) { crm_debug("Forcing an election from S_HALT"); return I_ELECTION; #if 0 } else if (AM_I_DC) { /* This is the old way of doing things but what is gained? 
*/ return I_ELECTION; #endif } } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) { verify_feature_set(stored_msg); crm_debug("Raising I_JOIN_OFFER: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID)); return I_JOIN_OFFER; } else if (strcmp(op, CRM_OP_JOIN_ACKNAK) == 0) { crm_debug("Raising I_JOIN_RESULT: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID)); return I_JOIN_RESULT; } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0 || strcmp(op, CRM_OP_LRM_FAIL) == 0 || strcmp(op, CRM_OP_LRM_REFRESH) == 0 || strcmp(op, CRM_OP_REPROBE) == 0) { crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD); return I_ROUTER; } else if (strcmp(op, CRM_OP_NOOP) == 0) { return I_NULL; } else if (strcmp(op, CRM_OP_LOCAL_SHUTDOWN) == 0) { crm_shutdown(SIGTERM); /*return I_SHUTDOWN; */ return I_NULL; } else if (strcmp(op, CRM_OP_PING) == 0) { return handle_ping(stored_msg); } else if (strcmp(op, CRM_OP_NODE_INFO) == 0) { return handle_node_info_request(stored_msg); } else if (strcmp(op, CRM_OP_RM_NODE_CACHE) == 0) { int id = 0; const char *name = NULL; crm_element_value_int(stored_msg, XML_ATTR_ID, &id); name = crm_element_value(stored_msg, XML_ATTR_UNAME); if(cause == C_IPC_MESSAGE) { msg = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) { crm_err("Could not instruct peers to remove references to node %s/%u", name, id); } else { crm_notice("Instructing peers to remove references to node %s/%u", name, id); } free_xml(msg); } else { reap_crm_member(id, name); /* If we're forgetting this node, also forget any failures to fence * it, so we don't carry that over to any node added later with the * same name. */ st_fail_count_reset(name); } } else if (strcmp(op, CRM_OP_MAINTENANCE_NODES) == 0) { xmlNode *xml = get_message_xml(stored_msg, F_CRM_DATA); remote_ra_process_maintenance_nodes(xml); /*========== (NOT_DC)-Only Actions ==========*/ } else if (AM_I_DC == FALSE && strcmp(op, CRM_OP_SHUTDOWN) == 0) { const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); gboolean dc_match = safe_str_eq(host_from, fsa_our_dc); if (dc_match || fsa_our_dc == NULL) { if (is_set(fsa_input_register, R_SHUTDOWN) == FALSE) { crm_err("We didn't ask to be shut down, yet our DC is telling us to."); set_bit(fsa_input_register, R_STAYDOWN); return I_STOP; } crm_info("Shutting down"); return I_STOP; } else { crm_warn("Discarding %s op from %s", op, host_from); } } else { crm_err("Unexpected request (%s) sent to %s", op, AM_I_DC ? 
"the DC" : "non-DC node"); crm_log_xml_err(stored_msg, "Unexpected"); } return I_NULL; } void handle_response(xmlNode * stored_msg) { const char *op = crm_element_value(stored_msg, F_CRM_TASK); if (op == NULL) { crm_log_xml_err(stored_msg, "Bad message"); } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) { // Check whether scheduler answer been superseded by subsequent request const char *msg_ref = crm_element_value(stored_msg, XML_ATTR_REFERENCE); if (msg_ref == NULL) { crm_err("%s - Ignoring calculation with no reference", op); } else if (safe_str_eq(msg_ref, fsa_pe_ref)) { ha_msg_input_t fsa_input; controld_stop_sched_timer(); fsa_input.msg = stored_msg; register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input); } else { crm_info("%s calculation %s is obsolete", op, msg_ref); } } else if (strcmp(op, CRM_OP_VOTE) == 0 || strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0 || strcmp(op, CRM_OP_SHUTDOWN) == 0) { } else { const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); crm_err("Unexpected response (op=%s, src=%s) sent to the %s", op, host_from, AM_I_DC ? "DC" : "controller"); } } enum crmd_fsa_input handle_shutdown_request(xmlNode * stored_msg) { /* handle here to avoid potential version issues * where the shutdown message/procedure may have * been changed in later versions. * * This way the DC is always in control of the shutdown */ char *now_s = NULL; time_t now = time(NULL); const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); if (host_from == NULL) { /* we're shutting down and the DC */ host_from = fsa_our_uname; } crm_info("Creating shutdown request for %s (state=%s)", host_from, fsa_state2string(fsa_state)); crm_log_xml_trace(stored_msg, "message"); now_s = crm_itoa(now); update_attrd(host_from, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE); free(now_s); /* will be picked up by the TE as long as its running */ return I_NULL; } /* msg is deleted by the time this returns */ extern gboolean process_te_message(xmlNode * msg, xmlNode * xml_data); gboolean send_msg_via_ipc(xmlNode * msg, const char *sys) { gboolean send_ok = TRUE; crm_client_t *client_channel = crm_client_get_by_id(sys); if (crm_element_value(msg, F_CRM_HOST_FROM) == NULL) { crm_xml_add(msg, F_CRM_HOST_FROM, fsa_our_uname); } if (client_channel != NULL) { /* Transient clients such as crmadmin */ send_ok = crm_ipcs_send(client_channel, 0, msg, crm_ipc_server_event); } else if (sys != NULL && strcmp(sys, CRM_SYSTEM_TENGINE) == 0) { xmlNode *data = get_message_xml(msg, F_CRM_DATA); process_te_message(msg, data); } else if (sys != NULL && strcmp(sys, CRM_SYSTEM_LRMD) == 0) { fsa_data_t fsa_data; ha_msg_input_t fsa_input; fsa_input.msg = msg; fsa_input.xml = get_message_xml(msg, F_CRM_DATA); fsa_data.id = 0; fsa_data.actions = 0; fsa_data.data = &fsa_input; fsa_data.fsa_input = I_MESSAGE; fsa_data.fsa_cause = C_IPC_MESSAGE; fsa_data.origin = __FUNCTION__; fsa_data.data_type = fsa_dt_ha_msg; #ifdef FSA_TRACE crm_trace("Invoking action A_LRM_INVOKE (%.16llx)", A_LRM_INVOKE); #endif do_lrm_invoke(A_LRM_INVOKE, C_IPC_MESSAGE, fsa_state, I_MESSAGE, &fsa_data); } else if (sys != NULL && crmd_is_proxy_session(sys)) { crmd_proxy_send(sys, msg); } else { crm_debug("Unknown Sub-system (%s)... 
discarding message.", crm_str(sys)); send_ok = FALSE; } return send_ok; } ha_msg_input_t * new_ha_msg_input(xmlNode * orig) { ha_msg_input_t *input_copy = NULL; input_copy = calloc(1, sizeof(ha_msg_input_t)); input_copy->msg = orig; input_copy->xml = get_message_xml(input_copy->msg, F_CRM_DATA); return input_copy; } void delete_ha_msg_input(ha_msg_input_t * orig) { if (orig == NULL) { return; } free_xml(orig->msg); free(orig); } /*! * \internal * \brief Notify the DC of a remote node state change * * \param[in] node_name Node's name * \param[in] node_up TRUE if node is up, FALSE if down */ void send_remote_state_message(const char *node_name, gboolean node_up) { /* If we don't have a DC, or the message fails, we have a failsafe: * the DC will eventually pick up the change via the CIB node state. * The message allows it to happen sooner if possible. */ if (fsa_our_dc) { xmlNode *msg = create_request(CRM_OP_REMOTE_STATE, NULL, fsa_our_dc, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); crm_info("Notifying DC %s of pacemaker_remote node %s %s", fsa_our_dc, node_name, (node_up? "coming up" : "going down")); crm_xml_add(msg, XML_ATTR_ID, node_name); crm_xml_add_boolean(msg, XML_NODE_IN_CLUSTER, node_up); send_cluster_message(crm_get_peer(0, fsa_our_dc), crm_msg_crmd, msg, TRUE); free_xml(msg); } else { crm_debug("No DC to notify of pacemaker_remote node %s %s", node_name, (node_up? "coming up" : "going down")); } } diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index d6f5475373..d75b8c38ec 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -1,2736 +1,2736 @@ /* * Copyright 2009-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if SUPPORT_CIBSECRETS # include #endif #include GHashTable *device_list = NULL; GHashTable *topology = NULL; GList *cmd_list = NULL; struct device_search_s { /* target of fence action */ char *host; /* requested fence action */ char *action; /* timeout to use if a device is queried dynamically for possible targets */ int per_device_timeout; /* number of registered fencing devices at time of request */ int replies_needed; /* number of device replies received so far */ int replies_received; /* whether the target is eligible to perform requested action (or off) */ bool allow_suicide; /* private data to pass to search callback function */ void *user_data; /* function to call when all replies have been received */ void (*callback) (GList * devices, void *user_data); /* devices capable of performing requested action (or off if remapping) */ GListPtr capable; }; static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data); static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer, const char *client_id); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); typedef struct async_command_s { int id; int pid; int fd_stdout; int options; int default_timeout; /* seconds */ int timeout; /* seconds */ int start_delay; /* milliseconds */ int delay_id; char *op; char *origin; char *client; char *client_name; char *remote_op_id; char *victim; uint32_t victim_nodeid; char *action; char *device; char *mode; GListPtr device_list; GListPtr device_next; void *internal_user_data; void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data); guint timer_sigterm; guint timer_sigkill; /*! If the operation timed out, this is the last signal * we sent to the process to get it to terminate */ int last_timeout_signo; stonith_device_t *active_on; stonith_device_t *activating_on; } async_command_t; static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc); static gboolean is_action_required(const char *action, stonith_device_t *device) { return device && device->automatic_unfencing && safe_str_eq(action, "on"); } static int get_action_delay_max(stonith_device_t * device, const char * action) { const char *value = NULL; int delay_max_ms = 0; if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) { return 0; } value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_MAX); if (value) { delay_max_ms = crm_get_msec(value); } return delay_max_ms; } static int get_action_delay_base(stonith_device_t * device, const char * action) { const char *value = NULL; int delay_base_ms = 0; if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) { return 0; } value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_BASE); if (value) { delay_base_ms = crm_get_msec(value); } return delay_base_ms; } /*! 
* \internal * \brief Override STONITH timeout with pcmk_*_timeout if available * * \param[in] device STONITH device to use * \param[in] action STONITH action name * \param[in] default_timeout Timeout to use if device does not have * a pcmk_*_timeout parameter for action * * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout * \note For consistency, it would be nice if reboot/off/on timeouts could be * set the same way as start/stop/monitor timeouts, i.e. with an * entry in the fencing resource configuration. However that * is insufficient because fencing devices may be registered directly via * the fencer's register_device() API instead of going through the CIB * (e.g. stonith_admin uses it for its -R option, and the executor uses it * to ensure a device is registered when a command is issued). As device * properties, pcmk_*_timeout parameters can be grabbed by the fencer when * the device is registered, whether by CIB change or API call. */ static int get_action_timeout(stonith_device_t * device, const char *action, int default_timeout) { if (action && device && device->params) { char buffer[64] = { 0, }; const char *value = NULL; /* If "reboot" was requested but the device does not support it, * we will remap to "off", so check timeout for "off" instead */ if (safe_str_eq(action, "reboot") && is_not_set(device->flags, st_device_supports_reboot)) { crm_trace("%s doesn't support reboot, using timeout for off instead", device->id); action = "off"; } /* If the device config specified an action-specific timeout, use it */ snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (value) { return atoi(value); } } return default_timeout; } static void free_async_command(async_command_t * cmd) { if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } cmd_list = g_list_remove(cmd_list, cmd); g_list_free_full(cmd->device_list, free); free(cmd->device); free(cmd->action); free(cmd->victim); free(cmd->remote_op_id); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->mode); free(cmd->op); free(cmd); } static async_command_t * create_async_command(xmlNode * msg) { async_command_t *cmd = NULL; xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR); const char *action = crm_element_value(op, F_STONITH_ACTION); CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL); crm_log_xml_trace(msg, "Command"); cmd = calloc(1, sizeof(async_command_t)); crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id)); crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options)); crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; cmd->origin = crm_element_value_copy(msg, F_ORIG); cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID); cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID); cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME); cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION); cmd->action = strdup(action); cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET); cmd->mode = crm_element_value_copy(op, F_STONITH_MODE); cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE); CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL); CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient")); cmd->done_cb = st_child_done; cmd_list = g_list_append(cmd_list, cmd); return cmd; } static int 
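/* Reviewer note (sketch of the per-action timeout lookup implemented in
 * get_action_timeout() above): the parameter name is built from the action,
 * then looked up in the device's parameter table; "reboot" is first remapped
 * to "off" when the agent does not advertise reboot support:
 *
 *     char key[64];
 *     snprintf(key, sizeof(key), "pcmk_%s_timeout", action); // e.g. pcmk_off_timeout
 *     value = g_hash_table_lookup(device->params, key);
 *     return value ? atoi(value) : default_timeout;
 *
 * So a device configured with pcmk_reboot_timeout=120 overrides the default
 * for reboot requests (falling back to pcmk_off_timeout when reboot is
 * remapped to off).
 */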
get_action_limit(stonith_device_t * device) { const char *value = NULL; int action_limit = 1; value = g_hash_table_lookup(device->params, STONITH_ATTR_ACTION_LIMIT); if (value) { action_limit = crm_parse_int(value, "1"); if (action_limit == 0) { /* pcmk_action_limit should not be 0. Enforce it to be 1. */ action_limit = 1; } } return action_limit; } static int get_active_cmds(stonith_device_t * device) { int counter = 0; GListPtr gIter = NULL; GListPtr gIterNext = NULL; CRM_CHECK(device != NULL, return 0); for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd = gIter->data; gIterNext = gIter->next; if (cmd->active_on == device) { counter++; } } return counter; } static void fork_cb(GPid pid, gpointer user_data) { async_command_t *cmd = (async_command_t *) user_data; stonith_device_t * device = /* in case of a retry we've done the move from activating_on to active_on already */ cmd->activating_on?cmd->activating_on:cmd->active_on; CRM_ASSERT(device); crm_debug("Operation '%s'%s%s on %s now running with pid=%d, timeout=%ds", cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "", device->id, pid, cmd->timeout); cmd->active_on = device; cmd->activating_on = NULL; } static gboolean stonith_device_execute(stonith_device_t * device) { int exec_rc = 0; const char *action_str = NULL; async_command_t *cmd = NULL; stonith_action_t *action = NULL; int active_cmds = 0; int action_limit = 0; GListPtr gIter = NULL; GListPtr gIterNext = NULL; CRM_CHECK(device != NULL, return FALSE); active_cmds = get_active_cmds(device); action_limit = get_action_limit(device); if (action_limit > -1 && active_cmds >= action_limit) { crm_trace("%s is over its action limit of %d (%u active action%s)", device->id, action_limit, active_cmds, active_cmds > 1 ? "s" : ""); return TRUE; } for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) { async_command_t *pending_op = gIter->data; gIterNext = gIter->next; if (pending_op && pending_op->delay_id) { crm_trace ("Operation '%s'%s%s on %s was asked to run too early, waiting for start_delay timeout of %dms", pending_op->action, pending_op->victim ? " targeting " : "", pending_op->victim ? pending_op->victim : "", device->id, pending_op->start_delay); continue; } device->pending_ops = g_list_remove_link(device->pending_ops, gIter); g_list_free_1(gIter); cmd = pending_op; break; } if (cmd == NULL) { crm_trace("Nothing further to do for %s for now", device->id); return TRUE; } if(safe_str_eq(device->agent, STONITH_WATCHDOG_AGENT)) { if(safe_str_eq(cmd->action, "reboot")) { pcmk_panic(__FUNCTION__); goto done; } else if(safe_str_eq(cmd->action, "off")) { pcmk_panic(__FUNCTION__); goto done; } else { crm_info("Faking success for %s watchdog operation", cmd->action); cmd->done_cb(0, 0, NULL, cmd); goto done; } } #if SUPPORT_CIBSECRETS if (replace_secret_params(device->id, device->params) < 0) { /* replacing secrets failed! */ if (safe_str_eq(cmd->action,"stop")) { /* don't fail on stop! 
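 * presumably because a failed stop would keep the device from ever being
 * cleaned up, which is worse than stopping it without its secret
 * parameters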
*/ crm_info("proceeding with the stop operation for %s", device->id); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", device->id); exec_rc = PCMK_OCF_NOT_CONFIGURED; cmd->done_cb(0, exec_rc, NULL, cmd); goto done; } } #endif action_str = cmd->action; if (safe_str_eq(cmd->action, "reboot") && is_not_set(device->flags, st_device_supports_reboot)) { crm_warn("Agent '%s' does not advertise support for 'reboot', performing 'off' action instead", device->agent); action_str = "off"; } action = stonith_action_create(device->agent, action_str, cmd->victim, cmd->victim_nodeid, cmd->timeout, device->params, device->aliases); /* for async exec, exec_rc is negative for early error exit otherwise handling of success/errors is done via callbacks */ cmd->activating_on = device; exec_rc = stonith_action_execute_async(action, (void *)cmd, cmd->done_cb, fork_cb); if (exec_rc < 0) { crm_warn("Operation '%s'%s%s on %s failed: %s (%d)", cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "", device->id, pcmk_strerror(exec_rc), exec_rc); cmd->activating_on = NULL; cmd->done_cb(0, exec_rc, NULL, cmd); } done: /* Device might get triggered to work by multiple fencing commands * simultaneously. Trigger the device again to make sure any * remaining concurrent commands get executed. */ if (device->pending_ops) { mainloop_set_trigger(device->work); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static gboolean start_delay_helper(gpointer data) { async_command_t *cmd = data; stonith_device_t *device = NULL; cmd->delay_id = 0; device = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL; if (device) { mainloop_set_trigger(device->work); } return FALSE; } static void schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) { int delay_max = 0; int delay_base = 0; CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } if (device->include_nodeid && cmd->victim) { crm_node_t *node = crm_get_peer(0, cmd->victim); cmd->victim_nodeid = node->id; } cmd->device = strdup(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { crm_debug("Scheduling '%s' action%s%s on %s for remote peer %s with op id (%s) (timeout=%ds)", cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "", device->id, cmd->origin, cmd->remote_op_id, cmd->timeout); } else { crm_debug("Scheduling '%s' action%s%s on %s for %s (timeout=%ds)", cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "", device->id, cmd->client, cmd->timeout); } device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); delay_max = get_action_delay_max(device, cmd->action); delay_base = get_action_delay_base(device, cmd->action); if (delay_max == 0) { delay_max = delay_base; } if (delay_max < delay_base) { crm_warn("Base-delay (%dms) is larger than max-delay (%dms) " "for %s on %s - limiting to max-delay", delay_base, delay_max, cmd->action, device->id); delay_base = delay_max; } if (delay_max > 0) { // coverity[dont_call] We're not using rand() for security cmd->start_delay = ((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0) + delay_base; crm_notice("Delaying '%s' action%s%s on %s for %dms (timeout=%ds, base=%dms, " "max=%dms)", cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? 
cmd->victim : "", device->id, cmd->start_delay, cmd->timeout, delay_base, delay_max); cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } } static void free_device(gpointer data) { GListPtr gIter = NULL; stonith_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action); cmd->done_cb(0, -ENODEV, NULL, cmd); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); mainloop_destroy_trigger(device->work); free_xml(device->agent_metadata); free(device->namespace); free(device->on_target_actions); free(device->agent); free(device->id); free(device); } void free_device_list() { if (device_list != NULL) { g_hash_table_destroy(device_list); device_list = NULL; } } void init_device_list() { if (device_list == NULL) { device_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_device); } } static GHashTable * build_port_aliases(const char *hostmap, GListPtr * targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = crm_strcase_table_new(); if (hostmap == NULL) { return aliases; } max = strlen(hostmap); for (; lpc <= max; lpc++) { switch (hostmap[lpc]) { /* Assignment chars */ case '=': case ':': if (lpc > last) { free(name); name = calloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimeter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if (name) { char *value = NULL; value = calloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last); crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { *targets = g_list_append(*targets, strdup(value)); } value = NULL; name = NULL; added++; } else if (lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last); } last = lpc + 1; break; } if (hostmap[lpc] == 0) { break; } } if (added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } GHashTable *metadata_cache = NULL; void free_metadata_cache() { if (metadata_cache != NULL) { g_hash_table_destroy(metadata_cache); metadata_cache = NULL; } } static void init_metadata_cache() { if (metadata_cache == NULL) { metadata_cache = crm_str_table_new(); } } static xmlNode * get_agent_metadata(const char *agent) { xmlNode *xml = NULL; char *buffer = NULL; init_metadata_cache(); buffer = g_hash_table_lookup(metadata_cache, agent); if(safe_str_eq(agent, STONITH_WATCHDOG_AGENT)) { return NULL; } else if(buffer == NULL) { stonith_t *st = stonith_api_new(); int rc; if (st == NULL) { crm_warn("Could not get agent meta-data: " "API memory allocation failed"); return NULL; } rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); stonith_api_delete(st); if (rc || !buffer) { crm_err("Could not retrieve metadata for fencing agent %s", agent); return NULL; } g_hash_table_replace(metadata_cache, strdup(agent), buffer); } xml = string2xml(buffer); return xml; } static gboolean is_nodeid_required(xmlNode * xml) { xmlXPathObjectPtr xpath = NULL; if (stand_alone) { return FALSE; } if (!xml) { return FALSE; } xpath = xpath_search(xml, "//parameter[@name='nodeid']"); if (numXpathResults(xpath) <= 0) { freeXpathObject(xpath); return FALSE; } freeXpathObject(xpath); 
return TRUE; } #define MAX_ACTION_LEN 256 static char * add_action(char *actions, const char *action) { int offset = 0; if (actions == NULL) { actions = calloc(1, MAX_ACTION_LEN); } else { offset = strlen(actions); } if (offset > 0) { offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, " "); } offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, "%s", action); return actions; } static void read_action_metadata(stonith_device_t *device) { xmlXPathObjectPtr xpath = NULL; int max = 0; int lpc = 0; if (device->agent_metadata == NULL) { return; } xpath = xpath_search(device->agent_metadata, "//action"); max = numXpathResults(xpath); if (max <= 0) { freeXpathObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *on_target = NULL; const char *action = NULL; xmlNode *match = getXpathResult(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; on_target = crm_element_value(match, "on_target"); action = crm_element_value(match, "name"); if(safe_str_eq(action, "list")) { set_bit(device->flags, st_device_supports_list); } else if(safe_str_eq(action, "status")) { set_bit(device->flags, st_device_supports_status); } else if(safe_str_eq(action, "reboot")) { set_bit(device->flags, st_device_supports_reboot); } else if (safe_str_eq(action, "on")) { /* "automatic" means the cluster will unfence node when it joins */ const char *automatic = crm_element_value(match, "automatic"); /* "required" is a deprecated synonym for "automatic" */ const char *required = crm_element_value(match, "required"); if (crm_is_true(automatic) || crm_is_true(required)) { device->automatic_unfencing = TRUE; } } if (action && crm_is_true(on_target)) { device->on_target_actions = add_action(device->on_target_actions, action); } } freeXpathObject(xpath); } /*! * \internal * \brief Set a pcmk_*_action parameter if not already set * * \param[in,out] params Device parameters * \param[in] action Name of action * \param[in] value Value to use if action is not already set */ static void map_action(GHashTable *params, const char *action, const char *value) { char *key = crm_strdup_printf("pcmk_%s_action", action); if (g_hash_table_lookup(params, key)) { crm_warn("Ignoring %s='%s', see %s instead", STONITH_ATTR_ACTION_OP, value, key); free(key); } else { crm_warn("Mapping %s='%s' to %s='%s'", STONITH_ATTR_ACTION_OP, value, key, value); g_hash_table_insert(params, key, strdup(value)); } } /*! * \internal * \brief Create device parameter table from XML * * \param[in] name Device name (used for logging only) * \param[in,out] params Device parameters */ static GHashTable * xml2device_params(const char *name, xmlNode *dev) { GHashTable *params = xml2list(dev); const char *value; /* Action should never be specified in the device configuration, * but we support it for users who are familiar with other software * that worked that way. 
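 * As a purely illustrative example: a legacy setting of action="off" is
 * remapped below to pcmk_reboot_action="off", so a cluster-requested
 * reboot of the target is executed as an off instead.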
*/ value = g_hash_table_lookup(params, STONITH_ATTR_ACTION_OP); if (value != NULL) { crm_warn("%s has '%s' parameter, which should never be specified in configuration", name, STONITH_ATTR_ACTION_OP); if (*value == '\0') { crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP); } else if (strcmp(value, "reboot") == 0) { crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)", STONITH_ATTR_ACTION_OP); } else if (strcmp(value, "off") == 0) { map_action(params, "reboot", value); } else { map_action(params, "off", value); map_action(params, "reboot", value); } g_hash_table_remove(params, STONITH_ATTR_ACTION_OP); } return params; } static stonith_device_t * build_device_from_xml(xmlNode * msg) { const char *value; xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR); stonith_device_t *device = NULL; char *agent = crm_element_value_copy(dev, "agent"); CRM_CHECK(agent != NULL, return device); device = calloc(1, sizeof(stonith_device_t)); CRM_CHECK(device != NULL, {free(agent); return device;}); device->id = crm_element_value_copy(dev, XML_ATTR_ID); device->agent = agent; device->namespace = crm_element_value_copy(dev, "namespace"); device->params = xml2device_params(device->id, dev); value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST); if (value) { device->targets = stonith__parse_targets(value); } value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP); device->aliases = build_port_aliases(value, &(device->targets)); device->agent_metadata = get_agent_metadata(device->agent); read_action_metadata(device); value = g_hash_table_lookup(device->params, "nodeid"); if (!value) { device->include_nodeid = is_nodeid_required(device->agent_metadata); } value = crm_element_value(dev, "rsc_provides"); if (safe_str_eq(value, "unfencing")) { device->automatic_unfencing = TRUE; } if (is_action_required("on", device)) { crm_info("The fencing device '%s' requires unfencing", device->id); } if (device->on_target_actions) { crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node", device->id, device->on_target_actions); } device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); /* TODO: Hook up priority */ return device; } static const char * target_list_type(stonith_device_t * dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK); if (check_type == NULL) { if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) { check_type = "static-list"; } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) { check_type = "static-list"; } else if(is_set(dev->flags, st_device_supports_list)){ check_type = "dynamic-list"; } else if(is_set(dev->flags, st_device_supports_status)){ check_type = "status"; } else { check_type = "none"; } } return check_type; } static void schedule_internal_command(const char *origin, stonith_device_t * device, const char *action, const char *victim, int timeout, void *internal_user_data, void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data)) { async_command_t *cmd = NULL; cmd = calloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; cmd->action = strdup(action); cmd->victim = victim ? 
strdup(victim) : NULL; cmd->device = strdup(device->id); cmd->origin = strdup(origin); cmd->client = strdup(crm_system_name); cmd->client_name = strdup(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ schedule_stonith_command(cmd, device); } gboolean string_in_list(GListPtr list, const char *item) { int lpc = 0; int max = g_list_length(list); for (lpc = 0; lpc < max; lpc++) { const char *value = g_list_nth_data(list, lpc); if (safe_str_eq(item, value)) { return TRUE; } else { crm_trace("%d: '%s' != '%s'", lpc, item, value); } } return FALSE; } static void status_search_cb(GPid pid, int rc, const char *output, gpointer user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL; gboolean can = FALSE; free_async_command(cmd); if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (rc == 1 /* unknown */ ) { crm_trace("Host %s is not known by %s", search->host, dev->id); } else if (rc == 0 /* active */ || rc == 2 /* inactive */ ) { crm_trace("Host %s is known by %s", search->host, dev->id); can = TRUE; } else { crm_notice("Unknown result when testing if %s can fence %s: rc=%d", dev->id, search->host, rc); } search_devices_record_result(search, dev->id, can); } static void dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL; gboolean can_fence = FALSE; free_async_command(cmd); /* Host/alias must be in the list output to be eligible to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); /* If we successfully got the targets earlier, don't disable. */ if (rc != 0 && !dev->targets) { crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output); /* Fall back to status */ g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status")); g_list_free_full(dev->targets, free); dev->targets = NULL; } else if (!rc) { crm_info("Refreshing port list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = stonith__parse_targets(output); dev->targets_age = time(NULL); } if (dev->targets) { const char *alias = g_hash_table_lookup(dev->aliases, search->host); if (!alias) { alias = search->host; } if (string_in_list(dev->targets, alias)) { can_fence = TRUE; } } search_devices_record_result(search, dev->id, can_fence); } /*! 
* \internal * \brief Returns true if any key in first is not in second or second has a different value for key */ static int device_params_diff(GHashTable *first, GHashTable *second) { char *key = NULL; char *value = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, first); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) { if(strstr(key, "CRM_meta") == key) { continue; } else if(strcmp(key, "crm_feature_set") == 0) { continue; } else { char *other_value = g_hash_table_lookup(second, key); if (!other_value || safe_str_neq(other_value, value)) { crm_trace("Different value for %s: %s != %s", key, other_value, value); return 1; } } } return 0; } /*! * \internal * \brief Checks to see if an identical device already exists in the device_list */ static stonith_device_t * device_has_duplicate(stonith_device_t * device) { stonith_device_t *dup = g_hash_table_lookup(device_list, device->id); if (!dup) { crm_trace("No match for %s", device->id); return NULL; } else if (safe_str_neq(dup->agent, device->agent)) { crm_trace("Different agent: %s != %s", dup->agent, device->agent); return NULL; } /* Use calculate_operation_digest() here? */ if (device_params_diff(device->params, dup->params) || device_params_diff(dup->params, device->params)) { return NULL; } crm_trace("Match"); return dup; } int stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib) { stonith_device_t *dup = NULL; stonith_device_t *device = build_device_from_xml(msg); CRM_CHECK(device != NULL, return -ENOMEM); dup = device_has_duplicate(device); if (dup) { crm_debug("Device '%s' already existed in device list (%d active devices)", device->id, g_hash_table_size(device_list)); free_device(device); device = dup; } else { stonith_device_t *old = g_hash_table_lookup(device_list, device->id); if (from_cib && old && old->api_registered) { /* If the cib is writing over an entry that is shared with a stonith client, * copy any pending ops that currently exist on the old entry to the new one. * Otherwise the pending ops will be reported as failures */ crm_info("Overwriting an existing entry for %s from the cib", device->id); device->pending_ops = old->pending_ops; device->api_registered = TRUE; old->pending_ops = NULL; if (device->pending_ops) { mainloop_set_trigger(device->work); } } g_hash_table_replace(device_list, device->id, device); crm_notice("Added '%s' to the device list (%d active devices)", device->id, g_hash_table_size(device_list)); } if (desc) { *desc = device->id; } if (from_cib) { device->cib_registered = TRUE; } else { device->api_registered = TRUE; } return pcmk_ok; } int stonith_device_remove(const char *id, gboolean from_cib) { stonith_device_t *device = g_hash_table_lookup(device_list, id); if (!device) { crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list)); return pcmk_ok; } if (from_cib) { device->cib_registered = FALSE; } else { device->verified = FALSE; device->api_registered = FALSE; } if (!device->cib_registered && !device->api_registered) { g_hash_table_remove(device_list, id); crm_info("Removed '%s' from the device list (%d active devices)", id, g_hash_table_size(device_list)); } else { crm_trace("Not removing '%s' from the device list (%d active devices) " "- still %s%s_registered", id, g_hash_table_size(device_list), device->cib_registered?"cib":"", device->api_registered?"api":""); } return pcmk_ok; } /*! 
* \internal * \brief Return the number of stonith levels registered for a node * * \param[in] tp Node's topology table entry * * \return Number of non-NULL levels in topology entry * \note This function is used only for log messages. */ static int count_active_levels(stonith_topology_t * tp) { int lpc = 0; int count = 0; for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if (tp->levels[lpc] != NULL) { count++; } } return count; } static void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if (tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->target); free(tp->target_value); free(tp->target_pattern); free(tp->target_attribute); free(tp); } void free_topology_list() { if (topology != NULL) { g_hash_table_destroy(topology); topology = NULL; } } void init_topology_list() { if (topology == NULL) { topology = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_topology_entry); } } char *stonith_level_key(xmlNode *level, int mode) { if(mode == -1) { mode = stonith_level_kind(level); } switch(mode) { case 0: return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET); case 1: return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN); case 2: { const char *name = crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE); const char *value = crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE); if(name && value) { return crm_strdup_printf("%s=%s", name, value); } } default: return crm_strdup_printf("Unknown-%d-%s", mode, ID(level)); } } int stonith_level_kind(xmlNode * level) { int mode = 0; const char *target = crm_element_value(level, XML_ATTR_STONITH_TARGET); if(target == NULL) { mode++; target = crm_element_value(level, XML_ATTR_STONITH_TARGET_PATTERN); } if(stand_alone == FALSE && target == NULL) { mode++; if(crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE) == NULL) { mode++; } else if(crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE) == NULL) { mode++; } } return mode; } static stonith_key_value_t * parse_device_list(const char *devices) { int lpc = 0; int max = 0; int last = 0; stonith_key_value_t *output = NULL; if (devices == NULL) { return output; } max = strlen(devices); for (lpc = 0; lpc <= max; lpc++) { if (devices[lpc] == ',' || devices[lpc] == 0) { char *line = strndup(devices + last, lpc - last); output = stonith_key_value_add(output, NULL, line); free(line); last = lpc + 1; } } return output; } /*! * \internal * \brief Register a STONITH level for a target * * Given an XML request specifying the target name, level index, and device IDs * for the level, this will create an entry for the target in the global topology * table if one does not already exist, then append the specified device IDs to * the entry's device list for the specified level. * * \param[in] msg XML request for STONITH level registration * \param[out] desc If not NULL, will be set to string representation ("TARGET[LEVEL]") * * \return pcmk_ok on success, -EINVAL if XML does not specify valid level index */ int stonith_level_register(xmlNode *msg, char **desc) { int id = 0; xmlNode *level; int mode; char *target; stonith_topology_t *tp; stonith_key_value_t *dIter = NULL; stonith_key_value_t *devices = NULL; /* Allow the XML here to point to the level tag directly, or wrapped in * another tag. If directly, don't search by xpath, because it might give * multiple hits (e.g. if the XML is the CIB). 
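 * For illustration only (identifiers are hypothetical), a registration
 * request might carry a fragment such as:
 *   <fencing-level id="fl-1" target="node1" index="1"
 *                  devices="disk_fence,power_fence"/>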
*/ if (safe_str_eq(TYPE(msg), XML_TAG_FENCING_LEVEL)) { level = msg; } else { level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR); } CRM_CHECK(level != NULL, return -EINVAL); mode = stonith_level_kind(level); target = stonith_level_key(level, mode); crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id); if (desc) { *desc = crm_strdup_printf("%s[%d]", target, id); } /* Sanity-check arguments */ if (mode >= 3 || (id <= 0) || (id >= ST_LEVEL_MAX)) { crm_trace("Could not add %s[%d] (%d) to the topology (%d active entries)", target, id, mode, g_hash_table_size(topology)); free(target); crm_log_xml_err(level, "Bad topology"); return -EINVAL; } /* Find or create topology table entry */ tp = g_hash_table_lookup(topology, target); if (tp == NULL) { tp = calloc(1, sizeof(stonith_topology_t)); tp->kind = mode; tp->target = target; tp->target_value = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_VALUE); tp->target_pattern = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN); tp->target_attribute = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE); g_hash_table_replace(topology, tp->target, tp); crm_trace("Added %s (%d) to the topology (%d active entries)", target, mode, g_hash_table_size(topology)); } else { free(target); } if (tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry", tp->target, id); } devices = parse_device_list(crm_element_value(level, XML_ATTR_STONITH_DEVICES)); for (dIter = devices; dIter; dIter = dIter->next) { const char *device = dIter->value; crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id); tp->levels[id] = g_list_append(tp->levels[id], strdup(device)); } stonith_key_value_freeall(devices, 1, 1); crm_info("Target %s has %d active fencing levels", tp->target, count_active_levels(tp)); return pcmk_ok; } int stonith_level_remove(xmlNode *msg, char **desc) { int id = 0; stonith_topology_t *tp; char *target; /* Unlike additions, removal requests should always have one level tag */ xmlNode *level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR); CRM_CHECK(level != NULL, return -EINVAL); target = stonith_level_key(level, -1); crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id); if (desc) { *desc = crm_strdup_printf("%s[%d]", target, id); } /* Sanity-check arguments */ if (id >= ST_LEVEL_MAX) { free(target); return -EINVAL; } tp = g_hash_table_lookup(topology, target); if (tp == NULL) { crm_info("Topology for %s not found (%d active entries)", target, g_hash_table_size(topology)); } else if (id == 0 && g_hash_table_remove(topology, target)) { crm_info("Removed all %s related entries from the topology (%d active entries)", target, g_hash_table_size(topology)); } else if (id > 0 && tp->levels[id] != NULL) { g_list_free_full(tp->levels[id], free); tp->levels[id] = NULL; crm_info("Removed level '%d' from topology for %s (%d active levels remaining)", id, target, count_active_levels(tp)); } free(target); return pcmk_ok; } /*! * \internal * \brief Schedule an (asynchronous) action directly on a stonith device * * Handle a STONITH_OP_EXEC API message by scheduling a requested agent action * directly on a specified device. Only list, monitor, and status actions are * expected to use this call, though it should work with any agent command. 
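 * One useful side effect: when a list, monitor, or status action
 * succeeds, st_child_done() marks the device as verified, and that flag
 * is included in later query replies.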
* * \param[in] msg API message XML with desired action * \param[out] output Unused * * \return -EINPROGRESS on success, -errno otherwise * \note If the action is monitor, the device must be registered via the API * (CIB registration is not sufficient), because monitor should not be * possible unless the device is "started" (API registered). */ static int stonith_device_action(xmlNode * msg, char **output) { xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR); xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR); const char *id = crm_element_value(dev, F_STONITH_DEVICE); const char *action = crm_element_value(op, F_STONITH_ACTION); async_command_t *cmd = NULL; stonith_device_t *device = NULL; if ((id == NULL) || (action == NULL)) { crm_info("Malformed API action request: device %s, action %s", (id? id : "not specified"), (action? action : "not specified")); return -EPROTO; } device = g_hash_table_lookup(device_list, id); if ((device == NULL) || (!device->api_registered && !strcmp(action, "monitor"))) { // Monitors may run only on "started" (API-registered) devices crm_info("Ignoring API '%s' action request because device %s not found", action, id); return -ENODEV; } cmd = create_async_command(msg); if (cmd == NULL) { return -EPROTO; } schedule_stonith_command(cmd, device); return -EINPROGRESS; } static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence) { search->replies_received++; if (can_fence && device) { search->capable = g_list_append(search->capable, strdup(device)); } if (search->replies_needed == search->replies_received) { crm_debug("Finished Search. %d devices can perform action (%s) on node %s", g_list_length(search->capable), search->action ? search->action : "", search->host ? search->host : ""); search->callback(search->capable, search->user_data); free(search->host); free(search->action); free(search); } } /*! * \internal * \brief Check whether the local host is allowed to execute a fencing action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Hostname of fence target * \param[in] allow_suicide Whether self-fencing is allowed for this operation * * \return TRUE if local host is allowed to execute action, FALSE otherwise */ static gboolean localhost_is_eligible(const stonith_device_t *device, const char *action, const char *target, gboolean allow_suicide) { gboolean localhost_is_target = safe_str_eq(target, stonith_our_uname); if (device && action && device->on_target_actions && strstr(device->on_target_actions, action)) { if (!localhost_is_target) { crm_trace("'%s' operation with %s can only be executed for localhost not %s", action, device->id, target); return FALSE; } } else if (localhost_is_target && !allow_suicide) { crm_trace("'%s' operation does not support self-fencing", action); return FALSE; } return TRUE; } static void can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search) { gboolean can = FALSE; const char *check_type = NULL; const char *host = search->host; const char *alias = NULL; CRM_LOG_ASSERT(dev != NULL); if (dev == NULL) { goto search_report_results; } else if (host == NULL) { can = TRUE; goto search_report_results; } /* Short-circuit query if this host is not allowed to perform the action */ if (safe_str_eq(search->action, "reboot")) { /* A "reboot" *might* get remapped to "off" then "on", so short-circuit * only if all three are disallowed. 
If only one or two are disallowed, * we'll report that with the results. We never allow suicide for * remapped "on" operations because the host is off at that point. */ if (!localhost_is_eligible(dev, "reboot", host, search->allow_suicide) && !localhost_is_eligible(dev, "off", host, search->allow_suicide) && !localhost_is_eligible(dev, "on", host, FALSE)) { goto search_report_results; } } else if (!localhost_is_eligible(dev, search->action, host, search->allow_suicide)) { goto search_report_results; } alias = g_hash_table_lookup(dev->aliases, host); if (alias == NULL) { alias = host; } check_type = target_list_type(dev); if (safe_str_eq(check_type, "none")) { can = TRUE; } else if (safe_str_eq(check_type, "static-list")) { /* Presence in the hostmap is sufficient * Only use if all hosts on which the device can be active can always fence all listed hosts */ if (string_in_list(dev->targets, host)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP) && g_hash_table_lookup(dev->aliases, host)) { can = TRUE; } } else if (safe_str_eq(check_type, "dynamic-list")) { time_t now = time(NULL); if (dev->targets == NULL || dev->targets_age + 60 < now) { crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev->id, search->host, search->action); schedule_internal_command(__FUNCTION__, dev, "list", NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ return; } if (string_in_list(dev->targets, alias)) { can = TRUE; } } else if (safe_str_eq(check_type, "status")) { crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev->id, search->host, search->action); schedule_internal_command(__FUNCTION__, dev, "status", search->host, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; } else { crm_err("Invalid value for " STONITH_ATTR_HOSTCHECK ": %s", check_type); check_type = "Invalid " STONITH_ATTR_HOSTCHECK; } if (safe_str_eq(host, alias)) { crm_notice("%s is%s eligible to fence (%s) %s: %s", dev->id, (can? "" : " not"), search->action, host, check_type); } else { crm_notice("%s is%s eligible to fence (%s) %s (aka. '%s'): %s", dev->id, (can? "" : " not"), search->action, host, alias, check_type); } search_report_results: search_devices_record_result(search, dev ? 
dev->id : NULL, can); } static void search_devices(gpointer key, gpointer value, gpointer user_data) { stonith_device_t *dev = value; struct device_search_s *search = user_data; can_fence_host_with_device(dev, search); } #define DEFAULT_QUERY_TIMEOUT 20 static void get_capable_devices(const char *host, const char *action, int timeout, bool suicide, void *user_data, void (*callback) (GList * devices, void *user_data)) { struct device_search_s *search; int per_device_timeout = DEFAULT_QUERY_TIMEOUT; int devices_needing_async_query = 0; char *key = NULL; const char *check_type = NULL; GHashTableIter gIter; stonith_device_t *device = NULL; if (!g_hash_table_size(device_list)) { callback(NULL, user_data); return; } search = calloc(1, sizeof(struct device_search_s)); if (!search) { callback(NULL, user_data); return; } g_hash_table_iter_init(&gIter, device_list); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) { check_type = target_list_type(device); if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) { devices_needing_async_query++; } } /* If we have devices that require an async event in order to know what * nodes they can fence, we have to give the events a timeout. The total * query timeout is divided among those events. */ if (devices_needing_async_query) { per_device_timeout = timeout / devices_needing_async_query; if (!per_device_timeout) { crm_err("STONITH timeout %ds is too low; using %ds, but consider raising to at least %ds", timeout, DEFAULT_QUERY_TIMEOUT, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query); per_device_timeout = DEFAULT_QUERY_TIMEOUT; } else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) { crm_notice("STONITH timeout %ds is low for the current configuration;" " consider raising to at least %ds", timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query); } } search->host = host ? strdup(host) : NULL; search->action = action ? strdup(action) : NULL; search->per_device_timeout = per_device_timeout; /* We are guaranteed this many replies. Even if a device gets * unregistered somehow during the async search, we will get * the correct number of replies. */ search->replies_needed = g_hash_table_size(device_list); search->allow_suicide = suicide; search->callback = callback; search->user_data = user_data; /* kick off the search */ crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s", search->replies_needed, search->action ? search->action : "", search->host ? search->host : ""); g_hash_table_foreach(device_list, search_devices, search); } struct st_query_data { xmlNode *reply; char *remote_peer; char *client_id; char *target; char *action; int call_options; }; /*!
* \internal * \brief Add action-specific attributes to query reply XML * * \param[in,out] xml XML to add attributes to * \param[in] action Fence action * \param[in] device Fence device */ static void add_action_specific_attributes(xmlNode *xml, const char *action, stonith_device_t *device) { int action_specific_timeout; int delay_max; int delay_base; CRM_CHECK(xml && action && device, return); if (is_action_required(action, device)) { crm_trace("Action '%s' is required on %s", action, device->id); crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1); } action_specific_timeout = get_action_timeout(device, action, 0); if (action_specific_timeout) { crm_trace("Action '%s' has timeout %dms on %s", action, action_specific_timeout, device->id); crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout); } delay_max = get_action_delay_max(device, action); if (delay_max > 0) { crm_trace("Action '%s' has maximum random delay %dms on %s", action, delay_max, device->id); crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max / 1000); } delay_base = get_action_delay_base(device, action); if (delay_base > 0) { crm_xml_add_int(xml, F_STONITH_DELAY_BASE, delay_base / 1000); } if ((delay_max > 0) && (delay_base == 0)) { crm_trace("Action '%s' has maximum random delay %dms on %s", action, delay_max, device->id); } else if ((delay_max == 0) && (delay_base > 0)) { crm_trace("Action '%s' has a static delay of %dms on %s", action, delay_base, device->id); } else if ((delay_max > 0) && (delay_base > 0)) { crm_trace("Action '%s' has a minimum delay of %dms and a randomly chosen " "maximum delay of %dms on %s", action, delay_base, delay_max, device->id); } } /*! * \internal * \brief Add "disallowed" attribute to query reply XML if appropriate * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_suicide Whether self-fencing is allowed */ static void add_disallowed(xmlNode *xml, const char *action, stonith_device_t *device, const char *target, gboolean allow_suicide) { if (!localhost_is_eligible(device, action, target, allow_suicide)) { crm_trace("Action '%s' on %s is disallowed for local host", action, device->id); crm_xml_add(xml, F_STONITH_ACTION_DISALLOWED, XML_BOOLEAN_TRUE); } } /*! 
* \internal * \brief Add child element with action-specific values to query reply XML * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_suicide Whether self-fencing is allowed */ static void add_action_reply(xmlNode *xml, const char *action, stonith_device_t *device, const char *target, gboolean allow_suicide) { xmlNode *child = create_xml_node(xml, F_STONITH_ACTION); crm_xml_add(child, XML_ATTR_ID, action); add_action_specific_attributes(child, action, device); add_disallowed(child, action, device, target, allow_suicide); } static void stonith_query_capable_device_cb(GList * devices, void *user_data) { struct st_query_data *query = user_data; int available_devices = 0; xmlNode *dev = NULL; xmlNode *list = NULL; GListPtr lpc = NULL; /* Pack the results into XML */ list = create_xml_node(NULL, __FUNCTION__); crm_xml_add(list, F_STONITH_TARGET, query->target); for (lpc = devices; lpc != NULL; lpc = lpc->next) { stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data); const char *action = query->action; if (!device) { /* It is possible the device got unregistered while * determining who can fence the target */ continue; } available_devices++; dev = create_xml_node(list, F_STONITH_DEVICE); crm_xml_add(dev, XML_ATTR_ID, device->id); crm_xml_add(dev, "namespace", device->namespace); crm_xml_add(dev, "agent", device->agent); crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified); /* If the originating fencer wants to reboot the node, and we have a * capable device that doesn't support "reboot", remap to "off" instead. */ if (is_not_set(device->flags, st_device_supports_reboot) && safe_str_eq(query->action, "reboot")) { crm_trace("%s doesn't support reboot, using values for off instead", device->id); action = "off"; } /* Add action-specific values if available */ add_action_specific_attributes(dev, action, device); if (safe_str_eq(query->action, "reboot")) { /* A "reboot" *might* get remapped to "off" then "on", so after * sending the "reboot"-specific values in the main element, we add * sub-elements for "off" and "on" values. * * We short-circuited earlier if "reboot", "off" and "on" are all * disallowed for the local host. However if only one or two are * disallowed, we send back the results and mark which ones are * disallowed. If "reboot" is disallowed, this might cause problems * with older fencer versions, which won't check for it. Older * versions will ignore "off" and "on", so they are not a problem. 
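 * In short: the main device element carries the "reboot" values, and the
 * nested "off" and "on" children added below let a newer peer evaluate
 * each phase of a remapped reboot on its own.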
*/ add_disallowed(dev, action, device, query->target, is_set(query->call_options, st_opt_allow_suicide)); add_action_reply(dev, "off", device, query->target, is_set(query->call_options, st_opt_allow_suicide)); add_action_reply(dev, "on", device, query->target, FALSE); } /* A query without a target wants device parameters */ if (query->target == NULL) { xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS); g_hash_table_foreach(device->params, hash2field, attrs); } } crm_xml_add_int(list, F_STONITH_AVAILABLE_DEVICES, available_devices); if (query->target) { crm_debug("Found %d matching devices for '%s'", available_devices, query->target); } else { crm_debug("%d devices installed", available_devices); } if (list != NULL) { crm_log_xml_trace(list, "Add query results"); add_message_xml(query->reply, F_STONITH_CALLDATA, list); } stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id); free_xml(query->reply); free(query->remote_peer); free(query->client_id); free(query->target); free(query->action); free(query); free_xml(list); g_list_free_full(devices, free); } static void stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options) { struct st_query_data *query = NULL; const char *action = NULL; const char *target = NULL; int timeout = 0; xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_NEVER); crm_element_value_int(msg, F_STONITH_TIMEOUT, &timeout); if (dev) { const char *device = crm_element_value(dev, F_STONITH_DEVICE); target = crm_element_value(dev, F_STONITH_TARGET); action = crm_element_value(dev, F_STONITH_ACTION); if (device && safe_str_eq(device, "manual_ack")) { /* No query or reply necessary */ return; } } crm_log_xml_debug(msg, "Query"); query = calloc(1, sizeof(struct st_query_data)); query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok); query->remote_peer = remote_peer ? strdup(remote_peer) : NULL; query->client_id = client_id ? strdup(client_id) : NULL; query->target = target ? strdup(target) : NULL; query->action = action ? strdup(action) : NULL; query->call_options = call_options; get_capable_devices(target, action, timeout, is_set(call_options, st_opt_allow_suicide), query, stonith_query_capable_device_cb); } #define ST_LOG_OUTPUT_MAX 512 static void log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output, gboolean op_merged) { if (rc == 0) { next = NULL; } if (cmd->victim != NULL) { do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR, "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned%s: %d (%s)%s%s", cmd->action, pid, cmd->id, cmd->client_name, cmd->victim, cmd->device, (op_merged? " (merged)" : ""), rc, pcmk_strerror(rc), (next? ", retrying with " : ""), (next ? next : "")); } else { do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE, "Operation '%s' [%d] for device '%s' returned%s: %d (%s)%s%s", cmd->action, pid, cmd->device, (op_merged? " (merged)" : ""), rc, pcmk_strerror(rc), (next? ", retrying with " : ""), (next ? next : "")); } if (output) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s:%d", cmd->device, pid); crm_log_output(rc == 0 ? 
LOG_DEBUG : LOG_WARNING, prefix, output); free(prefix); } } static void stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid, int options) { xmlNode *reply = NULL; gboolean bcast = FALSE; reply = stonith_construct_async_reply(cmd, output, NULL, rc); if (safe_str_eq(cmd->action, "metadata")) { /* Too verbose to log */ crm_trace("Metadata query for %s", cmd->device); output = NULL; } else if (crm_str_eq(cmd->action, "monitor", TRUE) || crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) { crm_trace("Never broadcast '%s' replies", cmd->action); } else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim) && safe_str_neq(cmd->action, "on")) { crm_trace("Broadcast '%s' reply for %s", cmd->action, cmd->victim); crm_xml_add(reply, F_SUBTYPE, "broadcast"); bcast = TRUE; } log_operation(cmd, rc, pid, NULL, output, (options & st_reply_opt_merged ? TRUE : FALSE)); crm_log_xml_trace(reply, "Reply"); if (options & st_reply_opt_merged) { crm_xml_add(reply, F_STONITH_MERGED, "true"); } if (bcast) { crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY); send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE); } else if (cmd->origin) { crm_trace("Directed reply to %s", cmd->origin); send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE); } else { crm_trace("Directed local %ssync reply to %s", (cmd->options & st_opt_sync_call) ? "" : "a-", cmd->client_name); do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE); } if (stand_alone) { /* Do notification with a clean data object */ xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE); crm_xml_add_int(notify_data, F_STONITH_RC, rc); crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim); crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op); crm_xml_add(notify_data, F_STONITH_DELEGATE, "localhost"); crm_xml_add(notify_data, F_STONITH_DEVICE, cmd->device); crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id); crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client); do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data); do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL); } free_xml(reply); } static void cancel_stonith_command(async_command_t * cmd) { stonith_device_t *device; CRM_CHECK(cmd != NULL, return); if (!cmd->device) { return; } device = g_hash_table_lookup(device_list, cmd->device); if (device) { crm_trace("Cancel scheduled '%s' action on %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data) { stonith_device_t *device = NULL; stonith_device_t *next_device = NULL; async_command_t *cmd = user_data; GListPtr gIter = NULL; GListPtr gIterNext = NULL; CRM_CHECK(cmd != NULL, return); cmd->active_on = NULL; /* The device is ready to do something else now */ device = g_hash_table_lookup(device_list, cmd->device); if (device) { if (!device->verified && (rc == pcmk_ok) && (safe_str_eq(cmd->action, "list") || safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) { device->verified = TRUE; } mainloop_set_trigger(device->work); } crm_debug("Operation '%s' on '%s' completed with rc=%d (%d remaining)", cmd->action, cmd->device, rc, g_list_length(cmd->device_next)); if (rc == 0) { GListPtr iter; /* see if there are any required devices left to execute for this op */ for (iter = cmd->device_next; iter != NULL; iter = iter->next) { next_device = 
g_hash_table_lookup(device_list, iter->data); if (next_device != NULL && is_action_required(cmd->action, next_device)) { cmd->device_next = iter->next; break; } next_device = NULL; } } else if (rc != 0 && cmd->device_next && (is_action_required(cmd->action, device) == FALSE)) { /* if this device didn't work out, see if there are any others we can try. * if the failed device was 'required', we can't pick another device. */ next_device = g_hash_table_lookup(device_list, cmd->device_next->data); cmd->device_next = cmd->device_next->next; } /* this operation requires more fencing, hooray! */ if (next_device) { log_operation(cmd, rc, pid, next_device->id, output, FALSE); schedule_stonith_command(cmd, next_device); /* Prevent cmd from being freed */ cmd = NULL; goto done; } stonith_send_async_reply(cmd, output, rc, pid, st_reply_opt_none); if (rc != 0) { goto done; } /* Check to see if any operations are scheduled to do the exact * same thing that just completed. If so, rather than * performing the same fencing operation twice, return the result * of this operation for all pending commands it matches. */ for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd_other = gIter->data; gIterNext = gIter->next; if (cmd == cmd_other) { continue; } /* A pending scheduled command matches the command that just finished if: * 1. The client connections are different. * 2. The node victim is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. */ if (safe_str_eq(cmd->client, cmd_other->client) || safe_str_neq(cmd->victim, cmd_other->victim) || safe_str_neq(cmd->action, cmd_other->action) || safe_str_neq(cmd->device, cmd_other->device)) { continue; } /* Duplicate merging will do the right thing for either type of remapped * reboot. If the executing fencer remapped an unsupported reboot to * off, then cmd->action will be reboot and will be merged with any * other reboot requests. If the originating fencer remapped a * topology reboot to off then on, we will get here once with * cmd->action "off" and once with "on", and they will be merged * separately with similar requests. */ crm_notice ("Merging stonith action '%s' targeting %s originating from client %s with identical stonith request from client %s", cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name); cmd_list = g_list_remove_link(cmd_list, gIter); stonith_send_async_reply(cmd_other, output, rc, pid, st_reply_opt_merged); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(gIter); } done: free_async_command(cmd); } static gint sort_device_priority(gconstpointer a, gconstpointer b) { const stonith_device_t *dev_a = a; const stonith_device_t *dev_b = b; if (dev_a->priority > dev_b->priority) { return -1; } else if (dev_a->priority < dev_b->priority) { return 1; } return 0; } static void stonith_fence_get_devices_cb(GList * devices, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim); - if (g_list_length(devices) > 0) { + if (devices != NULL) { /* Order based on priority */ devices = g_list_sort(devices, sort_device_priority); device = g_hash_table_lookup(device_list, devices->data); if (device) { cmd->device_list = devices; cmd->device_next = devices->next; devices = NULL; /* list owned by cmd now */ } } /* we have a device, schedule it for fencing.
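 * Only the highest-priority device is scheduled here; the rest of the
 * sorted list stays on cmd->device_list so that st_child_done() can fall
 * back to the next device if this attempt fails.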
*/ if (device) { schedule_stonith_command(cmd, device); /* in progress */ return; } /* no device found! */ stonith_send_async_reply(cmd, NULL, -ENODEV, 0, st_reply_opt_none); free_async_command(cmd); g_list_free_full(devices, free); } static int stonith_fence(xmlNode * msg) { const char *device_id = NULL; stonith_device_t *device = NULL; async_command_t *cmd = create_async_command(msg); xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR); if (cmd == NULL) { return -EPROTO; } device_id = crm_element_value(dev, F_STONITH_DEVICE); if (device_id) { device = g_hash_table_lookup(device_list, device_id); if (device == NULL) { crm_err("Requested device '%s' is not available", device_id); return -ENODEV; } schedule_stonith_command(cmd, device); } else { const char *host = crm_element_value(dev, F_STONITH_TARGET); if (cmd->options & st_opt_cs_nodeid) { int nodeid = crm_atoi(host, NULL); crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY); if (node) { host = node->uname; } } /* If we get to here, then self-fencing is implicitly allowed */ get_capable_devices(host, cmd->action, cmd->default_timeout, TRUE, cmd, stonith_fence_get_devices_cb); } return -EINPROGRESS; } xmlNode * stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc) { int lpc = 0; xmlNode *reply = NULL; const char *name = NULL; const char *value = NULL; const char *names[] = { F_STONITH_OPERATION, F_STONITH_CALLID, F_STONITH_CLIENTID, F_STONITH_CLIENTNAME, F_STONITH_REMOTE_OP_ID, F_STONITH_CALLOPTS }; crm_trace("Creating a basic reply"); reply = create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, "st_output", output); crm_xml_add_int(reply, F_STONITH_RC, rc); CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply); for (lpc = 0; lpc < DIMOF(names); lpc++) { name = names[lpc]; value = crm_element_value(request, name); crm_xml_add(reply, name, value); } if (data != NULL) { crm_trace("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } static xmlNode * stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc) { xmlNode *reply = NULL; crm_trace("Creating a basic reply"); reply = create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, F_STONITH_OPERATION, cmd->op); crm_xml_add(reply, F_STONITH_DEVICE, cmd->device); crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id); crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client); crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name); crm_xml_add(reply, F_STONITH_TARGET, cmd->victim); crm_xml_add(reply, F_STONITH_ACTION, cmd->op); crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin); crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id); crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options); crm_xml_add_int(reply, F_STONITH_RC, rc); crm_xml_add(reply, "st_output", output); if (data != NULL) { crm_info("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } bool fencing_peer_active(crm_node_t *peer) { if (peer == NULL) { return FALSE; } else if (peer->uname == NULL) { return FALSE; } else if (is_set(peer->processes, crm_get_cluster_proc())) { return TRUE; } return FALSE; } /*! * \internal * \brief Determine if we need to use an alternate node to * fence the target. 
If so, return that node's uname * * \retval NULL, no alternate host * \retval uname, uname of alternate host to use */ static const char * check_alternate_host(const char *target) { const char *alternate_host = NULL; crm_trace("Checking if we (%s) can fence %s", stonith_our_uname, target); if (find_topology_for_host(target) && safe_str_eq(target, stonith_our_uname)) { GHashTableIter gIter; crm_node_t *entry = NULL; g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target); if (fencing_peer_active(entry) && safe_str_neq(entry->uname, target)) { alternate_host = entry->uname; break; } } if (alternate_host == NULL) { crm_err("No alternate host available to handle complex self fencing request"); g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_notice("Peer[%d] %s", entry->id, entry->uname); } } } return alternate_host; } static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer, const char *client_id) { if (remote_peer) { send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE); } else { do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL); } } static int handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request, const char *remote_peer) { int call_options = 0; int rc = -EOPNOTSUPP; xmlNode *data = NULL; xmlNode *reply = NULL; char *output = NULL; const char *op = crm_element_value(request, F_STONITH_OPERATION); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { xmlNode *reply = create_xml_node(NULL, "reply"); CRM_ASSERT(client); crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_STONITH_CLIENTID, client->id); crm_ipcs_send(client, id, reply, flags); client->request_id = 0; free_xml(reply); return 0; } else if (crm_str_eq(op, STONITH_OP_EXEC, TRUE)) { rc = stonith_device_action(request, &output); } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) { const char *call_id = crm_element_value(request, F_STONITH_CALLID); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); int op_timeout = 0; crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout); do_stonith_async_timeout_update(client_id, call_id, op_timeout); return 0; } else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { if (remote_peer) { create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */ } stonith_query(request, remote_peer, client_id, call_options); return 0; } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { const char *flag_name = NULL; CRM_ASSERT(client); flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE); if (flag_name) { crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id); client->options |= get_stonith_flag(flag_name); } flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE); if (flag_name) { crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id); client->options &= ~get_stonith_flag(flag_name); } if (flags & crm_ipc_client_response) { crm_ipcs_send_ack(client, id, flags, "ack",
__FUNCTION__, __LINE__); } return 0; } else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) { xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s", stonith_our_uname, client ? client->name : remote_peer, crm_element_value(dev, F_STONITH_ACTION), crm_element_value(dev, F_STONITH_TARGET)); if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) { rc = -EINPROGRESS; } } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { if (remote_peer || stand_alone) { rc = stonith_fence(request); } else if (call_options & st_opt_manual_ack) { remote_fencing_op_t *rop = NULL; xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); const char *target = crm_element_value(dev, F_STONITH_TARGET); crm_notice("Received manual confirmation that %s is fenced", target); rop = initiate_remote_stonith_op(client, request, TRUE); rc = stonith_manual_ack(request, rop); } else { const char *alternate_host = NULL; xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); const char *target = crm_element_value(dev, F_STONITH_TARGET); const char *action = crm_element_value(dev, F_STONITH_ACTION); const char *device = crm_element_value(dev, F_STONITH_DEVICE); if (client) { int tolerance = 0; crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'", client->name, client->id, action, target, device ? device : "(any)"); crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance); if (stonith_check_fence_tolerance(tolerance, target, action)) { rc = 0; goto done; } } else { crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'", remote_peer, action, target, device ? device : "(any)"); } alternate_host = check_alternate_host(target); if (alternate_host && client) { const char *client_id = NULL; crm_notice("Forwarding complex self fencing request to peer %s", alternate_host); if (client->id) { client_id = client->id; } else { client_id = crm_element_value(request, F_STONITH_CLIENTID); } /* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */ create_remote_stonith_op(client_id, request, FALSE); crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY); crm_xml_add(request, F_STONITH_CLIENTID, client->id); send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request, FALSE); rc = -EINPROGRESS; } else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) { rc = -EINPROGRESS; } } } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) { rc = stonith_fence_history(request, &data, remote_peer, call_options); if (call_options & st_opt_discard_reply) { /* we don't expect answers to the broadcast * we might have sent out */ free_xml(data); return pcmk_ok; } } else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) { const char *device_id = NULL; rc = stonith_device_register(request, &device_id, FALSE); do_stonith_notify_device(call_options, op, rc, device_id); } else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) { xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR); const char *device_id = crm_element_value(dev, XML_ATTR_ID); rc = stonith_device_remove(device_id, FALSE); do_stonith_notify_device(call_options, op, rc, device_id); } else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) { char *device_id = NULL; rc = stonith_level_register(request, &device_id); do_stonith_notify_level(call_options, op, rc, device_id); free(device_id); } else if (crm_str_eq(op, 
STONITH_OP_LEVEL_DEL, TRUE)) { char *device_id = NULL; rc = stonith_level_remove(request, &device_id); do_stonith_notify_level(call_options, op, rc, device_id); } else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) { int node_id = 0; const char *name = NULL; crm_element_value_int(request, XML_ATTR_ID, &node_id); name = crm_element_value(request, XML_ATTR_UNAME); reap_crm_member(node_id, name); return pcmk_ok; } else { crm_err("Unknown %s from %s", op, client ? client->name : remote_peer); crm_log_xml_warn(request, "UnknownOp"); } done: /* Always reply unless the request is in process still. * If in progress, a reply will happen async after the request * processing is finished */ if (rc != -EINPROGRESS) { crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0, id, is_set(call_options, st_opt_sync_call), call_options, crm_element_value(request, F_STONITH_CALLOPTS)); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } reply = stonith_construct_reply(request, output, data, rc); stonith_send_reply(reply, call_options, remote_peer, client_id); } free(output); free_xml(data); free_xml(reply); return rc; } static void handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer) { const char *op = crm_element_value(request, F_STONITH_OPERATION); if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { process_remote_stonith_query(request); } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { process_remote_stonith_exec(request); } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { /* Reply to a complex fencing op */ process_remote_stonith_exec(request); } else { crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer); crm_log_xml_warn(request, "UnknownOp"); } } void stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request, const char *remote_peer) { int call_options = 0; int rc = 0; gboolean is_reply = FALSE; /* Copy op for reporting. The original might get freed by handle_reply() * before we use it in crm_debug(): * handle_reply() * |- process_remote_stonith_exec() * |-- remote_op_done() * |--- handle_local_reply_and_notify() * |---- crm_xml_add(...F_STONITH_OPERATION...) * |--- free_xml(op->request) */ char *op = crm_element_value_copy(request, F_STONITH_OPERATION); if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_NEVER)) { is_reply = TRUE; } crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "", id, client ? client->name : remote_peer, call_options); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } if (is_reply) { handle_reply(client, request, remote_peer); } else { rc = handle_request(client, id, flags, request, remote_peer); } crm_debug("Processed %s%s from %s: %s (%d)", op, is_reply ? " reply" : "", client ? client->name : remote_peer, rc > 0 ? "" : pcmk_strerror(rc), rc); free(op); } diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c index dde8b6950c..89bd95d222 100644 --- a/lib/pacemaker/pcmk_sched_allocate.c +++ b/lib/pacemaker/pcmk_sched_allocate.c @@ -1,2969 +1,2969 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_allocate); void set_alloc_actions(pe_working_set_t * data_set); extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set); extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); static void apply_remote_node_ordering(pe_working_set_t *data_set); static enum remote_connection_state get_remote_node_state(pe_node_t *node); enum remote_connection_state { remote_state_unknown = 0, remote_state_alive = 1, remote_state_resting = 2, remote_state_failed = 3, remote_state_stopped = 4 }; static const char * state2text(enum remote_connection_state state) { switch (state) { case remote_state_unknown: return "unknown"; case remote_state_alive: return "alive"; case remote_state_resting: return "resting"; case remote_state_failed: return "failed"; case remote_state_stopped: return "stopped"; } return "impossible"; } resource_alloc_functions_t resource_class_alloc_functions[] = { { native_merge_weights, native_color, native_create_actions, native_create_probe, native_internal_constraints, native_rsc_colocation_lh, native_rsc_colocation_rh, native_rsc_location, native_action_flags, native_update_actions, native_expand, native_append_meta, }, { group_merge_weights, group_color, group_create_actions, native_create_probe, group_internal_constraints, group_rsc_colocation_lh, group_rsc_colocation_rh, group_rsc_location, group_action_flags, group_update_actions, group_expand, group_append_meta, }, { clone_merge_weights, clone_color, clone_create_actions, clone_create_probe, clone_internal_constraints, clone_rsc_colocation_lh, clone_rsc_colocation_rh, clone_rsc_location, clone_action_flags, pcmk__multi_update_actions, clone_expand, clone_append_meta, }, { pcmk__bundle_merge_weights, pcmk__bundle_color, pcmk__bundle_create_actions, pcmk__bundle_create_probe, pcmk__bundle_internal_constraints, pcmk__bundle_rsc_colocation_lh, pcmk__bundle_rsc_colocation_rh, pcmk__bundle_rsc_location, pcmk__bundle_action_flags, pcmk__multi_update_actions, pcmk__bundle_expand, pcmk__bundle_append_meta, } }; gboolean update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line) { static unsigned long calls = 0; gboolean changed = FALSE; gboolean clear = is_set(flags, pe_action_clear); enum pe_action_flags last = action->flags; if (clear) { action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags); } else { action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags); } if (last != action->flags) { calls++; changed = TRUE; /* Useful for tracking down _who_ changed a specific flag */ /* CRM_ASSERT(calls != 534); */ clear_bit(flags, pe_action_clear); crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)", action->uuid, action->node ? action->node->details->uname : "[none]", clear ? 
"un-" : "", flags, last, action->flags, calls, source); } return changed; } static gboolean check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry, gboolean active_here, pe_working_set_t * data_set) { int attr_lpc = 0; gboolean force_restart = FALSE; gboolean delete_resource = FALSE; gboolean changed = FALSE; const char *value = NULL; const char *old_value = NULL; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for (; attr_lpc < DIMOF(attr_list); attr_lpc++) { value = crm_element_value(rsc->xml, attr_list[attr_lpc]); old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]); if (value == old_value /* i.e. NULL */ || crm_str_eq(value, old_value, TRUE)) { continue; } changed = TRUE; trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set); if (active_here) { force_restart = TRUE; crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s", rsc->id, node->details->uname, attr_list[attr_lpc], crm_str(old_value), crm_str(value)); } } if (force_restart) { /* make sure the restart happens */ stop_action(rsc, node, FALSE); set_bit(rsc->flags, pe_rsc_start_pending); delete_resource = TRUE; } else if (changed) { delete_resource = TRUE; } return delete_resource; } static void CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node, const char *reason, pe_working_set_t * data_set) { guint interval_ms = 0; action_t *cancel = NULL; const char *task = NULL; const char *call_id = NULL; const char *interval_ms_s = NULL; CRM_CHECK(xml_op != NULL, return); CRM_CHECK(active_node != NULL, return); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s", rsc->id, task, interval_ms, active_node->details->uname, (reason? 
reason : "unknown")); cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set); } static gboolean check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op, pe_working_set_t * data_set) { char *key = NULL; guint interval_ms = 0; const char *interval_ms_s = NULL; const op_digest_cache_t *digest_data = NULL; gboolean did_change = FALSE; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_secure = NULL; CRM_CHECK(active_node != NULL, return FALSE); interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if (interval_ms > 0) { xmlNode *op_match = NULL; /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Checking parameters for %s", key); op_match = find_rsc_op_entry(rsc, key); if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) { CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set); free(key); return TRUE; } else if (op_match == NULL) { pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname); free(key); return TRUE; } free(key); key = NULL; } crm_trace("Testing " CRM_OP_FMT " on %s", rsc->id, task, interval_ms, active_node->details->uname); if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) { /* Reload based on the start action not a probe */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) { /* Reload based on the start action not a migrate */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) { /* Reload based on the start action not a promote */ task = RSC_START; } digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set); if(is_set(data_set->flags, pe_flag_sanitized)) { digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); } if(digest_data->rc != RSC_DIGEST_MATCH && digest_secure && digest_data->digest_secure_calc && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) { if (is_set(data_set->flags, pe_flag_stdout)) { printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n", rsc->id, task, interval_ms, active_node->details->uname, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } } else if (digest_data->rc == RSC_DIGEST_RESTART) { /* Changes that force a restart */ pe_action_t *required = NULL; did_change = TRUE; key = generate_op_key(rsc->id, task, interval_ms); crm_log_xml_info(digest_data->params_restart, "params:restart"); required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set); } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) { /* Changes that can potentially be handled by a reload */ const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); did_change = TRUE; trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set); crm_log_xml_info(digest_data->params_all, "params:reload"); key = generate_op_key(rsc->id, task, interval_ms); if (interval_ms > 0) { action_t *op = NULL; #if 0 /* Always reload/restart 
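
/*
 * [Editor's sketch, illustrative only, not part of this change]
 * CancelXmlOp() and check_action_definition() nearby rebuild the operation
 * key from resource id, task, and interval. A portable stand-in for
 * generate_op_key(), assuming the "<rsc>_<task>_<interval-ms>" layout the
 * surrounding code relies on (the real one uses crm_strdup_printf()):
 */
#include <stdio.h>
#include <stdlib.h>

static char *
op_key(const char *rsc, const char *task, unsigned int interval_ms)
{
    int len = snprintf(NULL, 0, "%s_%s_%u", rsc, task, interval_ms);
    char *key = (len < 0) ? NULL : malloc((size_t) len + 1);

    if (key) {
        snprintf(key, (size_t) len + 1, "%s_%s_%u", rsc, task, interval_ms);
    }
    return key;
}

int
main(void)
{
    char *key = op_key("my-rsc", "monitor", 10000);

    printf("%s\n", key ? key : "(allocation failed)"); /* my-rsc_monitor_10000 */
    free(key);
    return 0;
}
/* end of sketch */
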
the entire resource */ ReloadRsc(rsc, active_node, data_set); #else /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */ op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_reschedule); #endif } else if (digest_restart) { pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id); /* Reload this resource */ ReloadRsc(rsc, active_node, data_set); free(key); } else { pe_action_t *required = NULL; pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id); /* Re-send the start/demote/promote op * Recurring ops will be detected independently */ required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); } } return did_change; } /*! * \internal * \brief Do deferred action checks after allocation * * \param[in] data_set Working set for cluster */ static void check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op, enum pe_check_parameters check, pe_working_set_t *data_set) { const char *reason = NULL; op_digest_cache_t *digest_data = NULL; switch (check) { case pe_check_active: if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { reason = "action definition changed"; } break; case pe_check_last_failure: digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s has no digest to compare", rsc->id, ID(rsc_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: reason = "resource parameters have changed"; break; } break; } if (reason) { pe__clear_failcount(rsc, node, reason, data_set); } } static void check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set) { GListPtr gIter = NULL; int offset = -1; guint interval_ms = 0; int stop_index = 0; int start_index = 0; const char *task = NULL; const char *interval_ms_s = NULL; xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; CRM_CHECK(node != NULL, return); if (is_set(rsc->flags, pe_rsc_orphan)) { resource_t *parent = uber_parent(rsc); if(parent == NULL || pe_rsc_is_clone(parent) == FALSE || is_set(parent->flags, pe_rsc_unique)) { pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id); DeleteRsc(rsc, node, FALSE, data_set); } else { pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id); } return; } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s", rsc->id, node->details->uname); return; } pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname); if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = 
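
/*
 * [Editor's sketch, illustrative only, not part of this change]
 * The surrounding loop walks the call-id-sorted operation history and skips
 * entries that predate the most recent start, or all entries when the
 * resource was stopped after its last start. The index values below are
 * assumed purely for illustration:
 */
#include <stdio.h>

int
main(void)
{
    /* a call-id-sorted history, as sort_op_by_callid() would produce */
    const char *ops[] = { "start", "monitor", "stop", "start", "monitor" };
    int start_index = 3; /* offset of the most recent start (assumed) */
    int stop_index = 2;  /* offset of the most recent stop (assumed) */

    for (int offset = 0; offset < 5; offset++) {
        if (start_index < stop_index) {
            continue; /* stopped after its last start: nothing to re-check */
        } else if (offset < start_index) {
            continue; /* entry predates the most recent start */
        }
        printf("re-check %s (offset %d)\n", ops[offset], offset);
    }
    return 0;
}
/* end of sketch */
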
(xmlNode *) gIter->data; offset++; if (start_index < stop_index) { /* stopped */ continue; } else if (offset < start_index) { /* action occurred prior to a start */ continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if ((interval_ms > 0) && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { // Maintenance mode cancels recurring operations CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if ((interval_ms > 0) || safe_str_eq(task, RSC_STATUS) || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || safe_str_eq(task, RSC_MIGRATED)) { /* If a resource operation failed, and the operation's definition * has changed, clear any fail count so they can be retried fresh. */ if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources to nodes yet, so if the * REMOTE_CONTAINER_HACK is used, we may calculate the digest * based on the literal "#uname" value rather than the properly * substituted value. That would mistakenly make the action * definition appear to have been changed. Defer the check until * later in this case. */ pe__add_param_check(rsc_op, rsc, node, pe_check_active, data_set); } else if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe__clear_failcount(rsc, node, "action definition changed", data_set); } } } g_list_free(sorted_op_list); } static GListPtr find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones, gboolean partial, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean match = FALSE; if (id == NULL) { return NULL; } if (rsc == NULL) { if (data_set == NULL) { return NULL; } for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } return result; } if (partial) { if (strstr(rsc->id, id)) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) { match = TRUE; } } else { if (strcmp(rsc->id, id) == 0) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } } if (match) { result = g_list_prepend(result, rsc); } if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } } return result; } static void check_actions(pe_working_set_t * data_set) { const char *id = NULL; node_t *node = NULL; xmlNode *lrm_rscs = NULL; xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); xmlNode *node_state = NULL; for (node_state = __xml_first_child_element(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { id = crm_element_value(node_state, XML_ATTR_ID); lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE); node = pe_find_node_id(data_set->nodes, id); if (node == NULL) { continue; /* Still need to check actions for a maintenance node to cancel existing monitor operations */ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) { crm_trace("Skipping param check for %s: can't run 
resources", node->details->uname); continue; } crm_trace("Processing node %s", node->details->uname); if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child_element(lrm_rscs); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if (xml_has_children(rsc_entry)) { GListPtr gIter = NULL; GListPtr result = NULL; const char *rsc_id = ID(rsc_entry); CRM_CHECK(rsc_id != NULL, return); result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set); for (gIter = result; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->variant != pe_native) { continue; } check_actions_for(rsc_entry, rsc, node, data_set); } g_list_free(result); } } } } } } } static void apply_placement_constraints(pe_working_set_t * data_set) { for (GList *gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { pe__location_t *cons = gIter->data; cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); } } static gboolean failcount_clear_action_exists(node_t * node, resource_t * rsc) { gboolean rc = FALSE; GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE); if (list) { rc = TRUE; } g_list_free(list); return rc; } /*! * \internal * \brief Force resource away if failures hit migration threshold * * \param[in,out] rsc Resource to check for failures * \param[in,out] node Node to check for failures * \param[in,out] data_set Cluster working set to update */ static void check_migration_threshold(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; resource_t *failed; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return; } // If we're ignoring failures, also ignore the migration threshold if (is_set(rsc->flags, pe_rsc_failure_ignored)) { return; } /* If there are no failures, there's no need to force away */ fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL, data_set); if (fail_count <= 0) { return; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); /* If failed resource has a parent, we'll force the parent away */ failed = rsc; if (is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(rsc); } if (countdown == 0) { resource_location(failed, node, -INFINITY, "__fail_limit__", data_set); crm_warn("Forcing %s away from %s after %d failures (max=%d)", failed->id, node->details->uname, fail_count, rsc->migration_threshold); } else { crm_info("%s can fail %d more times on %s before being forced off", failed->id, countdown, node->details->uname); } } static void common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; common_apply_stickiness(child_rsc, node, data_set); } return; } if (is_set(rsc->flags, pe_rsc_managed) - && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) { + && rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) { node_t *current = pe_find_node_id(rsc->running_on, node->details->id); node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (current == NULL) { } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) { resource_t 
*sticky_rsc = rsc; resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set); pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location" " (node=%s, weight=%d)", sticky_rsc->id, node->details->uname, rsc->stickiness); } else { GHashTableIter iter; node_t *nIter = NULL; pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric" " and node %s is not explicitly allowed", rsc->id, node->details->uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) { crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight); } } } /* Check the migration threshold only if a failcount clear action * has not already been placed for this resource on the node. * There is no sense in potentially forcing the resource from this * node if the failcount is being reset anyway. * * @TODO A clear_failcount operation can be scheduled in stage4() via * check_actions_for(), or in stage5() via check_params(). This runs in * stage2(), so it cannot detect those, meaning we might check the migration * threshold when we shouldn't -- worst case, we stop or move the resource, * then move it back next transition. */ if (failcount_clear_action_exists(node, rsc) == FALSE) { check_migration_threshold(rsc, node, data_set); } } void complex_set_cmds(resource_t * rsc) { GListPtr gIter = rsc->children; rsc->cmds = &resource_class_alloc_functions[rsc->variant]; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; complex_set_cmds(child_rsc); } } void set_alloc_actions(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; complex_set_cmds(rsc); } } static void calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data) { const char *key = (const char *)gKey; const char *value = (const char *)gValue; int *system_health = (int *)user_data; if (!gKey || !gValue || !user_data) { return; } if (crm_starts_with(key, "#health")) { int score; /* Convert the value into an integer */ score = char2score(value); /* Add it to the running total */ *system_health = merge_weights(score, *system_health); } } static gboolean apply_system_health(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy"); int base_health = 0; if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) { /* Prevent any accidental health -> score translation */ node_score_red = 0; node_score_yellow = 0; node_score_green = 0; return TRUE; } else if (safe_str_eq(health_strategy, "migrate-on-red")) { /* Resources on nodes which have health values of red are * weighted away from that node. */ node_score_red = -INFINITY; node_score_yellow = 0; node_score_green = 0; } else if (safe_str_eq(health_strategy, "only-green")) { /* Resources on nodes which have health values of red or yellow * are forced away from that node. 
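
/*
 * [Editor's sketch, illustrative only, not part of this change]
 * How the strategy-dependent red/yellow/green scores combine in
 * apply_system_health(): merge_weights() is simplified here to saturating
 * addition where -INFINITY always wins (the real score math also caps at
 * +INFINITY):
 */
#include <stdio.h>

#define MINUS_INF (-1000000) /* stand-in for Pacemaker's -INFINITY score */

static int
merge_weights(int a, int b)
{
    if (a <= MINUS_INF || b <= MINUS_INF) {
        return MINUS_INF; /* -INFINITY saturates */
    }
    return a + b;
}

int
main(void)
{
    /* "migrate-on-red" assigns red = -INFINITY, yellow = green = 0 */
    int node_score_red = MINUS_INF;
    int node_score_yellow = 0;
    int node_score_green = 0;
    int health = 0; /* base health */

    health = merge_weights(node_score_green, health);
    health = merge_weights(node_score_yellow, health);
    health = merge_weights(node_score_red, health); /* one red attribute */

    printf("combined health: %d\n", health); /* the node repels all resources */
    return 0;
}
/* end of sketch */
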
*/ node_score_red = -INFINITY; node_score_yellow = -INFINITY; node_score_green = 0; } else if (safe_str_eq(health_strategy, "progressive")) { /* Same as the above, but use the r/y/g scores provided by the user * Defaults are provided by the pe_prefs table * Also, custom health "base score" can be used */ base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0"); } else if (safe_str_eq(health_strategy, "custom")) { /* Requires the admin to configure the rsc_location constraints for * processing the stored health scores */ /* TODO: Check for the existence of appropriate node health constraints */ return TRUE; } else { crm_err("Unknown node health strategy: %s", health_strategy); return FALSE; } crm_info("Applying automated node health strategy: %s", health_strategy); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int system_health = base_health; node_t *node = (node_t *) gIter->data; /* Search through the node hash table for system health entries. */ g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health); crm_info(" Node %s has a combined system health of %d", node->details->uname, system_health); /* If the health is non-zero, then create a new rsc2node so that the * weight will be added later on. */ if (system_health != 0) { GListPtr gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set); } } } return TRUE; } gboolean stage0(pe_working_set_t * data_set) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); if (data_set->input == NULL) { return FALSE; } if (is_set(data_set->flags, pe_flag_have_status) == FALSE) { crm_trace("Calculating status"); cluster_status(data_set); } set_alloc_actions(data_set); apply_system_health(data_set); unpack_constraints(cib_constraints, data_set); return TRUE; } /* * Check nodes for resources started outside of the LRM */ gboolean probe_resources(pe_working_set_t * data_set) { action_t *probe_node_complete = NULL; for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED); if (node->details->online == FALSE) { if (pe__is_remote_node(node) && node->details->remote_rsc && (get_remote_node_state(node) == remote_state_failed)) { pe_fence_node(data_set, node, "the connection is unrecoverable"); } continue; } else if (node->details->unclean) { continue; } else if (node->details->rsc_discovery_enabled == FALSE) { /* resource discovery is disabled for this node */ continue; } if (probed != NULL && crm_is_true(probed) == FALSE) { action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname), CRM_OP_REPROBE, node, FALSE, TRUE, data_set); add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); continue; } for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set); } } return TRUE; } static void rsc_discover_filter(resource_t *rsc, node_t *node) { GListPtr gIter = rsc->children; resource_t *top = uber_parent(rsc); node_t *match; if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) { return; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *)
gIter->data; rsc_discover_filter(child_rsc, node); } match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match && match->rsc_discover_mode != pe_discover_exclusive) { match->weight = -INFINITY; } } /* * Count how many valid nodes we have (so we know the maximum number of * colors we can resolve). * * Apply node constraints (i.e. filter the "allowed_nodes" part of resources) */ gboolean stage2(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node == NULL) { /* error */ } else if (node->weight >= 0.0 /* global weight */ && node->details->online && node->details->type != node_ping) { data_set->max_valid_nodes++; } } apply_placement_constraints(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; node_t *node = (node_t *) gIter->data; gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; common_apply_stickiness(rsc, node, data_set); rsc_discover_filter(rsc, node); } } return TRUE; } /* * Create internal resource constraints before allocation */ gboolean stage3(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->internal_constraints(rsc, data_set); } return TRUE; } /* * Check for orphaned or redefined actions */ gboolean stage4(pe_working_set_t * data_set) { check_actions(data_set); return TRUE; } static void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } static gint sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data) { int rc = 0; int r1_weight = -INFINITY; int r2_weight = -INFINITY; const char *reason = "existence"; const GListPtr nodes = (GListPtr) data; const resource_t *resource1 = a; const resource_t *resource2 = b; node_t *r1_node = NULL; node_t *r2_node = NULL; GListPtr gIter = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; if (a == NULL && b == NULL) { goto done; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "no node list"; if (nodes == NULL) { goto done; } r1_nodes = rsc_merge_weights(convert_const_pointer(resource1), resource1->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes); r2_nodes = rsc_merge_weights(convert_const_pointer(resource2), resource2->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes); /* Current location score */ reason = "current location"; r1_weight = -INFINITY; r2_weight = -INFINITY; if (resource1->running_on) { r1_node = pe__current_node(resource1); r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id); if (r1_node != NULL) { r1_weight = r1_node->weight; } } if (resource2->running_on) { r2_node = pe__current_node(resource2); r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id); if (r2_node != NULL) { r2_weight = r2_node->weight; } } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "score"; for (gIter = nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; r1_node = 
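
/*
 * [Editor's sketch, illustrative only, not part of this change]
 * sort_rsc_process_order() above orders resources by priority, then by the
 * score on their current node, then by per-node scores. A reduced qsort()
 * version covering the first two criteria, with made-up resources:
 */
#include <stdio.h>
#include <stdlib.h>

struct rsc { const char *id; int priority; int current_weight; };

/* higher priority first; ties broken by the score on the current node */
static int
cmp_rsc(const void *a, const void *b)
{
    const struct rsc *r1 = a;
    const struct rsc *r2 = b;

    if (r1->priority != r2->priority) {
        return (r1->priority > r2->priority) ? -1 : 1;
    }
    if (r1->current_weight != r2->current_weight) {
        return (r1->current_weight > r2->current_weight) ? -1 : 1;
    }
    return 0;
}

int
main(void)
{
    struct rsc rscs[] = { { "db", 10, 5 }, { "web", 10, 9 }, { "cron", 0, 100 } };

    qsort(rscs, 3, sizeof(rscs[0]), cmp_rsc);
    for (int i = 0; i < 3; i++) {
        printf("%s\n", rscs[i].id); /* web, db, cron */
    }
    return 0;
}
/* end of sketch */
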
NULL; r2_node = NULL; r1_weight = -INFINITY; if (r1_nodes) { r1_node = g_hash_table_lookup(r1_nodes, node->details->id); } if (r1_node) { r1_weight = r1_node->weight; } r2_weight = -INFINITY; if (r2_nodes) { r2_node = g_hash_table_lookup(r2_nodes, node->details->id); } if (r2_node) { r2_weight = r2_node->weight; } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } } done: crm_trace("%s (%d) on %s %c %s (%d) on %s: %s", resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a", rc < 0 ? '>' : rc > 0 ? '<' : '=', resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason); if (r1_nodes) { g_hash_table_destroy(r1_nodes); } if (r2_nodes) { g_hash_table_destroy(r2_nodes); } return rc; } static void allocate_resources(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_have_remote_nodes)) { /* Force remote connection resources to be allocated first. This * also forces any colocation dependencies to be allocated as well */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == FALSE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); /* For remote node connection resources, always prefer the partial * migration target during resource allocation, if the rsc is in the * middle of a migration. */ rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set); } } /* now do the rest of the resources */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == TRUE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); rsc->cmds->allocate(rsc, NULL, data_set); } } /* We always use pe_order_preserve with these convenience functions to exempt * internally generated constraints from the prohibition of user constraints * involving remote connection resources. * * The start ordering additionally uses pe_order_runnable_left so that the * specified action is not runnable if the start is not runnable. */ static inline void order_start_then_action(resource_t *lh_rsc, action_t *rh_action, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_rsc && rh_action && data_set) { custom_action_order(lh_rsc, start_key(lh_rsc), NULL, rh_action->rsc, NULL, rh_action, pe_order_preserve | pe_order_runnable_left | extra, data_set); } } static inline void order_action_then_stop(action_t *lh_action, resource_t *rh_rsc, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_action && rh_rsc && data_set) { custom_action_order(lh_action->rsc, NULL, lh_action, rh_rsc, stop_key(rh_rsc), NULL, pe_order_preserve | extra, data_set); } } // Clear fail counts for orphaned rsc on all online nodes static void cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node->details->online && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe_action_t *clear_op = NULL; clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set); /* We can't use order_action_then_stop() here because its * pe_order_preserve breaks things */ custom_action_order(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc), NULL, pe_order_optional, data_set); } } } gboolean stage5(pe_working_set_t * data_set) { GListPtr gIter = NULL; int log_prio = show_utilization? 
LOG_STDOUT : utilization_log_level; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr nodes = g_list_copy(data_set->nodes); nodes = sort_nodes_by_weight(nodes, NULL, data_set); data_set->resources = g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes); g_list_free(nodes); } gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(log_prio, "Original", node); } crm_trace("Allocating services"); /* Take (next) highest resource, assign it and create its actions */ allocate_resources(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(log_prio, "Remaining", node); } // Process deferred action checks pe__foreach_param_check(data_set, check_params); pe__free_param_checks(data_set); if (is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Calculating needed probes"); /* This code probably needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=100: With probes: ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints 36s ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph Without probes: ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph */ probe_resources(data_set); } crm_trace("Handle orphans"); if (is_set(data_set->flags, 
pe_flag_stop_rsc_orphans)) { for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; /* There's no need to recurse into rsc->children because those * should just be unallocated clone instances. */ if (is_set(rsc->flags, pe_rsc_orphan)) { cleanup_orphans(rsc, data_set); } } } crm_trace("Creating actions"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->create_actions(rsc, data_set); } crm_trace("Creating done"); return TRUE; } static gboolean is_managed(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_managed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (is_managed(child_rsc)) { return TRUE; } } return FALSE; } static gboolean any_managed_resources(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (is_managed(rsc)) { return TRUE; } } return FALSE; } /*! * \internal * \brief Create pseudo-op for guest node fence, and order relative to it * * \param[in] node Guest node to fence * \param[in] data_set Working set of CIB state */ static void fence_guest(pe_node_t *node, pe_working_set_t *data_set) { resource_t *container = node->details->remote_rsc->container; pe_action_t *stop = NULL; pe_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (container recovery). */ const char *fence_action = "off"; /* Check whether guest's container resource has any explicit stop or * start (the stop may be implied by fencing of the guest's host). */ if (container) { stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL); if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) { fence_action = "reboot"; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set); update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable, __FUNCTION__, __LINE__); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if(stop && is_set(stop->flags, pe_action_pseudo)) { pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set); crm_info("Implying guest node %s is down (action %d) after %s fencing", node->details->uname, stonith_op->id, stop->node->details->uname); order_actions(parent_stonith_op, stonith_op, pe_order_runnable_left|pe_order_implies_then); } else if (stop) { order_actions(stop, stonith_op, pe_order_runnable_left|pe_order_implies_then); crm_info("Implying guest node %s is down (action %d) " "after container %s is stopped (action %d)", node->details->uname, stonith_op->id, container->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. 
However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any container (re-)probe. */ stop = find_first_action(node->details->remote_rsc->actions, NULL, RSC_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pe_order_optional); crm_info("Implying guest node %s is down (action %d) " "after connection is stopped (action %d)", node->details->uname, stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. */ crm_info("Implying guest node %s is down (action %d) ", node->details->uname, stonith_op->id); } } /* Order/imply other actions relative to pseudo-fence as with real fence */ pcmk__order_vs_fence(stonith_op, data_set); } /* * Create dependencies for stonith and shutdown operations */ gboolean stage6(pe_working_set_t * data_set) { action_t *dc_down = NULL; action_t *stonith_op = NULL; gboolean integrity_lost = FALSE; gboolean need_stonith = TRUE; GListPtr gIter; GListPtr stonith_ops = NULL; GList *shutdown_ops = NULL; /* Remote ordering constraints need to happen prior to calculating fencing * because it is one more place we will mark the node as dirty. * * A nice side effect of doing them early is that apply_*_ordering() can be * simpler because pe_fence_node() has already done some of the work. */ crm_trace("Creating remote ordering constraints"); apply_remote_node_ordering(data_set); crm_trace("Processing fencing and shutdown cases"); if (any_managed_resources(data_set) == FALSE) { crm_notice("Delaying fencing operations until there are resources to manage"); need_stonith = FALSE; } /* Check each node for stonith/shutdown */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* Guest nodes are "fenced" by recovering their container resource, * so handle them separately. */ if (pe__is_guest_node(node)) { if (node->details->remote_requires_reset && need_stonith && pe_can_fence(data_set, node)) { fence_guest(node, data_set); } continue; } stonith_op = NULL; if (node->details->unclean && need_stonith && pe_can_fence(data_set, node)) { stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set); pe_warn("Scheduling Node %s for STONITH", node->details->uname); pcmk__order_vs_fence(stonith_op, data_set); if (node->details->is_dc) { // Remember if the DC is being fenced dc_down = stonith_op; } else { if (is_not_set(data_set->flags, pe_flag_concurrent_fencing) && (stonith_ops != NULL)) { /* Concurrent fencing is disabled, so order each non-DC * fencing in a chain. If there is any DC fencing or * shutdown, it will be ordered after the last action in the * chain later. */ order_actions((pe_action_t *) stonith_ops->data, stonith_op, pe_order_optional); } // Remember all non-DC fencing actions in a separate list stonith_ops = g_list_prepend(stonith_ops, stonith_op); } } else if (node->details->online && node->details->shutdown && /* TODO define what a shutdown op means for a remote node. * For now we do not send shutdown operations for remote nodes, but * if we can come up with a good use for this in the future, we will. 
*/ pe__is_guest_or_remote_node(node) == FALSE) { action_t *down_op = sched_shutdown_op(node, data_set); if (node->details->is_dc) { // Remember if the DC is being shut down dc_down = down_op; } else { // Remember non-DC shutdowns for later ordering shutdown_ops = g_list_prepend(shutdown_ops, down_op); } } if (node->details->unclean && stonith_op == NULL) { integrity_lost = TRUE; pe_warn("Node %s is unclean!", node->details->uname); } } if (integrity_lost) { if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED"); pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE"); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) { crm_notice("Cannot fence unclean nodes until quorum is" " attained (or no-quorum-policy is set to ignore)"); } } if (dc_down != NULL) { /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated * DC elections. However, we don't want to order non-DC shutdowns before * a DC *fencing*, because even though we don't want a node that's * shutting down to become DC, the DC fencing could be ordered before a * clone stop that's also ordered before the shutdowns, thus leading to * a graph loop. */ if (safe_str_eq(dc_down->task, CRM_OP_SHUTDOWN)) { for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) { action_t *node_stop = (action_t *) gIter->data; crm_debug("Ordering shutdown on %s before %s on DC %s", node_stop->node->details->uname, dc_down->task, dc_down->node->details->uname); order_actions(node_stop, dc_down, pe_order_optional); } } // Order any non-DC fencing before any DC fencing or shutdown if (is_set(data_set->flags, pe_flag_concurrent_fencing)) { /* With concurrent fencing, order each non-DC fencing action * separately before any DC fencing or shutdown. */ for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) { order_actions((pe_action_t *) gIter->data, dc_down, pe_order_optional); } } else if (stonith_ops) { /* Without concurrent fencing, the non-DC fencing actions are * already ordered relative to each other, so we just need to order * the DC fencing after the last action in the chain (which is the * first item in the list). */ order_actions((pe_action_t *) stonith_ops->data, dc_down, pe_order_optional); } } g_list_free(stonith_ops); g_list_free(shutdown_ops); return TRUE; } /* * Determine the sets of independent actions and the correct order for the * actions in each set. 
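
/*
 * [Editor's sketch, illustrative only, not part of this change]
 * When concurrent fencing is disabled, stage6() above serializes non-DC
 * fencing by ordering each new fence op after the head of a prepend-built
 * list, forming a chain; a pointer-based analogue of that bookkeeping:
 */
#include <stdio.h>

struct fence_op { const char *node; struct fence_op *next; };

int
main(void)
{
    struct fence_op a = { "node1", NULL }, b = { "node2", NULL };
    struct fence_op c = { "node3", NULL };
    struct fence_op *all[] = { &a, &b, &c };
    struct fence_op *stonith_ops = NULL; /* newest op always at the head */

    for (int i = 0; i < 3; i++) {
        if (stonith_ops != NULL) {
            /* analogue of order_actions(stonith_ops->data, stonith_op, ...) */
            printf("fence %s only after fence %s\n",
                   all[i]->node, stonith_ops->node);
        }
        all[i]->next = stonith_ops; /* g_list_prepend() analogue */
        stonith_ops = all[i];
    }

    /* any DC fencing or shutdown is then ordered after the head (last op) */
    printf("DC action ordered after fence %s\n", stonith_ops->node);
    return 0;
}
/* end of sketch */
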
* * Mark dependencies of un-runnable actions un-runnable * */ static GListPtr find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if (list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *task = NULL; guint interval_ms = 0; if (parse_op_key(original_key, NULL, &task, &interval_ms)) { key = generate_op_key(rsc->id, task, interval_ms); list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } free(key); free(task); } return list; } static void rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc, pe__ordering_t *order) { GListPtr gIter = NULL; GListPtr rh_actions = NULL; action_t *rh_action = NULL; enum pe_ordering type; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); type = order->type; rh_action = order->rh_action; crm_trace("Processing RH of ordering constraint %d", order->id); if (rh_action != NULL) { rh_actions = g_list_prepend(NULL, rh_action); } else if (rsc != NULL) { rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task); } if (rh_actions == NULL) { pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..." " ignoring", rsc->id, order->rh_action_task); if (lh_action) { pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid); } return; } if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) { pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid, order->rh_action_task); clear_bit(type, pe_order_implies_then); } gIter = rh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *rh_action_iter = (action_t *) gIter->data; if (lh_action) { order_actions(lh_action, rh_action_iter, type); } else if (type & pe_order_implies_then) { update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type); } } g_list_free(rh_actions); } static void rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order, pe_working_set_t *data_set) { GListPtr gIter = NULL; GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_trace("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if (lh_action != NULL) { lh_actions = g_list_prepend(NULL, lh_action); } else { lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task); } if (lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *op_type = NULL; guint interval_ms = 0; parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms); key = generate_op_key(lh_rsc->id, op_type, interval_ms); if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else { pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); lh_action = 
custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); lh_actions = g_list_prepend(NULL, lh_action); } free(op_type); } gIter = lh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *lh_action_iter = (action_t *) gIter->data; if (rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if (rh_rsc) { rsc_order_then(lh_action_iter, rh_rsc, order); } else if (order->rh_action) { order_actions(lh_action_iter, order->rh_action, order->type); } } g_list_free(lh_actions); } extern void update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set); static int is_recurring_action(action_t *action) { const char *interval_ms_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); guint interval_ms = crm_parse_ms(interval_ms_s); return (interval_ms > 0); } static void apply_container_ordering(action_t *action, pe_working_set_t *data_set) { /* VMs are also classified as containers for these purposes... in * that they both involve a 'thing' running on a real or remote * cluster node. * * This allows us to be smarter about the type and extent of * recovery actions required in various scenarios */ resource_t *remote_rsc = NULL; resource_t *container = NULL; enum action_tasks task = text2task(action->task); CRM_ASSERT(action->rsc); CRM_ASSERT(action->node); CRM_ASSERT(pe__is_guest_or_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); container = remote_rsc->container; CRM_ASSERT(container); if(is_set(container->flags, pe_rsc_failed)) { pe_fence_node(data_set, action->node, "container failed"); } crm_trace("Order %s action %s relative to %s%s for %s%s", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, is_set(container->flags, pe_rsc_failed)? "failed " : "", container->id); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: /* Force resource recovery if the container is recovered */ order_start_then_action(container, action, pe_order_implies_then, data_set); /* Wait for the connection resource to be up too */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); break; case stop_rsc: case action_demote: if (is_set(container->flags, pe_rsc_failed)) { /* When the container representing a guest node fails, any stop * or demote actions for resources running on the guest node * are implied by the container stopping. This is similar to * how fencing operations work for cluster nodes and remote * nodes. */ } else { /* Ensure the operation happens before the connection is brought * down. * * If we really wanted to, we could order these after the * connection start, IFF the container's current role was * stopped (otherwise we re-introduce an ordering loop when the * connection is restarting). 
*/ order_action_then_stop(action, remote_rsc, pe_order_none, data_set); } break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ if(task != no_action) { order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; } } static enum remote_connection_state get_remote_node_state(pe_node_t *node) { resource_t *remote_rsc = NULL; node_t *cluster_node = NULL; CRM_ASSERT(node); remote_rsc = node->details->remote_rsc; CRM_ASSERT(remote_rsc); cluster_node = pe__current_node(remote_rsc); /* If the cluster node the remote connection resource resides on * is unclean or went offline, we can't process any operations * on that remote node until after it starts elsewhere. */ if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) { /* The connection resource is not going to run anywhere */ if (cluster_node && cluster_node->details->unclean) { /* The remote connection is failed because its resource is on a * failed node and can't be recovered elsewhere, so we must fence. */ return remote_state_failed; } if (is_not_set(remote_rsc->flags, pe_rsc_failed)) { /* Connection resource is cleanly stopped */ return remote_state_stopped; } /* Connection resource is failed */ if ((remote_rsc->next_role == RSC_ROLE_STOPPED) && remote_rsc->remote_reconnect_ms && node->details->remote_was_fenced && !pe__shutdown_requested(node)) { /* We won't know whether the connection is recoverable until the * reconnect interval expires and we reattempt connection. */ return remote_state_unknown; } /* The remote connection is in a failed state. If there are any * resources known to be active on it (stop) or in an unknown state * (probe), we must assume the worst and fence it. */ return remote_state_failed; } else if (cluster_node == NULL) { /* Connection is recoverable but not currently running anywhere, see if we can recover it first */ return remote_state_unknown; } else if(cluster_node->details->unclean == TRUE || cluster_node->details->online == FALSE) { /* Connection is running on a dead node, see if we can recover it first */ return remote_state_resting; - } else if (g_list_length(remote_rsc->running_on) > 1 + } else if (pcmk__list_of_multiple(remote_rsc->running_on) && remote_rsc->partial_migration_source && remote_rsc->partial_migration_target) { /* We're in the middle of migrating a connection resource, * wait until after the resource migrates before performing * any actions. */ return remote_state_resting; } return remote_state_alive; } /*! * \internal * \brief Order actions on remote node relative to actions for the connection */ static void apply_remote_ordering(action_t *action, pe_working_set_t *data_set) { resource_t *remote_rsc = NULL; enum action_tasks task = text2task(action->task); enum remote_connection_state state = get_remote_node_state(action->node); enum pe_ordering order_opts = pe_order_none; if (action->rsc == NULL) { return; } CRM_ASSERT(action->node); CRM_ASSERT(pe__is_guest_or_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); crm_trace("Order %s action %s relative to %s%s (state: %s)", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? 
"failed " : "", remote_rsc->id, state2text(state)); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: order_opts = pe_order_none; if (state == remote_state_failed) { /* Force recovery, by making this action required */ order_opts |= pe_order_implies_then; } /* Ensure connection is up before running this action */ order_start_then_action(remote_rsc, action, order_opts, data_set); break; case stop_rsc: if(state == remote_state_alive) { order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else if(state == remote_state_failed) { /* The resource is active on the node, but since we don't have a * valid connection, the only way to stop the resource is by * fencing the node. There is no need to order the stop relative * to the remote connection, since the stop will become implied * by the fencing. */ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable"); } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) { /* State must be remote_state_unknown or remote_state_stopped. * Since the connection is not coming back up in this * transition, stop this resource first. */ order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else { /* The connection is going to be started somewhere else, so * stop this resource after that completes. */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; case action_demote: /* Only order this demote relative to the connection start if the * connection isn't being torn down. Otherwise, the demote would be * blocked because the connection start would not be allowed. */ if(state == remote_state_resting || state == remote_state_unknown) { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } /* Otherwise we can rely on the stop ordering */ break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } else { node_t *cluster_node = pe__current_node(remote_rsc); if(task == monitor_rsc && state == remote_state_failed) { /* We would only be here if we do not know the * state of the resource on the remote node. * Since we have no way to find out, it is * necessary to fence the node. */ pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable"); } if(cluster_node && state == remote_state_stopped) { /* The connection is currently up, but is going * down permanently. 
* * Make sure we check services are actually * stopped _before_ we let the connection get * closed */ order_action_then_stop(action, remote_rsc, pe_order_runnable_left, data_set); } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } } break; } } static void apply_remote_node_ordering(pe_working_set_t *data_set) { if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) { return; } for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; resource_t *remote = NULL; // We are only interested in resource actions if (action->rsc == NULL) { continue; } /* Special case: If we are clearing the failcount of an actual * remote connection resource, then make sure this happens before * any start of the resource in this transition. */ if (action->rsc->is_remote_node && safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { custom_action_order(action->rsc, NULL, action, action->rsc, generate_op_key(action->rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); continue; } // We are only interested in actions allocated to a node if (action->node == NULL) { continue; } if (!pe__is_guest_or_remote_node(action->node)) { continue; } /* We are only interested in real actions. * * @TODO This is probably wrong; pseudo-actions might be converted to * real actions and vice versa later in update_actions() at the end of * stage7(). */ if (is_set(action->flags, pe_action_pseudo)) { continue; } remote = action->node->details->remote_rsc; if (remote == NULL) { // Orphaned continue; } /* Another special case: if a resource is moving to a Pacemaker Remote * node, order the stop on the original node after any start of the * remote connection. This ensures that if the connection fails to * start, we leave the resource running on the original node. */ if (safe_str_eq(action->task, RSC_START)) { for (GList *item = action->rsc->actions; item != NULL; item = item->next) { pe_action_t *rsc_action = item->data; if ((rsc_action->node->details != action->node->details) && safe_str_eq(rsc_action->task, RSC_STOP)) { custom_action_order(remote, start_key(remote), NULL, action->rsc, NULL, rsc_action, pe_order_optional, data_set); } } } /* The action occurs across a remote connection, so create * ordering constraints that guarantee the action occurs while the node * is active (after start, before stop ... things like that). * * This is somewhat brittle in that we need to make sure the results of * this ordering are compatible with the result of get_router_node(). * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part * of this logic rather than action2xml(). */ if (remote->container) { crm_trace("Container ordering for %s", action->uuid); apply_container_ordering(action, data_set); } else { crm_trace("Remote ordering for %s", action->uuid); apply_remote_ordering(action, data_set); } } } static gboolean order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action) { /* No need to probe the resource on the node that is being * unfenced. Otherwise it might introduce transition loop * since probe will be performed after the node is * unfenced. 
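* As a hypothetical example: unfencing node N schedules a stonith "on" action for N, and any probe on N must wait for that unfencing; ordering the unfencing after the probe as well would close the loop.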
*/ if (safe_str_eq(rh_action->task, CRM_OP_FENCE) && probe->node && rh_action->node && probe->node->details == rh_action->node->details) { const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action"); if (safe_str_eq(op, "on")) { return TRUE; } } // Shutdown waits for probe to complete only if it's on the same node if ((safe_str_eq(rh_action->task, CRM_OP_SHUTDOWN)) && probe->node && rh_action->node && probe->node->details != rh_action->node->details) { return TRUE; } return FALSE; } static void order_first_probes_imply_stops(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { pe__ordering_t *order = gIter->data; enum pe_ordering order_type = pe_order_optional; pe_resource_t *lh_rsc = order->lh_rsc; pe_resource_t *rh_rsc = order->rh_rsc; pe_action_t *lh_action = order->lh_action; pe_action_t *rh_action = order->rh_action; const char *lh_action_task = order->lh_action_task; const char *rh_action_task = order->rh_action_task; GListPtr probes = NULL; GListPtr rh_actions = NULL; GListPtr pIter = NULL; if (lh_rsc == NULL) { continue; } else if (rh_rsc && lh_rsc == rh_rsc) { continue; } if (lh_action == NULL && lh_action_task == NULL) { continue; } if (rh_action == NULL && rh_action_task == NULL) { continue; } /* Technically, a probe is expected to return "not running", which can serve * as an alternative to a stop action while the resource's status is still * unknown. */ if (lh_action && safe_str_neq(lh_action->task, RSC_STOP)) { continue; } else if (lh_action == NULL && lh_action_task && crm_ends_with(lh_action_task, "_" RSC_STOP "_0") == FALSE) { continue; } /* Do not probe a resource inside a stopping container. Otherwise it might * introduce a transition loop, since the probe would be performed after the * container starts again. */ if (rh_rsc && lh_rsc->container == rh_rsc) { if (rh_action && safe_str_eq(rh_action->task, RSC_STOP)) { continue; } else if (rh_action == NULL && rh_action_task && crm_ends_with(rh_action_task, "_" RSC_STOP "_0")) { continue; } } if (order->type == pe_order_none) { continue; } // Preserve the order options for future filtering if (is_set(order->type, pe_order_apply_first_non_migratable)) { set_bit(order_type, pe_order_apply_first_non_migratable); } if (is_set(order->type, pe_order_same_node)) { set_bit(order_type, pe_order_same_node); } // Keep the order types for future filtering if (order->type == pe_order_anti_colocation || order->type == pe_order_load) { order_type = order->type; } probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE); if (probes == NULL) { continue; } if (rh_action) { rh_actions = g_list_prepend(rh_actions, rh_action); } else if (rh_rsc && rh_action_task) { rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL); } if (rh_actions == NULL) { g_list_free(probes); continue; } crm_trace("Processing for LH probe based on ordering constraint %s -> %s" " (id=%d, type=%.6x)", lh_action ? lh_action->uuid : lh_action_task, rh_action ?
rh_action->uuid : rh_action_task, order->id, order->type); for (pIter = probes; pIter != NULL; pIter = pIter->next) { pe_action_t *probe = (pe_action_t *) pIter->data; GListPtr rIter = NULL; for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) { pe_action_t *rh_action_iter = (pe_action_t *) rIter->data; if (order_first_probe_unneeded(probe, rh_action_iter)) { continue; } order_actions(probe, rh_action_iter, order_type); } } g_list_free(rh_actions); g_list_free(probes); } } static void order_first_probe_then_restart_repromote(pe_action_t * probe, pe_action_t * after, pe_working_set_t * data_set) { GListPtr gIter = NULL; bool interleave = FALSE; pe_resource_t *compatible_rsc = NULL; if (probe == NULL || probe->rsc == NULL || probe->rsc->variant != pe_native) { return; } if (after == NULL // Avoid running into any possible loop || is_set(after->flags, pe_action_tracking)) { return; } if (safe_str_neq(probe->task, RSC_STATUS)) { return; } pe_set_action_bit(after, pe_action_tracking); crm_trace("Processing based on %s %s -> %s %s", probe->uuid, probe->node ? probe->node->details->uname: "", after->uuid, after->node ? after->node->details->uname : ""); if (after->rsc /* Better not build a dependency directly with a clone/group. * We are going to proceed through the ordering chain and build * dependencies with its children. */ && after->rsc->variant == pe_native && probe->rsc != after->rsc) { GListPtr then_actions = NULL; enum pe_ordering probe_order_type = pe_order_optional; if (safe_str_eq(after->task, RSC_START)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE); } else if (safe_str_eq(after->task, RSC_PROMOTE)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE); } for (gIter = then_actions; gIter != NULL; gIter = gIter->next) { pe_action_t *then = (pe_action_t *) gIter->data; // Skip any pseudo action which for example is implied by fencing if (is_set(then->flags, pe_action_pseudo)) { continue; } order_actions(probe, then, probe_order_type); } g_list_free(then_actions); } if (after->rsc && after->rsc->variant > pe_group) { const char *interleave_s = g_hash_table_lookup(after->rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); if (interleave) { /* For an interleaved clone, we should build a dependency only * with the relevant clone child. */ compatible_rsc = find_compatible_child(probe->rsc, after->rsc, RSC_ROLE_UNKNOWN, FALSE, data_set); } } for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data; /* pe_order_implies_then is the reason why a required A.start * implies/enforces B.start to be required too, which is the cause of * B.restart/re-promote. * * Not sure about pe_order_implies_then_on_node though. It's now only * used for unfencing case, which tends to introduce transition * loops... */ if (is_not_set(after_wrapper->type, pe_order_implies_then)) { /* The order type between a group/clone and its child such as * B.start-> B_child.start is: * pe_order_implies_first_printed | pe_order_runnable_left * * Proceed through the ordering chain and build dependencies with * its children. */ if (after->rsc == NULL || after->rsc->variant < pe_group || probe->rsc->parent == after->rsc || after_wrapper->action->rsc == NULL || after_wrapper->action->rsc->variant > pe_group || after->rsc != after_wrapper->action->rsc->parent) { continue; } /* Proceed to the children of a group or a non-interleaved clone. 
* For an interleaved clone, proceed only to the relevant child. */ if (after->rsc->variant > pe_group && interleave == TRUE && (compatible_rsc == NULL || compatible_rsc != after_wrapper->action->rsc)) { continue; } } crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)", after->uuid, after->node ? after->node->details->uname: "", after_wrapper->action->uuid, after_wrapper->action->node ? after_wrapper->action->node->details->uname : "", after_wrapper->type); order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set); } } static void clear_actions_tracking_flag(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (is_set(action->flags, pe_action_tracking)) { pe_clear_action_bit(action, pe_action_tracking); } } } static void order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; GListPtr probes = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t * child = (pe_resource_t *) gIter->data; order_first_rsc_probes(child, data_set); } if (rsc->variant != pe_native) { return; } probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); for (gIter = probes; gIter != NULL; gIter= gIter->next) { pe_action_t *probe = (pe_action_t *) gIter->data; GListPtr aIter = NULL; for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) { pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data; order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set); clear_actions_tracking_flag(data_set); } } g_list_free(probes); } static void order_first_probes(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; order_first_rsc_probes(rsc, data_set); } order_first_probes_imply_stops(data_set); } static void order_then_probes(pe_working_set_t * data_set) { #if 0 GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; /* Given "A then B", we would prefer to wait for A to be * started before probing B. * * If A was a filesystem on which the binaries and data for B * lived, it would have been useful if the author of B's agent * could assume that A is running before B.monitor will be * called. * * However we can't _only_ probe once A is running, otherwise * we'd not detect the state of B if A could not be started * for some reason. * * In practice however, we cannot even do an opportunistic * version of this because B may be moving: * * B.probe -> B.start * B.probe -> B.stop * B.stop -> B.start * A.stop -> A.start * A.start -> B.probe * * So far so good, but if we add the result of this code: * * B.stop -> A.stop * * Then we get a loop: * * B.probe -> B.stop -> A.stop -> A.start -> B.probe * * We could kill the 'B.probe -> B.stop' dependency, but that * could mean stopping B "too" soon, because B.start must wait * for the probes to complete. * * Another option is to allow it only if A is a non-unique * clone with clone-max == node-max (since we'll never be * moving it). However, we could still be stopping one * instance at the same time as starting another. * The complexity of checking for allowed conditions combined * with the ever narrowing usecase suggests that this code * should remain disabled until someone gets smarter. 
*/ action_t *start = NULL; GListPtr actions = NULL; GListPtr probes = NULL; actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE); if (actions) { start = actions->data; g_list_free(actions); } if(start == NULL) { crm_err("No start action for %s", rsc->id); continue; } probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); for (actions = start->actions_before; actions != NULL; actions = actions->next) { action_wrapper_t *before = (action_wrapper_t *) actions->data; GListPtr pIter = NULL; action_t *first = before->action; resource_t *first_rsc = first->rsc; if(first->required_runnable_before) { GListPtr clone_actions = NULL; for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) { before = (action_wrapper_t *) clone_actions->data; crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid); CRM_ASSERT(before->action->rsc); first_rsc = before->action->rsc; break; } } else if(safe_str_neq(first->task, RSC_START)) { crm_trace("Not a start op %s for %s", first->uuid, start->uuid); } if(first_rsc == NULL) { continue; } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) { crm_trace("Same parent %s for %s", first_rsc->id, start->uuid); continue; } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) { crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid); continue; } crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant); for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; crm_err("Ordering %s before %s", first->uuid, probe->uuid); order_actions(first, probe, pe_order_optional); } } } #endif } static void order_probes(pe_working_set_t * data_set) { order_first_probes(data_set); order_then_probes(data_set); } gboolean stage7(pe_working_set_t * data_set) { GList *gIter = NULL; crm_trace("Applying ordering constraints"); /* Don't ask me why, but apparently they need to be processed in * the order they were created in... 
go figure * * Also g_list_append() has horrendous performance characteristics * So we need to use g_list_prepend() and then reverse the list here */ data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints); for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { pe__ordering_t *order = gIter->data; resource_t *rsc = order->lh_rsc; crm_trace("Applying ordering constraint: %d", order->id); if (rsc != NULL) { crm_trace("rsc_action-to-*"); rsc_order_first(rsc, order, data_set); continue; } rsc = order->rh_rsc; if (rsc != NULL) { crm_trace("action-to-rsc_action"); rsc_order_then(order->lh_action, rsc, order); } else { crm_trace("action-to-action"); order_actions(order->lh_action, order->rh_action, order->type); } } for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_colo_start_chain(action, data_set); } crm_trace("Ordering probes"); order_probes(data_set); crm_trace("Updating %d actions", g_list_length(data_set->actions)); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_action(action, data_set); } // Check for invalid orderings for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; pe_action_wrapper_t *input = NULL; for (GList *input_iter = action->actions_before; input_iter != NULL; input_iter = input_iter->next) { input = (pe_action_wrapper_t *) input_iter->data; if (pcmk__ordering_is_invalid(action, input)) { input->type = pe_order_none; } } } LogNodeActions(data_set, FALSE); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; LogActions(rsc, data_set, FALSE); } return TRUE; } static int transition_id = -1; /*! 
* \internal * \brief Log a message after calculating a transition * * \param[in] filename Where transition input is stored */ void pcmk__log_transition_summary(const char *filename) { if (was_processing_error) { crm_err("Calculated transition %d (with errors), saving inputs in %s", transition_id, filename); } else if (was_processing_warning) { crm_warn("Calculated transition %d (with warnings), saving inputs in %s", transition_id, filename); } else { crm_notice("Calculated transition %d, saving inputs in %s", transition_id, filename); } if (crm_config_error) { crm_notice("Configuration errors found during scheduler processing," " please run \"crm_verify -L\" to identify issues"); } } /* * Create a dependency graph to send to the transitioner (via the controller) */ gboolean stage8(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *value = NULL; transition_id++; crm_trace("Creating transition graph %d.", transition_id); data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); value = pe_pref(data_set->config_hash, "cluster-delay"); crm_xml_add(data_set->graph, "cluster-delay", value); value = pe_pref(data_set->config_hash, "stonith-timeout"); crm_xml_add(data_set->graph, "stonith-timeout", value); crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY"); if (is_set(data_set->flags, pe_flag_start_failure_fatal)) { crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(data_set->graph, "failed-start-offset", "1"); } value = pe_pref(data_set->config_hash, "batch-limit"); crm_xml_add(data_set->graph, "batch-limit", value); crm_xml_add_int(data_set->graph, "transition_id", transition_id); value = pe_pref(data_set->config_hash, "migration-limit"); if (crm_int_helper(value, NULL) > 0) { crm_xml_add(data_set->graph, "migration-limit", value); } if (data_set->recheck_by > 0) { char *recheck_epoch = NULL; recheck_epoch = crm_strdup_printf("%llu", (long long) data_set->recheck_by); crm_xml_add(data_set->graph, "recheck-by", recheck_epoch); free(recheck_epoch); } /* errors... slist_iter(action, action_t, action_list, lpc, if(action->optional == FALSE && action->runnable == FALSE) { print_action("Ignoring", action, TRUE); } ); */ /* The following code will de-duplicate action inputs, so nothing past this * should rely on the action input type flags retaining their original * values. 
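* (The de-duplication is assumed to happen as the actions below are expanded into graph elements.)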
*/ gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id); rsc->cmds->expand(rsc, data_set); } crm_log_xml_trace(data_set->graph, "created resource-driven action list"); /* pseudo action to distribute list of nodes with maintenance state update */ add_maintenance_update(data_set); /* catch any non-resource specific actions */ crm_trace("processing non-resource actions"); gIter = data_set->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->rsc && action->node && action->node->details->shutdown && is_not_set(action->rsc->flags, pe_rsc_maintenance) && is_not_set(action->flags, pe_action_optional) && is_not_set(action->flags, pe_action_runnable) && crm_str_eq(action->task, RSC_STOP, TRUE) ) { /* Eventually we should just ignore the 'fence' case * But for now it's the best way to detect (in CTS) when * CIB resource updates are being lost */ if (is_set(data_set->flags, pe_flag_have_quorum) || data_set->no_quorum_policy == no_quorum_ignore) { crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)", action->node->details->unclean ? "fence" : "shut down", action->node->details->uname, action->rsc->id, is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked", is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "", action->uuid); } } graph_element_from_action(action, data_set); } crm_log_xml_trace(data_set->graph, "created generic action list"); crm_trace("Created transition graph %d.", transition_id); return TRUE; } void LogNodeActions(pe_working_set_t * data_set, gboolean terminal) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { char *node_name = NULL; char *task = NULL; action_t *action = (action_t *) gIter->data; if (action->rsc != NULL) { continue; } else if (is_set(action->flags, pe_action_optional)) { continue; } if (pe__is_guest_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id); } else if(action->node) { node_name = crm_strdup_printf("%s", action->node->details->uname); } if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { task = strdup("Shutdown"); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); task = crm_strdup_printf("Fence (%s)", op); } if(task == NULL) { /* Nothing to report */ } else if(terminal && action->reason) { printf(" * %s %s '%s'\n", task, node_name, action->reason); } else if(terminal) { printf(" * %s %s\n", task, node_name); } else if(action->reason) { crm_notice(" * %s %s '%s'\n", task, node_name, action->reason); } else { crm_notice(" * %s %s\n", task, node_name); } free(node_name); free(task); } } diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c index 7d93e1b184..e4d8adcb10 100644 --- a/lib/pacemaker/pcmk_sched_clone.c +++ b/lib/pacemaker/pcmk_sched_clone.c @@ -1,1476 +1,1476 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #define VARIANT_CLONE 1 #include gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all); static gint sort_rsc_id(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; long num1, num2; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* * Sort clone instances numerically by instance number, so instance :10 * comes after :9. */ num1 = strtol(strrchr(resource1->id, ':') + 1, NULL, 10); num2 = strtol(strrchr(resource2->id, ':') + 1, NULL, 10); if (num1 < num2) { return -1; } else if (num1 > num2) { return 1; } return 0; } static node_t * parent_node_instance(const resource_t * rsc, node_t * node) { node_t *ret = NULL; if (node != NULL && rsc->parent) { ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id); } else if(node != NULL) { ret = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); } return ret; } static gboolean did_fail(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_failed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (did_fail(child_rsc)) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc = 0; node_t *node1 = NULL; node_t *node2 = NULL; node_t *current_node1 = NULL; node_t *current_node2 = NULL; unsigned int nnodes1 = 0; unsigned int nnodes2 = 0; gboolean can1 = TRUE; gboolean can2 = TRUE; const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* allocation order: * - active instances * - instances running on nodes with the least copies * - active instances on nodes that can't support them or are to be fenced * - failed instances * - inactive instances */ current_node1 = pe__find_active_on(resource1, &nnodes1, NULL); current_node2 = pe__find_active_on(resource2, &nnodes2, NULL); if (nnodes1 && nnodes2) { if (nnodes1 < nnodes2) { crm_trace("%s < %s: running_on", resource1->id, resource2->id); return -1; } else if (nnodes1 > nnodes2) { crm_trace("%s > %s: running_on", resource1->id, resource2->id); return 1; } } node1 = current_node1; node2 = current_node2; if (node1) { node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource1->id); node1 = NULL; can1 = FALSE; } } if (node2) { node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource2->id); node2 = NULL; can2 = FALSE; } } if (can1 != can2) { if (can1) { crm_trace("%s < %s: availability of current location", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: availability of current location", resource1->id, resource2->id); return 1; } if (resource1->priority < resource2->priority) { crm_trace("%s < %s: priority", resource1->id, resource2->id); return 1; } else if (resource1->priority > resource2->priority) { crm_trace("%s > %s: priority", resource1->id, resource2->id); return -1; } if (node1 == NULL && node2 == NULL) { crm_trace("%s == %s: not active", resource1->id, resource2->id); return 0; } if (node1 != node2) { 
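/* If only one of the two instances is active, the active one sorts first; the case where neither is active already returned above. */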
if (node1 == NULL) { crm_trace("%s > %s: active", resource1->id, resource2->id); return 1; } else if (node2 == NULL) { crm_trace("%s < %s: active", resource1->id, resource2->id); return -1; } } can1 = can_run_resources(node1); can2 = can_run_resources(node2); if (can1 != can2) { if (can1) { crm_trace("%s < %s: can", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: can", resource1->id, resource2->id); return 1; } node1 = parent_node_instance(resource1, node1); node2 = parent_node_instance(resource2, node2); if (node1 != NULL && node2 == NULL) { crm_trace("%s < %s: not allowed", resource1->id, resource2->id); return -1; } else if (node1 == NULL && node2 != NULL) { crm_trace("%s > %s: not allowed", resource1->id, resource2->id); return 1; } if (node1 == NULL || node2 == NULL) { crm_trace("%s == %s: not allowed", resource1->id, resource2->id); return 0; } if (node1->count < node2->count) { crm_trace("%s < %s: count", resource1->id, resource2->id); return -1; } else if (node1->count > node2->count) { crm_trace("%s > %s: count", resource1->id, resource2->id); return 1; } can1 = did_fail(resource1); can2 = did_fail(resource2); if (can1 != can2) { if (can1) { crm_trace("%s > %s: failed", resource1->id, resource2->id); return 1; } crm_trace("%s < %s: failed", resource1->id, resource2->id); return -1; } if (node1 && node2) { int lpc = 0; int max = 0; node_t *n = NULL; GListPtr gIter = NULL; GListPtr list1 = NULL; GListPtr list2 = NULL; GHashTable *hash1 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); GHashTable *hash2 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); n = node_copy(current_node1); g_hash_table_insert(hash1, (gpointer) n->details->id, n); n = node_copy(current_node2); g_hash_table_insert(hash2, (gpointer) n->details->id, n); if(resource1->parent) { for (gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_rh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_lh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } } if(resource2->parent) { for (gIter = resource2->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_rh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_lh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } } /* Current location score */ node1 = g_hash_table_lookup(hash1, current_node1->details->id); node2 = g_hash_table_lookup(hash2, current_node2->details->id); if (node1->weight < node2->weight) { if (node1->weight < 0) { crm_trace("%s > %s: current score: %d %d", resource1->id, 
resource2->id, node1->weight, node2->weight); rc = -1; goto out; } else { crm_trace("%s < %s: current score: %d %d", resource1->id, resource2->id, node1->weight, node2->weight); rc = 1; goto out; } } else if (node1->weight > node2->weight) { crm_trace("%s > %s: current score: %d %d", resource1->id, resource2->id, node1->weight, node2->weight); rc = -1; goto out; } /* All location scores */ list1 = g_hash_table_get_values(hash1); list2 = g_hash_table_get_values(hash2); list1 = sort_nodes_by_weight(list1, current_node1, data_set); list2 = sort_nodes_by_weight(list2, current_node2, data_set); max = g_list_length(list1); if (max < g_list_length(list2)) { max = g_list_length(list2); } for (; lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if (node1 == NULL) { crm_trace("%s < %s: colocated score NULL", resource1->id, resource2->id); rc = 1; break; } else if (node2 == NULL) { crm_trace("%s > %s: colocated score NULL", resource1->id, resource2->id); rc = -1; break; } if (node1->weight < node2->weight) { crm_trace("%s < %s: colocated score", resource1->id, resource2->id); rc = 1; break; } else if (node1->weight > node2->weight) { crm_trace("%s > %s: colocated score", resource1->id, resource2->id); rc = -1; break; } } /* Order by reverse uname - same as sort_node_weight() does? */ out: g_hash_table_destroy(hash1); /* Free mem */ g_hash_table_destroy(hash2); /* Free mem */ g_list_free(list1); g_list_free(list2); if (rc != 0) { return rc; } } rc = strcmp(resource1->id, resource2->id); crm_trace("%s %c %s: default", resource1->id, rc < 0 ? '<' : '>', resource2->id); return rc; } static node_t * can_run_instance(resource_t * rsc, node_t * node, int limit) { node_t *local_node = NULL; if (node == NULL && rsc->allowed_nodes) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) { can_run_instance(rsc, local_node, limit); } return NULL; } if (can_run_resources(node) == FALSE) { goto bail; } else if (is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = parent_node_instance(rsc, node); if (local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if (local_node->weight < 0) { common_update_score(rsc, node->details->id, local_node->weight); pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.", rsc->id, node->details->uname); } else if (local_node->count < limit) { pe_rsc_trace(rsc, "%s can run on %s (already running %d)", rsc->id, node->details->uname, local_node->count); return local_node; } else { pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)", rsc->id, node->details->uname, local_node->count, limit); } bail: if (node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static node_t * color_instance(resource_t * rsc, node_t * prefer, gboolean all_coloc, int limit, pe_working_set_t * data_set) { node_t *chosen = NULL; GHashTable *backup = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)", rsc->id, (prefer? prefer->details->uname: "none"), (all_coloc? 
"all" : "some")); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependent resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if (prefer) { node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (local_prefer == NULL || local_prefer->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id, prefer->details->uname); return NULL; } } can_run_instance(rsc, NULL, limit); backup = node_hash_dup(rsc->allowed_nodes); chosen = rsc->cmds->allocate(rsc, prefer, data_set); if (chosen && prefer && (chosen->details != prefer->details)) { crm_info("Not pre-allocating %s to %s because %s is better", rsc->id, prefer->details->uname, chosen->details->uname); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = backup; native_deallocate(rsc); chosen = NULL; backup = NULL; } if (chosen) { pe_node_t *local_node = parent_node_instance(rsc, chosen); if (local_node) { local_node->count++; } else if (is_set(rsc->flags, pe_rsc_managed)) { /* what to do? we can't enforce per-node limits in this case */ crm_config_err("%s not found in %s (list=%d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } if(backup) { g_hash_table_destroy(backup); } return chosen; } static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all) { GListPtr gIter = NULL; gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_prepend(child->rsc_cons, cons); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0) { child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); } } } void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set); void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set) { int loop_max = 0; int allocated = 0; int available_nodes = 0; /* count now tracks the number of clones currently allocated */ for(GListPtr nIter = nodes; nIter != NULL; nIter = nIter->next) { pe_node_t *node = nIter->data; node->count = 0; if (can_run_resources(node)) { available_nodes++; } } if(available_nodes) { loop_max = max / available_nodes; } if (loop_max < 1) { loop_max = 1; } pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)", max, rsc->id, available_nodes, per_host_max, loop_max); /* Pre-allocate as many instances as we can to their current location */ for (GListPtr gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (child->running_on && is_set(child->flags, pe_rsc_provisional) && is_not_set(child->flags, pe_rsc_failed)) { node_t *child_node = pe__current_node(child); node_t *local_node = parent_node_instance(child, child_node); pe_rsc_trace(rsc, "Checking pre-allocation of %s to %s (%d remaining of %d)", child->id, child_node->details->uname, max - allocated, max); if 
(can_run_resources(child_node) == FALSE || child_node->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s", child_node->details->uname, child->id); } else if(local_node && local_node->count >= loop_max) { pe_rsc_trace(rsc, "Not pre-allocating because %s already allocated optimal instances", child_node->details->uname); } else if (color_instance(child, child_node, max < available_nodes, per_host_max, data_set)) { pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id, child_node->details->uname); allocated++; } } } pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max); for (GListPtr gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (child->running_on != NULL) { node_t *child_node = pe__current_node(child); node_t *local_node = parent_node_instance(child, child_node); if (local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } if (is_not_set(child->flags, pe_rsc_provisional)) { } else if (allocated >= max) { pe_rsc_debug(rsc, "Child %s not allocated - limit reached %d %d", child->id, allocated, max); resource_location(child, NULL, -INFINITY, "clone_color:limit_reached", data_set); } else { if (color_instance(child, NULL, max < available_nodes, per_host_max, data_set)) { allocated++; } } } pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d", allocated, rsc->id, max); } node_t * clone_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { GListPtr nodes = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } if (is_set(rsc->flags, pe_rsc_promotable)) { apply_master_prefs(rsc); } set_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Processing %s", rsc->id); /* this information is used by sort_clone_instance() when deciding in which * order to allocate clone instances */ for (GListPtr gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; pe_rsc_trace(rsc, "%s: Coloring %s first", rsc->id, constraint->rsc_rh->id); constraint->rsc_rh->cmds->allocate(constraint->rsc_rh, prefer, data_set); } for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, (pe_weights_rollback | pe_weights_positive)); } dump_node_scores((show_scores? 
LOG_STDOUT : scores_log_level), rsc, __FUNCTION__, rsc->allowed_nodes); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = sort_nodes_by_weight(nodes, NULL, data_set); rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set); distribute_children(rsc, rsc->children, nodes, clone_data->clone_max, clone_data->clone_node_max, data_set); g_list_free(nodes); if (is_set(rsc->flags, pe_rsc_promotable)) { color_promotable(rsc, data_set); } clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Done allocating %s", rsc->id); return NULL; } static void clone_update_pseudo_status(resource_t * rsc, gboolean * stopping, gboolean * starting, gboolean * active) { GListPtr gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if (rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (*starting && *stopping) { return; } else if (is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid); continue; } else if (is_set(action->flags, pe_action_pseudo) == FALSE && is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid); continue; } else if (safe_str_eq(RSC_STOP, action->task)) { pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid); *stopping = TRUE; } else if (safe_str_eq(RSC_START, action->task)) { if (is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); } else { pe_rsc_trace(rsc, "Starting due to: %s", action->uuid); pe_rsc_trace(rsc, "%s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static action_t * find_rsc_action(pe_resource_t *rsc, const char *task, gboolean active_only, GList **list) { action_t *match = NULL; GListPtr possible = NULL; GListPtr active = NULL; possible = pe__resource_actions(rsc, NULL, task, FALSE); if (active_only) { GListPtr gIter = possible; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_optional) == FALSE) { active = g_list_prepend(active, op); } } - if (active && g_list_length(active) == 1) { + if (active && pcmk__list_of_1(active)) { match = g_list_nth_data(active, 0); } if (list) { *list = active; active = NULL; } - } else if (possible && g_list_length(possible) == 1) { + } else if (possible && pcmk__list_of_1(possible)) { match = g_list_nth_data(possible, 0); } if (list) { *list = possible; possible = NULL; } if (possible) { g_list_free(possible); } if (active) { g_list_free(active); } return match; } static void child_ordering_constraints(resource_t * rsc, pe_working_set_t * data_set) { action_t *stop = NULL; action_t *start = NULL; action_t *last_stop = NULL; action_t *last_start = NULL; GListPtr gIter = NULL; gboolean active_only = TRUE; /* change to false to get the old behavior */ clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->ordered == FALSE) { return; } /* we 
have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, sort_rsc_id); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; stop = find_rsc_action(child, RSC_STOP, active_only, NULL); if (stop) { if (last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } start = find_rsc_action(child, RSC_START, active_only, NULL); if (start) { if (last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(resource_t *rsc, pe_working_set_t *data_set) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify, &clone_data->stop_notify,data_set); child_ordering_constraints(rsc, data_set); if (is_set(rsc->flags, pe_rsc_promotable)) { create_promotable_actions(rsc, data_set); } } void clone_create_pseudo_actions( resource_t * rsc, GListPtr children, notify_data_t **start_notify, notify_data_t **stop_notify, pe_working_set_t * data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; gboolean allow_dependent_migrations = TRUE; action_t *stop = NULL; action_t *stopped = NULL; action_t *start = NULL; action_t *started = NULL; pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (GListPtr gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; gboolean starting = FALSE; gboolean stopping = FALSE; child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active); if (stopping && starting) { allow_dependent_migrations = FALSE; } child_stopping |= stopping; child_starting |= starting; } /* start */ start = create_pseudo_resource_op(rsc, RSC_START, !child_starting, TRUE, data_set); started = create_pseudo_resource_op(rsc, RSC_STARTED, !child_starting, FALSE, data_set); started->priority = INFINITY; if (child_active || child_starting) { update_action_flags(started, pe_action_runnable, __FUNCTION__, __LINE__); } if (start_notify != NULL && *start_notify == NULL) { *start_notify = create_notification_boundaries(rsc, RSC_START, start, started, data_set); } /* stop */ stop = create_pseudo_resource_op(rsc, RSC_STOP, !child_stopping, TRUE, data_set); stopped = create_pseudo_resource_op(rsc, RSC_STOPPED, !child_stopping, TRUE, data_set); stopped->priority = INFINITY; if (allow_dependent_migrations) { update_action_flags(stop, pe_action_migrate_runnable, __FUNCTION__, __LINE__); } if (stop_notify != NULL && *stop_notify == NULL) { *stop_notify = create_notification_boundaries(rsc, RSC_STOP, stop, stopped, data_set); if (start_notify && *start_notify && *stop_notify) { order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional); } } } void clone_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { resource_t *last_rsc = NULL; GListPtr gIter; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); if (is_set(rsc->flags, 
pe_rsc_promotable)) { new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); } if (clone_data->ordered) { /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, sort_rsc_id); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->internal_constraints(child_rsc, data_set); order_start_start(rsc, child_rsc, pe_order_runnable_left | pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_start_start(last_rsc, child_rsc, pe_order_optional); } order_stop_stop(rsc, child_rsc, pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_stop_stop(child_rsc, last_rsc, pe_order_optional); } last_rsc = child_rsc; } if (is_set(rsc->flags, pe_rsc_promotable)) { promotable_constraints(rsc, data_set); } } bool assign_node(resource_t * rsc, node_t * node, gboolean force) { bool changed = FALSE; if (rsc->children) { for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; changed |= assign_node(child_rsc, node, force); } return changed; } if (rsc->allocated_to != NULL) { changed = true; } native_assign_node(rsc, NULL, node, force); return changed; } gboolean is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e filter, gboolean current) { node_t *node = NULL; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); CRM_CHECK(child_rsc && local_node, return FALSE); if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { /* We only want instances that haven't failed */ node = child_rsc->fns->location(child_rsc, NULL, current); } if (filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); return FALSE; } if (node && (node->details == local_node->details)) { return TRUE; } else if (node) { crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname, local_node->details->uname); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } return FALSE; } pe_resource_t * find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc, enum rsc_role_e filter, gboolean current, pe_working_set_t *data_set) { resource_t *pair = NULL; GListPtr gIter = NULL; GListPtr scratch = NULL; node_t *local_node = NULL; local_node = local_child->fns->location(local_child, NULL, current); if (local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = sort_nodes_by_weight(scratch, NULL, data_set); gIter = scratch; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_child_by_node(local_child, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } void clone_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ 
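/* Fail fast if this is ever reached: hitting this assertion would mean the colocation was not pushed down to the children as described above. */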
CRM_ASSERT(FALSE); } void clone_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set) { GListPtr gIter = NULL; gboolean do_interleave = FALSE; const char *interleave_s = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_rh != NULL, pe_err("rsc_rh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_lh->variant == pe_native, return); pe_rsc_trace(rsc_rh, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc_rh->id, constraint->score); if (is_set(rsc_rh->flags, pe_rsc_promotable)) { if (is_set(rsc_rh->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc_rh, "%s is still provisional", rsc_rh->id); return; } else if (constraint->role_rh == RSC_ROLE_UNKNOWN) { pe_rsc_trace(rsc_rh, "Handling %s as a clone colocation", constraint->id); } else { promotable_colocation_rh(rsc_lh, rsc_rh, constraint, data_set); return; } } /* only the LHS side needs to be labeled as interleave */ interleave_s = g_hash_table_lookup(constraint->rsc_lh->meta, XML_RSC_ATTR_INTERLEAVE); if(crm_is_true(interleave_s) && constraint->rsc_lh->variant > pe_group) { // TODO: Do we actually care about multiple RH copies sharing a LH copy anymore? if (copies_per_node(constraint->rsc_lh) != copies_per_node(constraint->rsc_rh)) { crm_config_err("Cannot interleave %s and %s because" " they do not support the same number of copies per node", constraint->rsc_lh->id, constraint->rsc_rh->id); } else { do_interleave = TRUE; } } if (is_set(rsc_rh->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc_rh, "%s is still provisional", rsc_rh->id); return; } else if (do_interleave) { resource_t *rh_child = NULL; rh_child = find_compatible_child(rsc_lh, rsc_rh, RSC_ROLE_UNKNOWN, FALSE, data_set); if (rh_child) { pe_rsc_debug(rsc_rh, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint, data_set); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc_rh, "Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); } return; } else if (constraint->score >= INFINITY) { GListPtr rhs = NULL; gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(rsc_rh, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); rhs = g_list_prepend(rhs, chosen); } } node_list_exclude(rsc_lh->allowed_nodes, rhs, FALSE); g_list_free(rhs); return; } gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint, data_set); } } enum action_tasks clone_child_action(action_t * action) { enum action_tasks result = no_action; resource_t *child = (resource_t *) action->rsc->children->data; if (safe_str_eq(action->task, "notify") || safe_str_eq(action->task, "notified")) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for (; lpc > 0; lpc--) { if (key[lpc] == '_' && stop == 0) { stop = lpc; } else if (key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = strdup(key + lpc); 
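/* Copy everything after the second-to-last '_' and truncate at the last '_', leaving just the operation being notified about: e.g. a key of the assumed form "child_post_notify_start_0" would yield "start". */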
task_mutable[stop - lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); free(task_mutable); break; } } } else { result = get_complex_task(child, action->task, TRUE); } return result; } enum pe_action_flags summary_action_flags(action_t * action, GListPtr children, node_t * node) { GListPtr gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); for (gIter = children; gIter != NULL; gIter = gIter->next) { action_t *child_action = NULL; resource_t *child = (resource_t *) gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node); pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id, node ? node->details->uname : "none", child_action?child_action->uuid:"NA"); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (is_set(flags, pe_action_optional) && is_set(child_flags, pe_action_optional) == FALSE) { pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid, child_action->uuid); flags = crm_clear_bit(__FUNCTION__, __LINE__, action->rsc->id, flags, pe_action_optional); pe_clear_action_bit(action, pe_action_optional); } if (is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } } if (check_runnable && any_runnable == FALSE) { pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid); flags = crm_clear_bit(__FUNCTION__, __LINE__, action->rsc->id, flags, pe_action_runnable); if (node == NULL) { pe_clear_action_bit(action, pe_action_runnable); } } return flags; } enum pe_action_flags clone_action_flags(action_t * action, node_t * node) { return summary_action_flags(action, action->rsc->children, node); } void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GListPtr gIter = rsc->children; pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); } } void clone_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; rsc->cmds->action_flags(op, NULL); } if (clone_data->start_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->start_notify); expand_notification_data(rsc, clone_data->start_notify, data_set); create_notifications(rsc, clone_data->start_notify, data_set); } if (clone_data->stop_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->stop_notify); expand_notification_data(rsc, clone_data->stop_notify, data_set); create_notifications(rsc, clone_data->stop_notify, data_set); } if (clone_data->promote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->promote_notify); expand_notification_data(rsc, clone_data->promote_notify, data_set); create_notifications(rsc, clone_data->promote_notify, data_set); } if (clone_data->demote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->demote_notify); expand_notification_data(rsc, clone_data->demote_notify, 
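Note on the flag summarization above: summary_action_flags() starts from an optimistic optional|runnable|pseudo mask, clears "optional" as soon as any child action is mandatory, and clears "runnable" only when no child action is runnable. A self-contained sketch of that aggregation (the flag names here are invented, not the pe_action_* values):

#include <stdio.h>

enum flags { F_OPTIONAL = 1 << 0, F_RUNNABLE = 1 << 1 };

/* Aggregate child flags: optional only if every child is optional;
 * runnable if any child is runnable. */
static unsigned summarize(const unsigned *child_flags, int n)
{
    unsigned out = F_OPTIONAL;      /* start optimistic */
    int any_runnable = 0;
    for (int i = 0; i < n; i++) {
        if (!(child_flags[i] & F_OPTIONAL)) {
            out &= ~F_OPTIONAL;     /* one mandatory child makes the summary mandatory */
        }
        if (child_flags[i] & F_RUNNABLE) {
            any_runnable = 1;
        }
    }
    if (any_runnable) {
        out |= F_RUNNABLE;
    }
    return out;
}

int main(void)
{
    unsigned children[] = { F_OPTIONAL | F_RUNNABLE, F_RUNNABLE };
    printf("summary=0x%x\n", summarize(children, 2)); /* runnable, not optional */
    return 0;
}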
data_set); create_notifications(rsc, clone_data->demote_notify, data_set); } /* Now that the notifications have been created we can expand the children */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } // Check whether a resource or any of its children is known on node static bool rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node) { if (rsc->children) { for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { resource_t *child = (resource_t *) child_iter->data; if (rsc_known_on(child, node)) { return TRUE; } } } else if (rsc->known_on) { GHashTableIter iter; node_t *known_node = NULL; g_hash_table_iter_init(&iter, rsc->known_on); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) { if (node->details == known_node->details) { return TRUE; } } } return FALSE; } // Look for an instance of clone that is known on node static pe_resource_t * find_instance_on(const pe_resource_t *clone, const pe_node_t *node) { for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (rsc_known_on(child, node)) { return child; } } return NULL; } // For unique clones, probe each instance separately static gboolean probe_unique_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { gboolean any_created = FALSE; for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { resource_t *child = (resource_t *) child_iter->data; any_created |= child->cmds->create_probe(child, node, complete, force, data_set); } return any_created; } // For anonymous clones, only a single instance needs to be probed static gboolean probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { // First, check if we probed an instance on this node last time pe_resource_t *child = find_instance_on(rsc, node); // Otherwise, check if we plan to start an instance on this node if (child == NULL) { for (GList *child_iter = rsc->children; child_iter && !child; child_iter = child_iter->next) { node_t *local_node = NULL; resource_t *child_rsc = (resource_t *) child_iter->data; local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if (local_node && (local_node->details == node->details)) { child = child_rsc; } } } // Otherwise, use the first clone instance if (child == NULL) { child = rsc->children->data; } return child->cmds->create_probe(child, node, complete, force, data_set); } gboolean clone_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { gboolean any_created = FALSE; CRM_ASSERT(rsc); rsc->children = g_list_sort(rsc->children, sort_rsc_id); if (rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if (rsc->exclusive_discover) { node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
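Note on probe_anonymous_clone() above: it picks its probe target with a three-step fallback — an instance already known on the node, else an instance planned for the node, else simply the first instance. The same selection in miniature (the fields known_here/planned_here are invented stand-ins for the real lookups):

#include <stdio.h>
#include <stddef.h>

struct child { const char *id; int known_here; int planned_here; };

/* Pick which instance to probe, mirroring the fallback order above. */
static const struct child *probe_target(const struct child *c, int n)
{
    for (int i = 0; i < n; i++) if (c[i].known_here)   return &c[i];
    for (int i = 0; i < n; i++) if (c[i].planned_here) return &c[i];
    return n > 0 ? &c[0] : NULL;   /* last resort: first instance */
}

int main(void)
{
    struct child kids[] = { { "c:0", 0, 0 }, { "c:1", 0, 1 } };
    const struct child *t = probe_target(kids, 2);
    printf("probe %s\n", t ? t->id : "(none)");
    return 0;
}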
node->details->id); if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on * * remove the node from allowed_nodes so that the * notification contains only nodes that we might ever run * on */ g_hash_table_remove(rsc->allowed_nodes, node->details->id); /* Bit of a shortcut - might as well take it */ return FALSE; } } if (is_set(rsc->flags, pe_rsc_unique)) { any_created = probe_unique_clone(rsc, node, complete, force, data_set); } else { any_created = probe_anonymous_clone(rsc, node, complete, force, data_set); } return any_created; } void clone_append_meta(resource_t * rsc, xmlNode * xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_notify) ? "true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); free(name); if (is_set(rsc->flags, pe_rsc_promotable)) { name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX); crm_xml_add_int(xml, name, clone_data->promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX); crm_xml_add_int(xml, name, clone_data->promoted_node_max); free(name); /* @COMPAT Maintain backward compatibility with resource agents that * expect the old names (deprecated since 2.0.0). */ name = crm_meta_name(XML_RSC_ATTR_MASTER_MAX); crm_xml_add_int(xml, name, clone_data->promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_MASTER_NODEMAX); crm_xml_add_int(xml, name, clone_data->promoted_node_max); free(name); } } GHashTable * clone_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c index 7f06870f8a..a76c4a159c 100644 --- a/lib/pacemaker/pcmk_sched_constraints.c +++ b/lib/pacemaker/pcmk_sched_constraints.c @@ -1,3040 +1,3040 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include <../lib/pengine/unpack.h> enum pe_order_kind { pe_order_kind_optional, pe_order_kind_mandatory, pe_order_kind_serialize, }; #define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \ __rsc = pe_find_constraint_resource(data_set->resources, __name); \ if(__rsc == NULL) { \ crm_config_err("%s: No resource found for %s", __set, __name); \ return FALSE; \ } \ } while(0) enum pe_ordering get_flags(const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert); enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind); static pe__location_t *generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, const char *discovery, crm_time_t *next_change, pe_working_set_t *data_set, pe_match_data_t *match_data); static bool evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set) { bool result = FALSE; crm_time_t *next_change = crm_time_new_undefined(); result = pe_evaluate_rules(lifetime, NULL, data_set->now, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, data_set); } crm_time_free(next_change); return result; } gboolean unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; xmlNode *lifetime = NULL; for (xml_obj = __xml_first_child_element(xml_constraints); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *tag = crm_element_name(xml_obj); if (id == NULL) { crm_config_err("Constraint <%s...> must have an id", tag); continue; } crm_trace("Processing constraint %s %s", tag, id); lifetime = first_named_child(xml_obj, "lifetime"); if (lifetime) { crm_config_warn("Support for the lifetime tag, used by %s, is deprecated." 
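Note on EXPAND_CONSTRAINT_IDREF above: it relies on the classic do { ... } while(0) wrapper so the lookup-or-bail expansion behaves as a single statement, even as the unbraced body of an if. A minimal illustration of why the wrapper matters, with a hypothetical lookup() standing in for pe_find_constraint_resource():

#include <stdio.h>

/* The macro expands to exactly one statement, so it composes safely
 * with if/else. LOOKUP_OR_FAIL and lookup() are invented for this sketch. */
#define LOOKUP_OR_FAIL(var, name) do {              \
        (var) = lookup(name);                       \
        if ((var) == NULL) {                        \
            fprintf(stderr, "no such: %s\n", name); \
            return 1;                               \
        }                                           \
    } while (0)

static const char *lookup(const char *name)
{
    return (name && name[0] == 'r') ? name : NULL; /* toy lookup */
}

int main(void)
{
    const char *rsc;
    if (1)
        LOOKUP_OR_FAIL(rsc, "rsc1"); /* works even as an unbraced if-body */
    printf("found %s\n", rsc);
    return 0;
}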
" The rules it contains should instead be direct descendents of the constraint object", id); } if (lifetime && !evaluate_lifetime(lifetime, data_set)) { crm_info("Constraint %s %s is not active", tag, id); } else if (safe_str_eq(XML_CONS_TAG_RSC_ORDER, tag)) { unpack_rsc_order(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_DEPEND, tag)) { unpack_rsc_colocation(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_LOCATION, tag)) { unpack_location(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_TICKET, tag)) { unpack_rsc_ticket(xml_obj, data_set); } else { pe_err("Unsupported constraint type: %s", tag); } } return TRUE; } static const char * invert_action(const char *action) { if (safe_str_eq(action, RSC_START)) { return RSC_STOP; } else if (safe_str_eq(action, RSC_STOP)) { return RSC_START; } else if (safe_str_eq(action, RSC_PROMOTE)) { return RSC_DEMOTE; } else if (safe_str_eq(action, RSC_DEMOTE)) { return RSC_PROMOTE; } else if (safe_str_eq(action, RSC_PROMOTED)) { return RSC_DEMOTED; } else if (safe_str_eq(action, RSC_DEMOTED)) { return RSC_PROMOTED; } else if (safe_str_eq(action, RSC_STARTED)) { return RSC_STOPPED; } else if (safe_str_eq(action, RSC_STOPPED)) { return RSC_STARTED; } crm_config_warn("Unknown action: %s", action); return NULL; } static enum pe_order_kind get_ordering_type(xmlNode * xml_obj) { enum pe_order_kind kind_e = pe_order_kind_mandatory; const char *kind = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND); if (kind == NULL) { const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); kind_e = pe_order_kind_mandatory; if (score) { // @COMPAT deprecated informally since 1.0.7, formally since 2.0.1 int score_i = char2score(score); if (score_i == 0) { kind_e = pe_order_kind_optional; } pe_warn_once(pe_wo_order_score, "Support for 'score' in rsc_order is deprecated " "and will be removed in a future release (use 'kind' instead)"); } } else if (safe_str_eq(kind, "Mandatory")) { kind_e = pe_order_kind_mandatory; } else if (safe_str_eq(kind, "Optional")) { kind_e = pe_order_kind_optional; } else if (safe_str_eq(kind, "Serialize")) { kind_e = pe_order_kind_serialize; } else { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); crm_config_err("Constraint %s: Unknown type '%s'", id, kind); } return kind_e; } static resource_t * pe_find_constraint_resource(GListPtr rsc_list, const char *id) { GListPtr rIter = NULL; for (rIter = rsc_list; id && rIter; rIter = rIter->next) { resource_t *parent = rIter->data; resource_t *match = parent->fns->find_rsc(parent, id, NULL, pe_find_renamed); if (match != NULL) { if(safe_str_neq(match->id, id)) { /* We found an instance of a clone instead */ match = uber_parent(match); crm_debug("Found %s for %s", match->id, id); } return match; } } crm_trace("No match for %s", id); return NULL; } static gboolean pe_find_constraint_tag(pe_working_set_t * data_set, const char * id, tag_t ** tag) { gboolean rc = FALSE; *tag = NULL; rc = g_hash_table_lookup_extended(data_set->template_rsc_sets, id, NULL, (gpointer*) tag); if (rc == FALSE) { rc = g_hash_table_lookup_extended(data_set->tags, id, NULL, (gpointer*) tag); if (rc == FALSE) { crm_config_warn("No template/tag named '%s'", id); return FALSE; } else if (*tag == NULL) { crm_config_warn("No resource is tagged with '%s'", id); return FALSE; } } else if (*tag == NULL) { crm_config_warn("No resource is derived from template '%s'", id); return FALSE; } return rc; } static gboolean valid_resource_or_tag(pe_working_set_t * data_set, const char * id, resource_t 
** rsc, tag_t ** tag) { gboolean rc = FALSE; if (rsc) { *rsc = NULL; *rsc = pe_find_constraint_resource(data_set->resources, id); if (*rsc) { return TRUE; } } if (tag) { *tag = NULL; rc = pe_find_constraint_tag(data_set, id, tag); } return rc; } static gboolean order_is_symmetrical(xmlNode * xml_obj, enum pe_order_kind parent_kind, const char * parent_symmetrical_s) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *kind_s = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND); const char *score_s = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); const char *symmetrical_s = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); enum pe_order_kind kind = parent_kind; if (kind_s || score_s) { kind = get_ordering_type(xml_obj); } if (symmetrical_s == NULL) { symmetrical_s = parent_symmetrical_s; } if (symmetrical_s) { gboolean symmetrical = crm_is_true(symmetrical_s); if (symmetrical && kind == pe_order_kind_serialize) { crm_config_warn("Cannot invert serialized order %s." " Ignoring symmetrical=\"%s\"", id, symmetrical_s); return FALSE; } return symmetrical; } else { if (kind == pe_order_kind_serialize) { return FALSE; } else { return TRUE; } } } static gboolean unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) { int order_id = 0; resource_t *rsc_then = NULL; resource_t *rsc_first = NULL; gboolean invert_bool = TRUE; int min_required_before = 0; enum pe_order_kind kind = pe_order_kind_mandatory; enum pe_ordering cons_weight = pe_order_optional; const char *id_first = NULL; const char *id_then = NULL; const char *action_then = NULL; const char *action_first = NULL; const char *instance_then = NULL; const char *instance_first = NULL; const char *id = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } invert_bool = order_is_symmetrical(xml_obj, kind, NULL); id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN); id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST); action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION); action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION); instance_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_INSTANCE); instance_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_INSTANCE); if (action_first == NULL) { action_first = RSC_START; } if (action_then == NULL) { action_then = action_first; } if (id_then == NULL || id_first == NULL) { crm_config_err("Constraint %s needs two sides lh: %s rh: %s", id, crm_str(id_then), crm_str(id_first)); return FALSE; } rsc_then = pe_find_constraint_resource(data_set->resources, id_then); rsc_first = pe_find_constraint_resource(data_set->resources, id_first); if (rsc_then == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_then); return FALSE; } else if (rsc_first == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_first); return FALSE; } else if (instance_then && pe_rsc_is_clone(rsc_then) == FALSE) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_then, instance_then); return FALSE; } else if (instance_first && pe_rsc_is_clone(rsc_first) == FALSE) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_first, instance_first); return FALSE; } if (instance_then) { 
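Note on invert_action() earlier in this file: the chain of safe_str_eq() calls is effectively a symmetric lookup table over action pairs, checked in both directions, with NULL for anything unknown (in which case the constraint stays one-way). A sketch of the table-driven equivalent — the string literals stand in for the RSC_* constants and this is not the code's actual implementation:

#include <stdio.h>
#include <string.h>

/* Each row is a symmetric pair; invert() checks both columns. */
static const char *pairs[][2] = {
    { "start", "stop" }, { "promote", "demote" },
    { "promoted", "demoted" }, { "running", "stopped" },
};

static const char *invert(const char *action)
{
    for (size_t i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) {
        if (strcmp(action, pairs[i][0]) == 0) return pairs[i][1];
        if (strcmp(action, pairs[i][1]) == 0) return pairs[i][0];
    }
    return NULL; /* unknown action: caller must keep the constraint one-way */
}

int main(void)
{
    printf("stop -> %s\n", invert("stop"));
    return 0;
}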
rsc_then = find_clone_instance(rsc_then, instance_then, data_set); if (rsc_then == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_then, id_then); return FALSE; } } if (instance_first) { rsc_first = find_clone_instance(rsc_first, instance_first, data_set); if (rsc_first == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_first, id_first); return FALSE; } } cons_weight = pe_order_optional; kind = get_ordering_type(xml_obj); if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_trace("Upgrade : recovery - implies right"); cons_weight |= pe_order_implies_then; } if (invert_bool == FALSE) { cons_weight |= get_asymmetrical_flags(kind); } else { cons_weight |= get_flags(id, kind, action_first, action_then, FALSE); } if (pe_rsc_is_clone(rsc_first)) { /* If clone-min is set, require at least that number of instances to be * runnable before allowing dependencies to be runnable. */ const char *min_clones_s = g_hash_table_lookup(rsc_first->meta, XML_RSC_ATTR_INCARNATION_MIN); // @COMPAT 1.1.13: deprecated const char *require_all_s = crm_element_value(xml_obj, "require-all"); if (min_clones_s) { min_required_before = crm_parse_int(min_clones_s, "0"); } else if (require_all_s) { pe_warn_once(pe_wo_require_all, "Support for require-all in ordering constraints " "is deprecated and will be removed in a future release" " (use clone-min clone meta-attribute instead)"); if (crm_is_true(require_all_s) == FALSE) { // require-all=false is deprecated equivalent of clone-min=1 min_required_before = 1; } } } /* If there is a minimum number of instances that must be runnable before * the 'then' action is runnable, we use a pseudo action as an intermediate step * start min number of clones -> pseudo action is runnable -> dependency runnable. */ if (min_required_before) { GListPtr rIter = NULL; char *task = crm_concat(CRM_OP_RELAXED_CLONE, id, ':'); action_t *unordered_action = get_pseudo_op(task, data_set); free(task); /* require the pseudo action to have "min_required_before" number of * actions to be considered runnable before allowing the pseudo action * to be runnable. */ unordered_action->required_runnable_before = min_required_before; update_action_flags(unordered_action, pe_action_requires_any, __FUNCTION__, __LINE__); for (rIter = rsc_first->children; id && rIter; rIter = rIter->next) { resource_t *child = rIter->data; /* order each clone instance before the pseudo action */ custom_action_order(child, generate_op_key(child->id, action_first, 0), NULL, NULL, NULL, unordered_action, pe_order_one_or_more | pe_order_implies_then_printed, data_set); } /* order the "then" dependency to occur after the pseudo action only if * the pseudo action is runnable */ order_id = custom_action_order(NULL, NULL, unordered_action, rsc_then, generate_op_key(rsc_then->id, action_then, 0), NULL, cons_weight | pe_order_runnable_left, data_set); } else { order_id = new_rsc_order(rsc_first, action_first, rsc_then, action_then, cons_weight, data_set); } pe_rsc_trace(rsc_first, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_first->id, action_first, rsc_then->id, action_then, cons_weight); if (invert_bool == FALSE) { return TRUE; } action_then = invert_action(action_then); action_first = invert_action(action_first); if (action_then == NULL || action_first == NULL) { crm_config_err("Cannot invert rsc_order constraint %s." 
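Note on the clone-min handling above: the scheduler inserts a pseudo action that only becomes runnable once at least min_required_before of the instance starts are runnable, and then orders the "then" action after that pseudo action. The gate itself, reduced to a counter (a sketch, not the scheduler's pe_action_requires_any flag machinery):

#include <stdio.h>

/* The pseudo action is runnable only once at least required_before
 * of its inputs are runnable. */
static int pseudo_runnable(const int *input_runnable, int n, int required_before)
{
    int runnable = 0;
    for (int i = 0; i < n; i++) {
        runnable += input_runnable[i] ? 1 : 0;
    }
    return runnable >= required_before;
}

int main(void)
{
    int starts[] = { 1, 0, 1 };   /* three clone instances, two can start */
    printf("clone-min=2 -> %s\n",
           pseudo_runnable(starts, 3, 2) ? "dependent may run" : "dependent blocked");
    return 0;
}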
" Please specify the inverse manually.", id); return TRUE; } cons_weight = pe_order_optional; if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_trace("Upgrade : recovery - implies left"); cons_weight |= pe_order_implies_first; } cons_weight |= get_flags(id, kind, action_first, action_then, TRUE); order_id = new_rsc_order(rsc_then, action_then, rsc_first, action_first, cons_weight, data_set); pe_rsc_trace(rsc_then, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_then->id, action_then, rsc_first->id, action_first, cons_weight); return TRUE; } static gboolean expand_tags_in_sets(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { xmlNode *new_xml = NULL; xmlNode *set = NULL; gboolean any_refs = FALSE; const char *cons_id = NULL; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } new_xml = copy_xml(xml_obj); cons_id = ID(new_xml); for (set = __xml_first_child_element(new_xml); set != NULL; set = __xml_next_element(set)) { xmlNode *xml_rsc = NULL; GListPtr tag_refs = NULL; GListPtr gIter = NULL; if (safe_str_neq((const char *)set->name, XML_CONS_TAG_RSC_SET)) { continue; } for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { resource_t *rsc = NULL; tag_t *tag = NULL; const char *id = ID(xml_rsc); if (safe_str_neq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF)) { continue; } if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", cons_id, id); free_xml(new_xml); return FALSE; } else if (rsc) { continue; } else if (tag) { /* The resource_ref under the resource_set references a template/tag */ xmlNode *last_ref = xml_rsc; /* A sample: Original XML: Now we are appending rsc2 and rsc3 which are tagged with tag1 right after it: */ for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *obj_ref = (const char *) gIter->data; xmlNode *new_rsc_ref = NULL; new_rsc_ref = xmlNewDocRawNode(getDocPtr(set), NULL, (pcmkXmlStr) XML_TAG_RESOURCE_REF, NULL); crm_xml_add(new_rsc_ref, XML_ATTR_ID, obj_ref); xmlAddNextSibling(last_ref, new_rsc_ref); last_ref = new_rsc_ref; } any_refs = TRUE; /* Do not directly free ''. That would break the further __xml_next_element(xml_rsc)) and cause "Invalid read" seen by valgrind. So just record it into a hash table for freeing it later. 
*/ tag_refs = g_list_append(tag_refs, xml_rsc); } } /* Now free '', and finally get: */ for (gIter = tag_refs; gIter != NULL; gIter = gIter->next) { xmlNode *tag_ref = gIter->data; free_xml(tag_ref); } g_list_free(tag_refs); } if (any_refs) { *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } static gboolean tag_to_set(xmlNode * xml_obj, xmlNode ** rsc_set, const char * attr, gboolean convert_rsc, pe_working_set_t * data_set) { const char *cons_id = NULL; const char *id = NULL; resource_t *rsc = NULL; tag_t *tag = NULL; *rsc_set = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } if (attr == NULL) { crm_config_err("No attribute name to process."); return FALSE; } cons_id = crm_element_value(xml_obj, XML_ATTR_ID); if (cons_id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } id = crm_element_value(xml_obj, attr); if (id == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", cons_id, id); return FALSE; } else if (tag) { GListPtr gIter = NULL; /* A template/tag is referenced by the "attr" attribute (first, then, rsc or with-rsc). Add the template/tag's corresponding "resource_set" which contains the resources derived from it or tagged with it under the constraint. */ *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET); crm_xml_add(*rsc_set, XML_ATTR_ID, id); for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *obj_ref = (const char *) gIter->data; xmlNode *rsc_ref = NULL; rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF); crm_xml_add(rsc_ref, XML_ATTR_ID, obj_ref); } /* Set sequential="false" for the resource_set */ crm_xml_add(*rsc_set, "sequential", XML_BOOLEAN_FALSE); } else if (rsc && convert_rsc) { /* Even a regular resource is referenced by "attr", convert it into a resource_set. Because the other side of the constraint could be a template/tag reference. 
*/ xmlNode *rsc_ref = NULL; *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET); crm_xml_add(*rsc_set, XML_ATTR_ID, id); rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF); crm_xml_add(rsc_ref, XML_ATTR_ID, id); } else { return TRUE; } /* Remove the "attr" attribute referencing the template/tag */ if (*rsc_set) { xml_remove_prop(xml_obj, attr); } return TRUE; } static gboolean unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role, const char * score, pe_working_set_t * data_set, pe_match_data_t * match_data); static gboolean unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); if(value) { resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, value); return unpack_rsc_location(xml_obj, rsc_lh, NULL, NULL, data_set, NULL); } value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN); if(value) { regex_t *r_patt = calloc(1, sizeof(regex_t)); bool invert = FALSE; GListPtr rIter = NULL; if(value[0] == '!') { value++; invert = TRUE; } if (regcomp(r_patt, value, REG_EXTENDED)) { crm_config_err("Bad regex '%s' for constraint '%s'", value, id); regfree(r_patt); free(r_patt); return FALSE; } for (rIter = data_set->resources; rIter; rIter = rIter->next) { resource_t *r = rIter->data; int nregs = 0; regmatch_t *pmatch = NULL; int status; if(r_patt->re_nsub > 0) { nregs = r_patt->re_nsub + 1; } else { nregs = 1; } pmatch = calloc(nregs, sizeof(regmatch_t)); status = regexec(r_patt, r->id, nregs, pmatch, 0); if(invert == FALSE && status == 0) { pe_re_match_data_t re_match_data = { .string = r->id, .nregs = nregs, .pmatch = pmatch }; pe_match_data_t match_data = { .re = &re_match_data, .params = r->parameters, .meta = r->meta, }; crm_debug("'%s' matched '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &match_data); } else if (invert && (status != 0)) { crm_debug("'%s' is an inverted match of '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, NULL); } else { crm_trace("'%s' does not match '%s' for %s", r->id, value, id); } free(pmatch); } regfree(r_patt); free(r_patt); } return FALSE; } static gboolean unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role, const char * score, pe_working_set_t * data_set, pe_match_data_t * match_data) { pe__location_t *location = NULL; const char *id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE); const char *discovery = crm_element_value(xml_obj, XML_LOCATION_ATTR_DISCOVERY); if (rsc_lh == NULL) { /* only a warn as BSC adds the constraint then the resource */ crm_config_warn("No resource (con=%s, rsc=%s)", id, id_lh); return FALSE; } if (score == NULL) { score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); } if (node != NULL && score != NULL) { int score_i = char2score(score); node_t *match = pe_find_node(data_set->nodes, node); if (!match) { return FALSE; } location = rsc2node_new(id, rsc_lh, score_i, discovery, match, data_set); } else { bool empty = TRUE; crm_time_t *next_change = crm_time_new_undefined(); /* This loop is logically parallel to pe_evaluate_rules(), except * instead of checking whether any rule is active, we set up location * constraints for each active rule. 
*/ for (xmlNode *rule_xml = first_named_child(xml_obj, XML_TAG_RULE); rule_xml != NULL; rule_xml = crm_next_same_xml(rule_xml)) { empty = FALSE; crm_trace("Unpacking %s/%s", id, ID(rule_xml)); generate_location_rule(rsc_lh, rule_xml, discovery, next_change, data_set, match_data); } if (empty) { crm_config_err("Invalid location constraint %s:" " rsc_location must contain at least one rule", id); } /* If there is a point in the future when the evaluation of a rule will * change, make sure the scheduler is re-run by that time. */ if (crm_time_is_defined(next_change)) { time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(t, data_set); } crm_time_free(next_change); return TRUE; } if (role == NULL) { role = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE); } if (location && role) { if (text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Invalid constraint %s: Bad role %s", id, role); return FALSE; } else { enum rsc_role_e r = text2role(role); switch(r) { case RSC_ROLE_UNKNOWN: case RSC_ROLE_STARTED: case RSC_ROLE_SLAVE: /* Applies to all */ location->role_filter = RSC_ROLE_UNKNOWN; break; default: location->role_filter = r; break; } } } return TRUE; } static gboolean unpack_location_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh = NULL; const char *state_lh = NULL; resource_t *rsc_lh = NULL; tag_t *tag_lh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_location..."); *expanded_xml = new_xml; return TRUE; } id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); if (id_lh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } else if (rsc_lh) { /* No template is referenced. */ return TRUE; } state_lh = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_location constraint. */ if (tag_to_set(new_xml, &rsc_set_lh, XML_LOC_ATTR_SOURCE, FALSE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role"" attribute. 
*/ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_RULE_ATTR_ROLE); } crm_log_xml_trace(new_xml, "Expanded rsc_location..."); *expanded_xml = new_xml; } else { /* No sets */ free_xml(new_xml); } return TRUE; } static gboolean unpack_location_set(xmlNode * location, xmlNode * set, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *resource = NULL; const char *set_id; const char *role; const char *local_score; if (set == NULL) { crm_config_err("No resource_set object to process."); return FALSE; } set_id = ID(set); if (set_id == NULL) { crm_config_err("resource_set must have an id"); return FALSE; } role = crm_element_value(set, "role"); local_score = crm_element_value(set, XML_RULE_ATTR_SCORE); for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); unpack_rsc_location(location, resource, role, local_score, data_set, NULL); } } return TRUE; } gboolean unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) { xmlNode *set = NULL; gboolean any_sets = FALSE; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; if (unpack_location_tags(xml_obj, &expanded_xml, data_set) == FALSE) { return FALSE; } if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } for (set = __xml_first_child_element(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_location_set(xml_obj, set, data_set) == FALSE) { if (expanded_xml) { free_xml(expanded_xml); } return FALSE; } } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_location(xml_obj, data_set); } return TRUE; } static int get_node_score(const char *rule, const char *score, gboolean raw, node_t * node, resource_t *rsc) { int score_f = 0; if (score == NULL) { pe_err("Rule %s: no score specified. 
Assuming 0.", rule); } else if (raw) { score_f = char2score(score); } else { const char *attr_score = pe_node_attribute_calculated(node, score, rsc); if (attr_score == NULL) { crm_debug("Rule %s: node %s did not have a value for %s", rule, node->details->uname, score); score_f = -INFINITY; } else { crm_debug("Rule %s: node %s had value %s for %s", rule, node->details->uname, attr_score, score); score_f = char2score(attr_score); } } return score_f; } static pe__location_t * generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, const char *discovery, crm_time_t *next_change, pe_working_set_t *data_set, pe_match_data_t *match_data) { const char *rule_id = NULL; const char *score = NULL; const char *boolean = NULL; const char *role = NULL; GListPtr gIter = NULL; GListPtr match_L = NULL; gboolean do_and = TRUE; gboolean accept = TRUE; gboolean raw_score = TRUE; gboolean score_allocated = FALSE; pe__location_t *location_rule = NULL; rule_xml = expand_idref(rule_xml, data_set->input); rule_id = crm_element_value(rule_xml, XML_ATTR_ID); boolean = crm_element_value(rule_xml, XML_RULE_ATTR_BOOLEAN_OP); role = crm_element_value(rule_xml, XML_RULE_ATTR_ROLE); crm_trace("Processing rule: %s", rule_id); if (role != NULL && text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Bad role specified for %s: %s", rule_id, role); return NULL; } score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE); if (score == NULL) { score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE_ATTRIBUTE); if (score != NULL) { raw_score = FALSE; } } if (safe_str_eq(boolean, "or")) { do_and = FALSE; } location_rule = rsc2node_new(rule_id, rsc, 0, discovery, NULL, data_set); if (location_rule == NULL) { return NULL; } if (match_data && match_data->re && match_data->re->nregs > 0 && match_data->re->pmatch[0].rm_so != -1) { if (raw_score == FALSE) { char *result = pe_expand_re_matches(score, match_data->re); if (result) { score = (const char *) result; score_allocated = TRUE; } } } if (role != NULL) { crm_trace("Setting role filter: %s", role); location_rule->role_filter = text2role(role); if (location_rule->role_filter == RSC_ROLE_SLAVE) { /* Any promotable clone cannot be promoted without being a slave first * Ergo, any constraint for the slave role applies to every role */ location_rule->role_filter = RSC_ROLE_UNKNOWN; } } if (do_and) { GListPtr gIter = NULL; match_L = node_list_dup(data_set->nodes, TRUE, FALSE); for (gIter = match_L; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node->weight = get_node_score(rule_id, score, raw_score, node, rsc); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int score_f = 0; node_t *node = (node_t *) gIter->data; accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN, data_set->now, next_change, match_data); crm_trace("Rule %s %s on %s", ID(rule_xml), accept ? 
"passed" : "failed", node->details->uname); score_f = get_node_score(rule_id, score, raw_score, node, rsc); /* if(accept && score_f == -INFINITY) { */ /* accept = FALSE; */ /* } */ if (accept) { node_t *local = pe_find_node_id(match_L, node->details->id); if (local == NULL && do_and) { continue; } else if (local == NULL) { local = node_copy(node); match_L = g_list_append(match_L, local); } if (do_and == FALSE) { local->weight = merge_weights(local->weight, score_f); } crm_trace("node %s now has weight %d", node->details->uname, local->weight); } else if (do_and && !accept) { /* remove it */ node_t *delete = pe_find_node_id(match_L, node->details->id); if (delete != NULL) { match_L = g_list_remove(match_L, delete); crm_trace("node %s did not match", node->details->uname); } free(delete); } } if (score_allocated == TRUE) { free((char *)score); } location_rule->node_list_rh = match_L; if (location_rule->node_list_rh == NULL) { crm_trace("No matching nodes for rule %s", rule_id); return NULL; } crm_trace("%s: %d nodes matched", rule_id, g_list_length(location_rule->node_list_rh)); return location_rule; } static gint sort_cons_priority_lh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if (rsc_constraint1->rsc_lh->priority > rsc_constraint2->rsc_lh->priority) { return -1; } if (rsc_constraint1->rsc_lh->priority < rsc_constraint2->rsc_lh->priority) { return 1; } /* Process clones before primitives and groups */ if (rsc_constraint1->rsc_lh->variant > rsc_constraint2->rsc_lh->variant) { return -1; } else if (rsc_constraint1->rsc_lh->variant < rsc_constraint2->rsc_lh->variant) { return 1; } /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable * clones (probably unnecessary, but avoids having to update regression * tests) */ if (rsc_constraint1->rsc_lh->variant == pe_clone) { if (is_set(rsc_constraint1->rsc_lh->flags, pe_rsc_promotable) && is_not_set(rsc_constraint2->rsc_lh->flags, pe_rsc_promotable)) { return -1; } else if (is_not_set(rsc_constraint1->rsc_lh->flags, pe_rsc_promotable) && is_set(rsc_constraint2->rsc_lh->flags, pe_rsc_promotable)) { return 1; } } return strcmp(rsc_constraint1->rsc_lh->id, rsc_constraint2->rsc_lh->id); } static gint sort_cons_priority_rh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if (rsc_constraint1->rsc_rh->priority > rsc_constraint2->rsc_rh->priority) { return -1; } if (rsc_constraint1->rsc_rh->priority < rsc_constraint2->rsc_rh->priority) { return 1; } /* Process clones before primitives and groups */ if (rsc_constraint1->rsc_rh->variant > rsc_constraint2->rsc_rh->variant) { return -1; } else if (rsc_constraint1->rsc_rh->variant < rsc_constraint2->rsc_rh->variant) { return 1; } /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable * clones (probably unnecessary, but avoids having to update regression * tests) */ if (rsc_constraint1->rsc_rh->variant == pe_clone) { if (is_set(rsc_constraint1->rsc_rh->flags, pe_rsc_promotable) && is_not_set(rsc_constraint2->rsc_rh->flags, 
pe_rsc_promotable)) { return -1; } else if (is_not_set(rsc_constraint1->rsc_rh->flags, pe_rsc_promotable) && is_set(rsc_constraint2->rsc_rh->flags, pe_rsc_promotable)) { return 1; } } return strcmp(rsc_constraint1->rsc_rh->id, rsc_constraint2->rsc_rh->id); } static void anti_colocation_order(resource_t * first_rsc, int first_role, resource_t * then_rsc, int then_role, pe_working_set_t * data_set) { const char *first_tasks[] = { NULL, NULL }; const char *then_tasks[] = { NULL, NULL }; int first_lpc = 0; int then_lpc = 0; /* Actions to make first_rsc lose first_role */ if (first_role == RSC_ROLE_MASTER) { first_tasks[0] = CRMD_ACTION_DEMOTE; } else { first_tasks[0] = CRMD_ACTION_STOP; if (first_role == RSC_ROLE_SLAVE) { first_tasks[1] = CRMD_ACTION_PROMOTE; } } /* Actions to make then_rsc gain then_role */ if (then_role == RSC_ROLE_MASTER) { then_tasks[0] = CRMD_ACTION_PROMOTE; } else { then_tasks[0] = CRMD_ACTION_START; if (then_role == RSC_ROLE_SLAVE) { then_tasks[1] = CRMD_ACTION_DEMOTE; } } for (first_lpc = 0; first_lpc <= 1 && first_tasks[first_lpc] != NULL; first_lpc++) { for (then_lpc = 0; then_lpc <= 1 && then_tasks[then_lpc] != NULL; then_lpc++) { new_rsc_order(first_rsc, first_tasks[first_lpc], then_rsc, then_tasks[then_lpc], pe_order_anti_colocation, data_set); } } } gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t * rsc_lh, resource_t * rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t * data_set) { rsc_colocation_t *new_con = NULL; if (rsc_lh == NULL) { crm_config_err("No resource found for LHS %s", id); return FALSE; } else if (rsc_rh == NULL) { crm_config_err("No resource found for RHS of %s", id); return FALSE; } new_con = calloc(1, sizeof(rsc_colocation_t)); if (new_con == NULL) { return FALSE; } if (state_lh == NULL || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { state_lh = RSC_ROLE_UNKNOWN_S; } if (state_rh == NULL || safe_str_eq(state_rh, RSC_ROLE_STARTED_S)) { state_rh = RSC_ROLE_UNKNOWN_S; } new_con->id = id; new_con->rsc_lh = rsc_lh; new_con->rsc_rh = rsc_rh; new_con->score = score; new_con->role_lh = text2role(state_lh); new_con->role_rh = text2role(state_rh); new_con->node_attribute = node_attr; if (node_attr == NULL) { node_attr = CRM_ATTR_UNAME; } pe_rsc_trace(rsc_lh, "%s ==> %s (%s %d)", rsc_lh->id, rsc_rh->id, node_attr, score); rsc_lh->rsc_cons = g_list_insert_sorted(rsc_lh->rsc_cons, new_con, sort_cons_priority_rh); rsc_rh->rsc_cons_lhs = g_list_insert_sorted(rsc_rh->rsc_cons_lhs, new_con, sort_cons_priority_lh); data_set->colocation_constraints = g_list_append(data_set->colocation_constraints, new_con); if (score <= -INFINITY) { anti_colocation_order(rsc_lh, new_con->role_lh, rsc_rh, new_con->role_rh, data_set); anti_colocation_order(rsc_rh, new_con->role_rh, rsc_lh, new_con->role_lh, data_set); } return TRUE; } /* LHS before RHS */ int new_rsc_order(resource_t * lh_rsc, const char *lh_task, resource_t * rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t * data_set) { char *lh_key = NULL; char *rh_key = NULL; CRM_CHECK(lh_rsc != NULL, return -1); CRM_CHECK(lh_task != NULL, return -1); CRM_CHECK(rh_rsc != NULL, return -1); CRM_CHECK(rh_task != NULL, return -1); #if 0 /* We do not need to test if these reference stonith resources * because the fencer has access to them even when they're not "running" */ if (validate_order_resources(lh_rsc, lh_task, rh_rsc, rh_task)) { return -1; } #endif lh_key = generate_op_key(lh_rsc->id, lh_task, 0); rh_key = generate_op_key(rh_rsc->id, rh_task, 0); 
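Note on the op keys generated just above: new_rsc_order() hands freshly allocated keys to custom_action_order(), which takes ownership of them — on the invalid-input path it frees them itself, otherwise it stores them in the new ordering, so callers never free the keys afterwards. A minimal sketch of that consuming convention (add_order() and its struct are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct order { char *lh_key; char *rh_key; };

/* Consumes both keys: frees them on failure, keeps them on success. */
static int add_order(struct order *o, char *lh_key, char *rh_key)
{
    if (lh_key == NULL || rh_key == NULL) {
        free(lh_key);          /* free(NULL) is safe, so no extra checks */
        free(rh_key);
        return -1;             /* caller must NOT free the keys again */
    }
    o->lh_key = lh_key;        /* ownership moves into the struct */
    o->rh_key = rh_key;
    return 0;
}

int main(void)
{
    struct order o;
    if (add_order(&o, strdup("rsc1_start_0"), strdup("rsc2_start_0")) == 0) {
        printf("%s before %s\n", o.lh_key, o.rh_key);
        free(o.lh_key);
        free(o.rh_key);
    }
    return 0;
}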
return custom_action_order(lh_rsc, lh_key, NULL, rh_rsc, rh_key, NULL, type, data_set); } static char * task_from_action_or_key(action_t *action, const char *key) { char *res = NULL; if (action) { res = strdup(action->task); } else if (key) { parse_op_key(key, NULL, &res, NULL); } return res; } /* when order constraints are made between two resources start and stop actions * those constraints have to be mirrored against the corresponding * migration actions to ensure start/stop ordering is preserved during * a migration */ static void handle_migration_ordering(pe__ordering_t *order, pe_working_set_t *data_set) { char *lh_task = NULL; char *rh_task = NULL; gboolean rh_migratable; gboolean lh_migratable; if (order->lh_rsc == NULL || order->rh_rsc == NULL) { return; } else if (order->lh_rsc == order->rh_rsc) { return; /* don't mess with those constraints built between parent * resources and the children */ } else if (is_parent(order->lh_rsc, order->rh_rsc)) { return; } else if (is_parent(order->rh_rsc, order->lh_rsc)) { return; } lh_migratable = is_set(order->lh_rsc->flags, pe_rsc_allow_migrate); rh_migratable = is_set(order->rh_rsc->flags, pe_rsc_allow_migrate); /* one of them has to be migratable for * the migrate ordering logic to be applied */ if (lh_migratable == FALSE && rh_migratable == FALSE) { return; } /* at this point we have two resources which allow migrations that have an * order dependency set between them. If those order dependencies involve * start/stop actions, we need to mirror the corresponding migrate actions * so order will be preserved. */ lh_task = task_from_action_or_key(order->lh_action, order->lh_action_task); rh_task = task_from_action_or_key(order->rh_action, order->rh_action_task); if (lh_task == NULL || rh_task == NULL) { goto cleanup_order; } if (safe_str_eq(lh_task, RSC_START) && safe_str_eq(rh_task, RSC_START)) { int flags = pe_order_optional; if (lh_migratable && rh_migratable) { /* A start then B start * A migrate_from then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_MIGRATED, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } if (rh_migratable) { if (lh_migratable) { flags |= pe_order_apply_first_non_migratable; } /* A start then B start * A start then B migrate_to... only if A start is not a part of a migration*/ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_START, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } } else if (rh_migratable == TRUE && safe_str_eq(lh_task, RSC_STOP) && safe_str_eq(rh_task, RSC_STOP)) { int flags = pe_order_optional; if (lh_migratable) { flags |= pe_order_apply_first_non_migratable; } /* rh side is at the bottom of the stack during a stop. If we have a constraint * stop B then stop A, if B is migrating via stop/start, and A is migrating using migration actions, * we need to enforce that A's migrate_to action occurs after B's stop action. */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_STOP, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); /* We need to build the stop constraint against migrate_from as well * to account for partial migrations. 
*/ if (order->rh_rsc->partial_migration_target) { custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_STOP, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATED, 0), NULL, flags, data_set); } } else if (safe_str_eq(lh_task, RSC_PROMOTE) && safe_str_eq(rh_task, RSC_START)) { int flags = pe_order_optional; if (rh_migratable) { /* A promote then B start * A promote then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_PROMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } } else if (safe_str_eq(lh_task, RSC_DEMOTE) && safe_str_eq(rh_task, RSC_STOP)) { int flags = pe_order_optional; if (rh_migratable) { /* A demote then B stop * A demote then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_DEMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); /* We need to build the demote constraint against migrate_from as well * to account for partial migrations. */ if (order->rh_rsc->partial_migration_target) { custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_DEMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATED, 0), NULL, flags, data_set); } } } cleanup_order: free(lh_task); free(rh_task); } /* LHS before RHS */ int custom_action_order(resource_t * lh_rsc, char *lh_action_task, action_t * lh_action, resource_t * rh_rsc, char *rh_action_task, action_t * rh_action, enum pe_ordering type, pe_working_set_t * data_set) { pe__ordering_t *order = NULL; if (lh_rsc == NULL && lh_action) { lh_rsc = lh_action->rsc; } if (rh_rsc == NULL && rh_action) { rh_rsc = rh_action->rsc; } if ((lh_action == NULL && lh_rsc == NULL) || (rh_action == NULL && rh_rsc == NULL)) { crm_config_err("Invalid inputs %p.%p %p.%p", lh_rsc, lh_action, rh_rsc, rh_action); free(lh_action_task); free(rh_action_task); return -1; } order = calloc(1, sizeof(pe__ordering_t)); crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id, lh_rsc?lh_rsc->id:"NA", lh_action_task, lh_action?lh_action->uuid:"NA", rh_rsc?rh_rsc->id:"NA", rh_action_task, rh_action?rh_action->uuid:"NA"); /* CRM_ASSERT(data_set->order_id != 291); */ order->id = data_set->order_id++; order->type = type; order->lh_rsc = lh_rsc; order->rh_rsc = rh_rsc; order->lh_action = lh_action; order->rh_action = rh_action; order->lh_action_task = lh_action_task; order->rh_action_task = rh_action_task; if (order->lh_action_task == NULL && lh_action) { order->lh_action_task = strdup(lh_action->uuid); } if (order->rh_action_task == NULL && rh_action) { order->rh_action_task = strdup(rh_action->uuid); } if (order->lh_rsc == NULL && lh_action) { order->lh_rsc = lh_action->rsc; } if (order->rh_rsc == NULL && rh_action) { order->rh_rsc = rh_action->rsc; } data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints, order); handle_migration_ordering(order, data_set); return order->id; } enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind) { enum pe_ordering flags = pe_order_optional; if (kind == pe_order_kind_mandatory) { flags |= pe_order_asymmetrical; } else if (kind == pe_order_kind_serialize) { flags |= pe_order_serialize_only; } return flags; } enum pe_ordering get_flags(const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert) { enum pe_ordering flags = pe_order_optional; if (invert && kind == pe_order_kind_mandatory) { 
crm_trace("Upgrade %s: implies left", id); flags |= pe_order_implies_first; } else if (kind == pe_order_kind_mandatory) { crm_trace("Upgrade %s: implies right", id); flags |= pe_order_implies_then; if (safe_str_eq(action_first, RSC_START) || safe_str_eq(action_first, RSC_PROMOTE)) { crm_trace("Upgrade %s: runnable", id); flags |= pe_order_runnable_left; } } else if (kind == pe_order_kind_serialize) { flags |= pe_order_serialize_only; } return flags; } static gboolean unpack_order_set(xmlNode * set, enum pe_order_kind parent_kind, resource_t ** rsc, action_t ** begin, action_t ** end, action_t ** inv_begin, action_t ** inv_end, const char *parent_symmetrical_s, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; GListPtr set_iter = NULL; GListPtr resources = NULL; resource_t *last = NULL; resource_t *resource = NULL; int local_kind = parent_kind; gboolean sequential = FALSE; enum pe_ordering flags = pe_order_optional; gboolean symmetrical = TRUE; char *key = NULL; const char *id = ID(set); const char *action = crm_element_value(set, "action"); const char *sequential_s = crm_element_value(set, "sequential"); const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND); /* char *pseudo_id = NULL; char *end_id = NULL; char *begin_id = NULL; */ if (action == NULL) { action = RSC_START; } if (kind_s) { local_kind = get_ordering_type(set); } if (sequential_s == NULL) { sequential_s = "1"; } sequential = crm_is_true(sequential_s); symmetrical = order_is_symmetrical(set, parent_kind, parent_symmetrical_s); if (symmetrical) { flags = get_flags(id, local_kind, action, action, FALSE); } else { flags = get_asymmetrical_flags(local_kind); } for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc)); resources = g_list_append(resources, resource); } } - if (g_list_length(resources) == 1) { + if (pcmk__list_of_1(resources)) { crm_trace("Single set: %s", id); *rsc = resource; *end = NULL; *begin = NULL; *inv_end = NULL; *inv_begin = NULL; goto done; } /* pseudo_id = crm_concat(id, action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id = crm_concat(pseudo_id, "begin", '-'); */ *rsc = NULL; /* *end = get_pseudo_op(end_id, data_set); *begin = get_pseudo_op(begin_id, data_set); free(pseudo_id); free(begin_id); free(end_id); */ set_iter = resources; while (set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; key = generate_op_key(resource->id, action, 0); /* custom_action_order(NULL, NULL, *begin, resource, strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, strdup(key), NULL, NULL, NULL, *end, flags|pe_order_implies_then_printed, data_set); */ if (local_kind == pe_order_kind_serialize) { /* Serialize before everything that comes after */ GListPtr gIter = NULL; for (gIter = set_iter; gIter != NULL; gIter = gIter->next) { resource_t *then_rsc = (resource_t *) gIter->data; char *then_key = generate_op_key(then_rsc->id, action, 0); custom_action_order(resource, strdup(key), NULL, then_rsc, then_key, NULL, flags, data_set); } } else if (sequential) { if (last != NULL) { new_rsc_order(last, action, resource, action, flags, data_set); } last = resource; } free(key); } if (symmetrical == FALSE) { goto done; } last = NULL; action = invert_action(action); /* pseudo_id = crm_concat(id, action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id 
= crm_concat(pseudo_id, "begin", '-'); *inv_end = get_pseudo_op(end_id, data_set); *inv_begin = get_pseudo_op(begin_id, data_set); free(pseudo_id); free(begin_id); free(end_id); */ flags = get_flags(id, local_kind, action, action, TRUE); set_iter = resources; while (set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; /* key = generate_op_key(resource->id, action, 0); custom_action_order(NULL, NULL, *inv_begin, resource, strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, key, NULL, NULL, NULL, *inv_end, flags|pe_order_implies_then_printed, data_set); */ if (sequential) { if (last != NULL) { new_rsc_order(resource, action, last, action, flags, data_set); } last = resource; } } done: g_list_free(resources); return TRUE; } static gboolean order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kind kind, pe_working_set_t * data_set, gboolean invert, gboolean symmetrical) { xmlNode *xml_rsc = NULL; xmlNode *xml_rsc_2 = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *action_1 = crm_element_value(set1, "action"); const char *action_2 = crm_element_value(set2, "action"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); const char *require_all_s = crm_element_value(set1, "require-all"); gboolean require_all = require_all_s ? crm_is_true(require_all_s) : TRUE; enum pe_ordering flags = pe_order_none; if (action_1 == NULL) { action_1 = RSC_START; }; if (action_2 == NULL) { action_2 = RSC_START; }; if (invert) { action_1 = invert_action(action_1); action_2 = invert_action(action_2); } if(safe_str_eq(RSC_STOP, action_1) || safe_str_eq(RSC_DEMOTE, action_1)) { /* Assuming: A -> ( B || C) -> D * The one-or-more logic only applies during the start/promote phase * During shutdown neither B nor C can shut down until D is down, so simply turn require_all back on. */ require_all = TRUE; } if (symmetrical == FALSE) { flags = get_asymmetrical_flags(kind); } else { flags = get_flags(id, kind, action_2, action_1, invert); } /* If we have an un-ordered set1, whether it is sequential or not is irrelevant with regard to set2. */ if (!require_all) { char *task = crm_concat(CRM_OP_RELAXED_SET, ID(set1), ':'); action_t *unordered_action = get_pseudo_op(task, data_set); free(task); update_action_flags(unordered_action, pe_action_requires_any, __FUNCTION__, __LINE__); for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (!crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { continue; } EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); /* Add an ordering constraint between every element in set1 and the pseudo action. * If any action in set1 is runnable the pseudo action will be runnable. */ custom_action_order(rsc_1, generate_op_key(rsc_1->id, action_1, 0), NULL, NULL, NULL, unordered_action, pe_order_one_or_more | pe_order_implies_then_printed, data_set); } for (xml_rsc_2 = __xml_first_child_element(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (!crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { continue; } EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); /* Add an ordering constraint between the pseudo action and every element in set2.
* If the pseudo action is runnable, every action in set2 will be runnable */ custom_action_order(NULL, NULL, unordered_action, rsc_2, generate_op_key(rsc_2->id, action_2, 0), NULL, flags | pe_order_runnable_left, data_set); } return TRUE; } if (crm_is_true(sequential_1)) { if (invert == FALSE) { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_1, rid); } else { /* get the first one */ for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } } if (crm_is_true(sequential_2)) { if (invert == FALSE) { /* get the first one */ for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); break; } } } else { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } } if (rsc_1 != NULL && rsc_2 != NULL) { new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } else if (rsc_1 != NULL) { for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else if (rsc_2 != NULL) { xmlNode *xml_rsc = NULL; for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else { for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for (xml_rsc_2 = __xml_first_child_element(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } } } return TRUE; } static gboolean unpack_order_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_first = NULL; const char *id_then = NULL; const char *action_first = NULL; const char *action_then = NULL; resource_t *rsc_first = NULL; resource_t *rsc_then = NULL; tag_t *tag_first = NULL; tag_t *tag_then = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_first = NULL; xmlNode *rsc_set_then = NULL; gboolean any_sets = FALSE; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s 
constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_order..."); *expanded_xml = new_xml; return TRUE; } id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST); id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN); if (id_first == NULL || id_then == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_first, &rsc_first, &tag_first) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_first); return FALSE; } if (valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_then); return FALSE; } if (rsc_first && rsc_then) { /* Neither side references any template/tag. */ return TRUE; } action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION); action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "first" into a resource_set under the order constraint. */ if (tag_to_set(new_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_first) { if (action_first) { /* A "first-action" is specified. Move it into the converted resource_set as an "action" attribute. */ crm_xml_add(rsc_set_first, "action", action_first); xml_remove_prop(new_xml, XML_ORDER_ATTR_FIRST_ACTION); } any_sets = TRUE; } /* Convert the template/tag reference in "then" into a resource_set under the order constraint. */ if (tag_to_set(new_xml, &rsc_set_then, XML_ORDER_ATTR_THEN, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_then) { if (action_then) { /* A "then-action" is specified. Move it into the converted resource_set as an "action" attribute. 
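 *
 * Roughly, and with illustrative ids only (generated set ids are
 * abbreviated as "..."), a constraint such as
 *   <rsc_order id="o1" first="rscA" then="tagB" then-action="start"/>
 * appears to be rewritten along the lines of
 *   <rsc_order id="o1" first="rscA">
 *     <resource_set id="..." action="start">
 *       <resource_ref id="..."/> (one per member of tagB)
 *     </resource_set>
 *   </rsc_order>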
*/ crm_xml_add(rsc_set_then, "action", action_then); xml_remove_prop(new_xml, XML_ORDER_ATTR_THEN_ACTION); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_order..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) { gboolean any_sets = FALSE; resource_t *rsc = NULL; /* resource_t *last_rsc = NULL; */ action_t *set_end = NULL; action_t *set_begin = NULL; action_t *set_inv_end = NULL; action_t *set_inv_begin = NULL; xmlNode *set = NULL; xmlNode *last = NULL; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; /* action_t *last_end = NULL; action_t *last_begin = NULL; action_t *last_inv_end = NULL; action_t *last_inv_begin = NULL; */ const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); enum pe_order_kind kind = get_ordering_type(xml_obj); gboolean invert_bool = order_is_symmetrical(xml_obj, kind, NULL); gboolean rc = TRUE; rc = unpack_order_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child_element(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_order_set(set, kind, &rsc, &set_begin, &set_end, &set_inv_begin, &set_inv_end, invert, data_set) == FALSE) { return FALSE; /* Expand orders in order_rsc_sets() instead of via pseudo actions. */ /* } else if(last) { const char *set_action = crm_element_value(set, "action"); const char *last_action = crm_element_value(last, "action"); enum pe_ordering flags = get_flags(id, kind, last_action, set_action, FALSE); if(!set_action) { set_action = RSC_START; } if(!last_action) { last_action = RSC_START; } if(rsc == NULL && last_rsc == NULL) { order_actions(last_end, set_begin, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_end, rsc, null_or_opkey(rsc, set_action), set_begin, flags, data_set); } if(crm_is_true(invert)) { set_action = invert_action(set_action); last_action = invert_action(last_action); flags = get_flags(id, kind, last_action, set_action, TRUE); if(rsc == NULL && last_rsc == NULL) { order_actions(last_inv_begin, set_inv_end, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_inv_begin, rsc, null_or_opkey(rsc, set_action), set_inv_end, flags, data_set); } } */ } else if ( /* never called -- Now call it for supporting clones in resource sets */ last) { if (order_rsc_sets(id, last, set, kind, data_set, FALSE, invert_bool) == FALSE) { return FALSE; } if (invert_bool && order_rsc_sets(id, set, last, kind, data_set, TRUE, invert_bool) == FALSE) { return FALSE; } } last = set; /* last_rsc = rsc; last_end = set_end; last_begin = set_begin; last_inv_end = set_inv_end; last_inv_begin = set_inv_begin; */ } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_rsc_order(xml_obj, data_set); } return TRUE; } static gboolean unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *with = NULL; resource_t *resource = NULL; const char *set_id = ID(set); const char *role = crm_element_value(set, "role"); const char *sequential = crm_element_value(set, "sequential"); const char *ordering = 
crm_element_value(set, "ordering"); int local_score = score; const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE); if (score_s) { local_score = char2score(score_s); } if(ordering == NULL) { ordering = "group"; } if (sequential != NULL && crm_is_true(sequential) == FALSE) { return TRUE; } else if (local_score >= 0 && safe_str_eq(ordering, "group")) { for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); if (with != NULL) { pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } with = resource; } } } else if (local_score >= 0) { resource_t *last = NULL; for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); if (last != NULL) { pe_rsc_trace(resource, "Colocating %s with %s", last->id, resource->id); rsc_colocation_new(set_id, NULL, local_score, last, resource, role, role, data_set); } last = resource; } } } else { /* Anti-colocating with every prior resource is * the only way to ensure the intuitive result * (i.e. that no one in the set can run with anyone else in the set) */ for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_with = NULL; EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); for (xml_rsc_with = __xml_first_child_element(set); xml_rsc_with != NULL; xml_rsc_with = __xml_next_element(xml_rsc_with)) { if (crm_str_eq((const char *)xml_rsc_with->name, XML_TAG_RESOURCE_REF, TRUE)) { if (safe_str_eq(resource->id, ID(xml_rsc_with))) { break; } EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with)); pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } } } } } return TRUE; } static gboolean colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *role_1 = crm_element_value(set1, "role"); const char *role_2 = crm_element_value(set2, "role"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); if (sequential_1 == NULL || crm_is_true(sequential_1)) { /* get the first one */ for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } if (sequential_2 == NULL || crm_is_true(sequential_2)) { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } if (rsc_1 != NULL && rsc_2 != NULL) { rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } else if (rsc_1 != NULL) { for (xml_rsc = __xml_first_child_element(set2); 
xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else if (rsc_2 != NULL) { for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else { for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for (xml_rsc_2 = __xml_first_child_element(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } } } return TRUE; } static gboolean unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) { int score_i = 0; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); const char *id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); const char *state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE); const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR); const char *symmetrical = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); // experimental syntax from pacemaker-next (unlikely to be adopted as-is) const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); const char *instance_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_INSTANCE); resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh); resource_t *rsc_rh = pe_find_constraint_resource(data_set->resources, id_rh); if (rsc_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); return FALSE; } else if (rsc_rh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_rh); return FALSE; } else if (instance_lh && pe_rsc_is_clone(rsc_lh) == FALSE) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); return FALSE; } else if (instance_rh && pe_rsc_is_clone(rsc_rh) == FALSE) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_rh, instance_rh); return FALSE; } if (instance_lh) { rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); if (rsc_lh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); return FALSE; } } if (instance_rh) { rsc_rh = find_clone_instance(rsc_rh, instance_rh, data_set); if (rsc_rh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_rh, id_rh); return FALSE; } } if (crm_is_true(symmetrical)) { crm_config_warn("The %s colocation constraint attribute has been removed."
" It didn't do what you think it did anyway.", XML_CONS_ATTR_SYMMETRICAL); } if (score) { score_i = char2score(score); } rsc_colocation_new(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, data_set); return TRUE; } static gboolean unpack_colocation_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh = NULL; const char *id_rh = NULL; const char *state_lh = NULL; const char *state_rh = NULL; resource_t *rsc_lh = NULL; resource_t *rsc_rh = NULL; tag_t *tag_lh = NULL; tag_t *tag_rh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; xmlNode *rsc_set_rh = NULL; gboolean any_sets = FALSE; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_colocation..."); *expanded_xml = new_xml; return TRUE; } id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); if (id_lh == NULL || id_rh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } if (valid_resource_or_tag(data_set, id_rh, &rsc_rh, &tag_rh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_rh); return FALSE; } if (rsc_lh && rsc_rh) { /* Neither side references any template/tag. */ return TRUE; } if (tag_lh && tag_rh) { /* A colocation constraint between two templates/tags makes no sense. */ crm_config_err("Either LHS or RHS of %s should be a normal resource instead of a template/tag", id); return FALSE; } state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the colocation constraint. */ if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role" attribute. */ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE); } any_sets = TRUE; } /* Convert the template/tag reference in "with-rsc" into a resource_set under the colocation constraint. */ if (tag_to_set(new_xml, &rsc_set_rh, XML_COLOC_ATTR_TARGET, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_rh) { if (state_rh) { /* A "with-rsc-role" is specified. Move it into the converted resource_set as a "role" attribute.
*/ crm_xml_add(rsc_set_rh, "role", state_rh); xml_remove_prop(new_xml, XML_COLOC_ATTR_TARGET_ROLE); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_colocation..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) { int score_i = 0; xmlNode *set = NULL; xmlNode *last = NULL; gboolean any_sets = FALSE; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); gboolean rc = TRUE; if (score) { score_i = char2score(score); } rc = unpack_colocation_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child_element(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_colocation_set(set, score_i, data_set) == FALSE) { return FALSE; } else if (last && colocate_rsc_sets(id, last, set, score_i, data_set) == FALSE) { return FALSE; } last = set; } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_colocation(xml_obj, data_set); } return TRUE; } gboolean rsc_ticket_new(const char *id, resource_t * rsc_lh, ticket_t * ticket, const char *state_lh, const char *loss_policy, pe_working_set_t * data_set) { rsc_ticket_t *new_rsc_ticket = NULL; if (rsc_lh == NULL) { crm_config_err("No resource found for LHS %s", id); return FALSE; } new_rsc_ticket = calloc(1, sizeof(rsc_ticket_t)); if (new_rsc_ticket == NULL) { return FALSE; } if (state_lh == NULL || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { state_lh = RSC_ROLE_UNKNOWN_S; } new_rsc_ticket->id = id; new_rsc_ticket->ticket = ticket; new_rsc_ticket->rsc_lh = rsc_lh; new_rsc_ticket->role_lh = text2role(state_lh); if (safe_str_eq(loss_policy, "fence")) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { new_rsc_ticket->loss_policy = loss_ticket_fence; } else { crm_config_err("Resetting %s loss-policy to 'stop': fencing is not configured", ticket->id); loss_policy = "stop"; } } if (new_rsc_ticket->loss_policy == loss_ticket_fence) { crm_debug("On loss of ticket '%s': Fence the nodes running %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); } else if (safe_str_eq(loss_policy, "freeze")) { crm_debug("On loss of ticket '%s': Freeze %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_freeze; } else if (safe_str_eq(loss_policy, "demote")) { crm_debug("On loss of ticket '%s': Demote %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_demote; } else if (safe_str_eq(loss_policy, "stop")) { crm_debug("On loss of ticket '%s': Stop %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_stop; } else { if (new_rsc_ticket->role_lh == RSC_ROLE_MASTER) { crm_debug("On loss of ticket '%s': Default to demote %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_demote; } else { crm_debug("On loss 
of ticket '%s': Default to stop %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_stop; } } pe_rsc_trace(rsc_lh, "%s (%s) ==> %s", rsc_lh->id, role2text(new_rsc_ticket->role_lh), ticket->id); rsc_lh->rsc_tickets = g_list_append(rsc_lh->rsc_tickets, new_rsc_ticket); data_set->ticket_constraints = g_list_append(data_set->ticket_constraints, new_rsc_ticket); if (new_rsc_ticket->ticket->granted == FALSE || new_rsc_ticket->ticket->standby) { rsc_ticket_constraint(rsc_lh, new_rsc_ticket, data_set); } return TRUE; } static gboolean unpack_rsc_ticket_set(xmlNode * set, ticket_t * ticket, const char *loss_policy, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *resource = NULL; const char *set_id = NULL; const char *role = NULL; CRM_CHECK(set != NULL, return FALSE); CRM_CHECK(ticket != NULL, return FALSE); set_id = ID(set); if (set_id == NULL) { crm_config_err("resource_set must have an id"); return FALSE; } role = crm_element_value(set, "role"); for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'", resource->id, ticket->id); rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set); } return TRUE; } static gboolean unpack_simple_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); ticket_t *ticket = NULL; const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); // experimental syntax from pacemaker-next (unlikely to be adopted as-is) const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); resource_t *rsc_lh = NULL; if (xml_obj == NULL) { crm_config_err("No rsc_ticket constraint object to process."); return FALSE; } if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } if (ticket_str == NULL) { crm_config_err("Invalid constraint '%s': No ticket specified", id); return FALSE; } else { ticket = g_hash_table_lookup(data_set->tickets, ticket_str); } if (ticket == NULL) { crm_config_err("Invalid constraint '%s': No ticket named '%s'", id, ticket_str); return FALSE; } if (id_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource specified", id); return FALSE; } else { rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh); } if (rsc_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); return FALSE; } else if (instance_lh && pe_rsc_is_clone(rsc_lh) == FALSE) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); return FALSE; } if (instance_lh) { rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); if (rsc_lh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); return FALSE; } } rsc_ticket_new(id, rsc_lh, ticket, state_lh, loss_policy, data_set); return TRUE; } static gboolean unpack_rsc_ticket_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh 
= NULL; const char *state_lh = NULL; resource_t *rsc_lh = NULL; tag_t *tag_lh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; gboolean any_sets = FALSE; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_ticket..."); *expanded_xml = new_xml; return TRUE; } id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); if (id_lh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } else if (rsc_lh) { /* No template/tag is referenced. */ return TRUE; } state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_ticket constraint. */ if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, FALSE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role" attribute. */ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_ticket..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set) { xmlNode *set = NULL; gboolean any_sets = FALSE; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); ticket_t *ticket = NULL; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; gboolean rc = TRUE; if (xml_obj == NULL) { crm_config_err("No rsc_ticket constraint object to process."); return FALSE; } if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket); } if (ticket_str == NULL) { crm_config_err("Invalid constraint '%s': No ticket specified", id); return FALSE; } else { ticket = g_hash_table_lookup(data_set->tickets, ticket_str); } if (ticket == NULL) { ticket = ticket_new(ticket_str, data_set); if (ticket == NULL) { return FALSE; } } rc = unpack_rsc_ticket_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child_element(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_rsc_ticket_set(set, ticket, loss_policy, data_set) == FALSE) { return FALSE; } } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_rsc_ticket(xml_obj, data_set); } return TRUE; } diff --git
a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c index 38e90bf467..9ebdd35856 100644 --- a/lib/pacemaker/pcmk_sched_native.c +++ b/lib/pacemaker/pcmk_sched_native.c @@ -1,3326 +1,3326 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include // The controller removes the resource from the CIB, making this redundant // #define DELETE_THEN_REFRESH 1 #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); static void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); static void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); static void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); static void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set); gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); /* *INDENT-OFF* */ enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, }; /* *INDENT-ON* */ static gboolean native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * 
data_set) { GListPtr nodes = NULL; node_t *chosen = NULL; node_t *best = NULL; int multiple = 1; int length = 0; gboolean result = FALSE; process_utilization(rsc, &prefer, data_set); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? TRUE : FALSE; } // Sort allowed nodes by weight if (rsc->allowed_nodes) { length = g_hash_table_size(rsc->allowed_nodes); } if (length > 0) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set); // First node in sorted list has the best score best = g_list_nth_data(nodes, 0); } if (prefer && nodes) { chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); /* Favor the preferred node as long as its weight is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's weight is less than INFINITY. */ } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else if (!can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s (ignoring %d candidates)", chosen->details->uname, rsc->id, length); } } if ((chosen == NULL) && nodes) { /* Either there is no preferred node, or the preferred node is not * available, but there are other nodes allowed to run the resource. */ chosen = best; pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ? chosen->details->uname : "", rsc->id, length); if (!pe_rsc_is_unique_clone(rsc->parent) && chosen && (chosen->weight > 0) && can_run_resources(chosen)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * distribute_children() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unallocated instances to prefer a node that's already * running another instance. */ node_t *running = pe__current_node(rsc); if (running && (can_run_resources(running) == FALSE)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); } else if (running) { for (GList *iter = nodes->next; iter; iter = iter->next) { node_t *tmp = (node_t *) iter->data; if (tmp->weight != chosen->weight) { // The nodes are sorted by weight, so no more are equal break; } if (tmp->details == running->details) { // Scores are equal, so prefer the current node chosen = tmp; } multiple++; } } } } if (multiple > 1) { static char score[33]; int log_level = (chosen->weight >= INFINITY)? 
LOG_WARNING : LOG_INFO; score2char_stack(chosen->weight, score, sizeof(score)); do_crm_log(log_level, "Chose node %s for %s from %d nodes with score %s", chosen->details->uname, rsc->id, multiple, score); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable * list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if (attr == NULL) { attr = CRM_ATTR_UNAME; } g_hash_table_iter_init(&iter, list); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { int weight = node->weight; if (can_run_resources(node) == FALSE) { weight = -INFINITY; } if (weight > best_score || best_node == NULL) { const char *tmp = pe_node_attribute_raw(node, attr); if (safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if (safe_str_neq(attr, CRM_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node ? best_node : "", best_score); } return best_score; } static void node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if (attr == NULL) { attr = CRM_ATTR_UNAME; } g_hash_table_iter_init(&iter, list1); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { float weight_f = 0; int weight = 0; CRM_LOG_ASSERT(node != NULL); if(node == NULL) { continue; }; score = node_list_attr_score(list2, attr, pe_node_attribute_raw(node, attr)); weight_f = factor * score; /* Round the number */ /* http://c-faq.com/fp/round.html */ weight = (int)(weight_f < 0 ? weight_f - 0.5 : weight_f + 0.5); new_score = merge_weights(weight, node->weight); if (factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO - Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %f*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f*%d (node < 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight > 0) { node->weight = INFINITY_HACK; crm_trace("%s: Filtering %d + %f*%d (score > 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight == 0) { crm_trace("%s: Filtering %d + %f*%d (score == 0)", node->details->uname, node->weight, factor, score); } else { crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score); node->weight = new_score; } } } GHashTable * node_hash_dup(GHashTable * hash) { /* Hack! 
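 * GLib has no g_hash_table_dup(), so this duplicates the table by
 * round-tripping through a list: g_hash_table_get_values() collects
 * the node_t values, and node_hash_from_list() presumably rebuilds a
 * fresh table from copies of them, at the cost of a temporary list.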
*/ GListPtr list = g_hash_table_get_values(hash); GHashTable *result = node_hash_from_list(list); g_list_free(list); return result; } GHashTable * native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } GHashTable * rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { GHashTable *work = NULL; int multiplier = 1; if (factor < 0) { multiplier = -1; } if (is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); if (is_set(flags, pe_weights_init)) { if (rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last); work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags); } else { work = node_hash_dup(rsc->allowed_nodes); } clear_bit(flags, pe_weights_init); } else if (rsc->variant == pe_group && rsc->children) { GListPtr iter = rsc->children; pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id); work = node_hash_dup(nodes); for(iter = rsc->children; iter->next != NULL; iter = iter->next) { work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags); } } else { pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id); work = node_hash_dup(nodes); node_hash_update(work, rsc->allowed_nodes, attr, factor, is_set(flags, pe_weights_positive)); } if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id); g_hash_table_destroy(work); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if (can_run_any(work)) { GListPtr gIter = NULL; if (is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; crm_trace("Checking %d additional colocation constraints", g_list_length(gIter)); } else if(rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } gIter = ((resource_t*)last->data)->rsc_cons_lhs; crm_trace("Checking %d additional optional group colocation constraints from %s", g_list_length(gIter), ((resource_t*)last->data)->id); } else { gIter = rsc->rsc_cons_lhs; crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id); } for (; gIter != NULL; gIter = gIter->next) { resource_t *other = NULL; rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else { other = constraint->rsc_lh; } pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id); work = rsc_merge_weights(other, rhs, work, constraint->node_attribute, multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback); dump_node_scores(LOG_TRACE, NULL, rhs, work); } } if (is_set(flags, pe_weights_positive)) { node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->weight == INFINITY_HACK) { node->weight = 1; } } } if (nodes) { g_hash_table_destroy(nodes); } clear_bit(rsc->flags, pe_rsc_merging); return work; } static inline bool node_has_been_unfenced(node_t *node) { const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED); 
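/* The attribute is assumed here to record the node's most recent
 * unfencing, with "0" (or no value at all) meaning the node has never
 * been unfenced; hence "present and not 0" below. */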
return unfenced && strcmp("0", unfenced); } static inline bool is_unfence_device(resource_t *rsc, pe_working_set_t *data_set) { return is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing); } node_t * native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr gIter = NULL; int alloc_details = scores_log_level + 1; if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; GHashTable *archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)", rsc->id, constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if (constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_hash_dup(rsc->allowed_nodes); } rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set); if (archive && can_run_any(rsc->allowed_nodes) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive) { g_hash_table_destroy(archive); } } dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_rollback); } if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id); /* make sure it doesn't come up again */ resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } else if(rsc->next_role > rsc->role && is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); rsc->next_role = rsc->role; } dump_node_scores((show_scores? LOG_STDOUT : scores_log_level), rsc, __FUNCTION__, rsc->allowed_nodes); if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; rsc->next_role = rsc->role; assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_MASTER) { reason = "master"; } else if (is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id, (assign_to? 
assign_to->details->uname : "no node"), reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if (is_set(data_set->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if (is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set)) { pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if (rsc->allocated_to == NULL) { if (is_not_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); if (rsc->is_remote_node) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); CRM_ASSERT(remote_node != NULL); if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) { crm_trace("Setting Pacemaker Remote node %s to ONLINE", remote_node->details->id); remote_node->details->online = TRUE; /* We shouldn't consider an unseen remote-node unclean if we are going * to try and connect to it. Otherwise we get an unnecessary fence */ if (remote_node->details->unseen == TRUE) { remote_node->details->unclean = FALSE; } } else { crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)", remote_node->details->id, role2text(rsc->next_role), (rsc->allocated_to? "" : "un")); remote_node->details->shutdown = TRUE; } } return rsc->allocated_to; } static gboolean is_op_dup(resource_t *rsc, const char *name, guint interval_ms) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; guint interval2_ms = 0; CRM_ASSERT(rsc); for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); if (safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval2_ms = crm_parse_interval_spec(value); if (interval_ms != interval2_ms) { continue; } if (id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err ("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } static bool op_cannot_recur(const char *name) { return safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE); } static void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval_spec = NULL; const char *node_uname = node? 
node->details->uname : "n/a"; guint interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; CRM_ASSERT(rsc); /* Only process for the operations without role="Stopped" */ role = crm_element_value(operation, "role"); if (role && text2role(role) == RSC_ROLE_STOPPED) { return; } interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval_ms)) { crm_trace("Not creating duplicate recurring action %s for %dms %s", ID(operation), interval_ms, name); return; } if (op_cannot_recur(name)) { crm_config_err("Ignoring %s because action '%s' cannot be recurring", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { crm_trace("Not creating recurring action %s for disabled resource %s", ID(operation), rsc->id); free(key); return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node_uname); if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s mandatory: not active", key); } else { GListPtr gIter = NULL; for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_reschedule)) { is_optional = FALSE; break; } } g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL) || (role != NULL && text2role(role) != rsc->next_role)) { int log_level = LOG_TRACE; const char *result = "Ignoring"; if (is_optional) { char *after_key = NULL; action_t *cancel_op = NULL; // It's running, so cancel it log_level = LOG_INFO; result = "Cancelling"; cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set); switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { after_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { after_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: after_key = demote_key(rsc); break; default: break; } if (after_key) { custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL, pe_order_runnable_left, data_set); } } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, role ? 
role : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid); } if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", node_uname, mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", node_uname, mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (is_set(mon->flags, pe_action_optional) == FALSE) { pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s", mon->task, interval_ms / 1000, rsc->id, node_uname); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } static void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp(rsc, start, node, operation, data_set); } } } } static void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval_spec = NULL; const char *node_uname = node? 
node->details->uname : "n/a"; guint interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval_ms)) { crm_trace("Not creating duplicate recurring action %s for %dms %s", ID(operation), interval_ms, name); return; } if (op_cannot_recur(name)) { crm_config_err("Invalid recurring action %s with name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { crm_trace("Not creating recurring action %s for disabled resource %s", ID(operation), rsc->id); free(key); return; } // @TODO add support if (is_set(rsc->flags, pe_rsc_unique) == FALSE) { crm_notice("Ignoring %s (recurring monitors for Stopped role are " "not supported for anonymous clones)", ID(operation)); return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on nodes where it should not be running", ID(operation), rsc->id, role2text(rsc->next_role)); /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { action_t *cancel_op = NULL; g_list_free(possible_matches); cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set); if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs.
%s) on %s", key, role, role2text(rsc->next_role), node_uname); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; if (node && safe_str_eq(stop_node_uname, node_uname)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); if (is_set(rsc->flags, pe_rsc_managed)) { GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS, FALSE); GListPtr pIter = NULL; for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; order_actions(probe, stopped_mon, pe_order_runnable_left); crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname); } g_list_free(probes); } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t *) local_gIter->data; if (is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if (is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, stop_key(rsc), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } if (is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } free(key); } static void 
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } static void handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set) { action_t *migrate_to = NULL; action_t *migrate_from = NULL; action_t *start = NULL; action_t *stop = NULL; gboolean partial = rsc->partial_migration_target ? TRUE : FALSE; pe_rsc_trace(rsc, "Processing migration actions for %s: moving from %s to %s (partial migration: %s)", rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE"); start = start_action(rsc, chosen, TRUE); stop = stop_action(rsc, current, TRUE); if (partial == FALSE) { migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set); } migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set); if ((migrate_to && migrate_from) || (migrate_from && partial)) { set_bit(start->flags, pe_action_migrate_runnable); set_bit(stop->flags, pe_action_migrate_runnable); update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */ /* order probes before migrations */ if (partial) { set_bit(migrate_from->flags, pe_action_migrate_runnable); migrate_from->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set); } else { set_bit(migrate_from->flags, pe_action_migrate_runnable); set_bit(migrate_to->flags, pe_action_migrate_runnable); migrate_to->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); } custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set); } if (migrate_to) { add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); /* Pacemaker Remote connections don't require pending to be recorded in * the CIB. We can reduce CIB writes by not setting PENDING for them. */ if (rsc->is_remote_node == FALSE) { /* migrate_to takes place on the source node, but can * have an effect on the target node depending on how * the agent is written. Because of this, we have to maintain * a record that the migrate_to occurred, in case the source node * loses membership while the migrate_to action is still in-flight. 
*/ add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true"); } } if (migrate_from) { add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); } } void native_create_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *start = NULL; node_t *chosen = NULL; node_t *current = NULL; gboolean need_stop = FALSE; gboolean is_moving = FALSE; gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE; GListPtr gIter = NULL; unsigned int num_all_active = 0; unsigned int num_clean_active = 0; bool multiply_active = FALSE; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; CRM_ASSERT(rsc); chosen = rsc->allocated_to; if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } else if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc, role2text(rsc->role), role2text(rsc->next_role)); current = pe__find_active_on(rsc, &num_all_active, &num_clean_active); for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) { node_t *dangling_source = (node_t *) gIter->data; action_t *stop = stop_action(rsc, dangling_source, FALSE); set_bit(stop->flags, pe_action_dangle); pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, dangling_source->details->uname); if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, dangling_source, FALSE, data_set); } } if ((num_all_active == 2) && (num_clean_active == 2) && chosen && rsc->partial_migration_source && rsc->partial_migration_target && (current->details == rsc->partial_migration_source->details) && (chosen->details == rsc->partial_migration_target->details)) { /* The chosen node is still the migration target from a partial * migration. Attempt to continue the migration instead of recovering * by stopping the resource everywhere and starting it on a single node. */ pe_rsc_trace(rsc, "Will attempt to continue with a partial migration to target %s from %s", rsc->partial_migration_target->details->id, rsc->partial_migration_source->details->id); } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) { /* If a resource has "requires" set to nothing or quorum, don't consider * it active on unclean nodes (similar to how all resources behave when * stonith-enabled is false). We can start such resources elsewhere * before fencing completes, and if we considered the resource active on * the failed node, we would attempt recovery for being active on * multiple nodes. */ multiply_active = (num_clean_active > 1); } else { multiply_active = (num_all_active > 1); } if (multiply_active) { if (rsc->partial_migration_target && rsc->partial_migration_source) { // Migration was in progress, but we've chosen a different target crm_notice("Resource %s can no longer migrate to %s. 
Stopping on %s too", rsc->id, rsc->partial_migration_target->details->uname, rsc->partial_migration_source->details->uname); } else { // Resource was incorrectly multiply active pe_proc_err("Resource %s is active on %u nodes (%s)", rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information"); } if (rsc->recovery_type == recovery_stop_start) { need_stop = TRUE; } /* If by chance a partial migration is in process, but the migration * target is not chosen still, clear all partial migration data. */ rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = FALSE; } if (is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, chosen, TRUE); set_bit(start->flags, pe_action_print_always); } if (current && chosen && current->details != chosen->details) { pe_rsc_trace(rsc, "Moving %s", rsc->id); is_moving = TRUE; need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Recovering %s", rsc->id); need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Block %s", rsc->id); need_stop = TRUE; } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) { /* Recovery of a promoted resource */ start = start_action(rsc, chosen, TRUE); if (is_set(start->flags, pe_action_optional) == FALSE) { pe_rsc_trace(rsc, "Forced start %s", rsc->id); need_stop = TRUE; } } pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); /* Create any additional actions required when bringing resource down and * back up to same level. */ role = rsc->role; while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) { break; } role = next_role; } while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { next_role = rsc_state_matrix[role][rsc->role]; pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA"); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if(is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "No monitor additional ops for blocked resource"); } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Monitor ops for active resource"); start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { pe_rsc_trace(rsc, "Monitor ops for inactive resource"); Recurring_Stopped(rsc, NULL, NULL, data_set); } /* if we are stuck in a partial migration, where the target * of the partial migration no longer matches the chosen target. 
* A full stop/start is required */ if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) { pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id); allow_migrate = FALSE; } else if (is_moving == FALSE || is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || (current && current->details->unclean) || rsc->next_role < RSC_ROLE_STARTED) { allow_migrate = FALSE; } if (allow_migrate) { handle_migration_actions(rsc, current, chosen, data_set); } } static void rsc_avoids_remote_nodes(resource_t *rsc) { GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * \param[in] data_set Cluster working set * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. */ static GList * allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (is_set(data_set->flags, pe_flag_stdout)) { allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname); } return allowed_nodes; } void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ pe_resource_t *top = NULL; GList *allowed_nodes = NULL; bool check_unfencing = FALSE; bool check_utilization = FALSE; if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping native constraints for unmanaged resource: %s", rsc->id); return; } top = uber_parent(rsc); // Whether resource requires unfencing check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing) && is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && safe_str_neq(data_set->placement_strategy, "default"); // Order stops before starts (i.e. 
restart) custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_then | pe_order_restart, data_set); // Promotable ordering: demote before stop, start before promote if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } // Certain checks need allowed nodes if (check_unfencing || check_utilization || rsc->container) { allowed_nodes = allowed_nodes_as_list(rsc, data_set); } if (check_unfencing) { /* Check if the node needs to be unfenced first */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pe_order_optional|pe_order_same_node, data_set); custom_action_order(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pe_order_implies_then_on_node|pe_order_same_node, data_set); } } if (check_utilization) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *next = item->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_load, data_set); free(load_stopped_task); } } if (rsc->container) { 
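/* A resource that has a container is handled in one of three ways below:
 * (1) the resource is itself the implicit remote connection for a guest
 * node or bundle; (2) its container is, or contains, a remote-node
 * connection, in which case the resource is pinned to the corresponding
 * Pacemaker Remote node; or (3) it is an ordinary resource running inside
 * its container, which must be started before it, stopped after it, and
 * colocated with it. */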
resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Do not allow a guest resource to live on a Pacemaker Remote node, * to avoid nesting remotes. However, allow bundles to run on remote * nodes. */ if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP, pe_order_optional, data_set); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(data_set, rsc->container); } if (remote_rsc) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. 
*/ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, data_set); if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } rsc_colocation_new("resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, data_set); } } if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) { /* don't allow remote nodes to run stonith devices * or remote connection resources.*/ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } void native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set); } enum filter_colocation_res filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint, gboolean preview) { if (constraint->score == 0) { return influence_nothing; } /* rh side must be allocated before we can process constraint */ if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) { return influence_nothing; } if ((constraint->role_lh >= RSC_ROLE_SLAVE) && rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable) && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* LH and RH resources have already been allocated, place the correct * priority on LH rsc for the given promotable clone resource role */ return influence_rsc_priority; } if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { // Log an error if we violated a mandatory colocation constraint const pe_node_t *rh_node = rsc_rh->allocated_to; if (rsc_lh->allocated_to == NULL) { // Dependent resource isn't allocated, so constraint doesn't matter return influence_nothing; } if (constraint->score >= INFINITY) { // Dependent resource must colocate with rh_node if ((rh_node == NULL) || (rh_node->details != rsc_lh->allocated_to->details)) { crm_err("%s must be colocated with %s but is not (%s vs. %s)", rsc_lh->id, rsc_rh->id, rsc_lh->allocated_to->details->uname, (rh_node? 
rh_node->details->uname : "unallocated")); } } else if (constraint->score <= -INFINITY) { // Dependent resource must anti-colocate with rh_node if ((rh_node != NULL) && (rsc_lh->allocated_to->details == rh_node->details)) { crm_err("%s and %s must be anti-colocated but are allocated " "to the same node (%s)", rsc_lh->id, rsc_rh->id, rh_node->details->uname); } } return influence_nothing; } if (constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { crm_trace("LH: Skipping constraint: \"%s\" state filter (next role %s)", role2text(constraint->role_lh), role2text(rsc_lh->next_role)); return influence_nothing; } if (constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { crm_trace("LH: Skipping negative constraint: \"%s\" state filter", role2text(constraint->role_lh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { crm_trace("RH: Skipping negative constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } return influence_rsc_location; } static void influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *rh_value = NULL; const char *lh_value = NULL; const char *attribute = CRM_ATTR_ID; int score_multiplier = 1; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { return; } lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute); rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute); if (!safe_str_eq(lh_value, rh_value)) { if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) { rsc_lh->priority = -INFINITY; } return; } if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) { return; } if (constraint->role_lh == RSC_ROLE_SLAVE) { score_multiplier = -1; } rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority); } static void colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = CRM_ATTR_ID; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute); do_check = TRUE; } else if (constraint->score < 0) { /* nothing to do: * anti-colocation with something that is not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { tmp = pe_node_attribute_raw(node, attribute); if (do_check && safe_str_eq(tmp, value)) { if (constraint->score < INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights(constraint->score, node->weight); } } else if (do_check == FALSE || constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, 
do_check ? "failed" : "unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { static char score[33]; score2char_stack(constraint->score, score, sizeof(score)); pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set) { enum filter_colocation_res filter_results; CRM_ASSERT(rsc_lh); CRM_ASSERT(rsc_rh); filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE); pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if ((rsc_ticket->ticket->granted == FALSE) && (rsc_lh->running_on != NULL)) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: // Promotion score will be set to -INFINITY in promotion_order() if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pe_fence_node(data_set, node, "deadman ticket was lost"); } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (rsc_lh->running_on != NULL) { clear_bit(rsc_lh->flags, pe_rsc_managed); set_bit(rsc_lh->flags, pe_rsc_block); } break; } } else if 
(rsc_ticket->ticket->granted == FALSE) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if (rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(action_t * action, node_t * node) { return action->flags; } static inline bool is_primitive_action(pe_action_t *action) { return action && action->rsc && (action->rsc->variant == pe_native); } /*! * \internal * \brief Set action bits appropriately when pe_restart_order is used * * \param[in] first 'First' action in an ordering with pe_restart_order * \param[in] then 'Then' action in an ordering with pe_restart_order * \param[in] filter What ordering flags to care about * * \note pe_restart_order is set for "stop resource before starting it" and * "stop later group member before stopping earlier group member" */ static void handle_restart_ordering(pe_action_t *first, pe_action_t *then, enum pe_action_flags filter) { const char *reason = NULL; CRM_ASSERT(is_primitive_action(first)); CRM_ASSERT(is_primitive_action(then)); // We need to update the action in two cases: // ... if 'then' is required if (is_set(filter, pe_action_optional) && is_not_set(then->flags, pe_action_optional)) { reason = "restart"; } /* ... if 'then' is unrunnable start of managed resource (if a resource * should restart but can't start, we still want to stop) */ if (is_set(filter, pe_action_runnable) && is_not_set(then->flags, pe_action_runnable) && is_set(then->rsc->flags, pe_rsc_managed) && safe_str_eq(then->task, RSC_START)) { reason = "stop"; } if (reason == NULL) { return; } pe_rsc_trace(first->rsc, "Handling %s -> %s for %s", first->uuid, then->uuid, reason); // Make 'first' required if it is runnable if (is_set(first->flags, pe_action_runnable)) { pe_action_implies(first, then, pe_action_optional); } // Make 'first' required if 'then' is required if (is_not_set(then->flags, pe_action_optional)) { pe_action_implies(first, then, pe_action_optional); } // Make 'first' unmigratable if 'then' is unmigratable if (is_not_set(then->flags, pe_action_migrate_runnable)) { pe_action_implies(first, then, pe_action_migrate_runnable); } // Make 'then' unrunnable if 'first' is required but unrunnable if (is_not_set(first->flags, pe_action_optional) && is_not_set(first->flags, pe_action_runnable)) { pe_action_implies(then, first, pe_action_runnable); } } enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, then->uuid, then->flags); if (type & pe_order_asymmetrical) { resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0; if (!then_rsc) { /* ignore */ } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) { /* ignore... 
if 'then' is supposed to be stopped after 'first', but * 'then' is already stopped, there is nothing to be done when asymmetrical. */ } else if ((then_rsc_role >= RSC_ROLE_STARTED) && safe_str_eq(then->task, RSC_START) && is_set(then->flags, pe_action_optional) && then->node - && g_list_length(then_rsc->running_on) == 1 + && pcmk__list_of_1(then_rsc->running_on) && then->node->details == ((node_t *) then_rsc->running_on->data)->details) { /* Ignore. If 'then' is supposed to be started after 'first', but * 'then' is already started, there is nothing to be done when * asymmetrical -- unless the start is mandatory, which indicates * the resource is restarting, and the ordering is still needed. */ } else if (!(first->flags & pe_action_runnable)) { /* prevent 'then' action from happening if 'first' is not runnable and * 'then' has not yet occurred. */ pe_action_implies(then, first, pe_action_optional); pe_action_implies(then, first, pe_action_runnable); pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid); } else { /* ignore... then is allowed to start/stop if it wants to. */ } } if (type & pe_order_implies_first) { if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) { // Needs is_set(first_flags, pe_action_optional) too? pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_optional); } if (is_set(flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE && is_set(then->flags, pe_action_optional) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_migrate_runnable); } } if (type & pe_order_implies_first_master) { if ((filter & pe_action_optional) && ((then->flags & pe_action_optional) == FALSE) && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) { pe_action_implies(first, then, pe_action_optional); if (is_set(first->flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_migrate_runnable); } pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); } } if ((type & pe_order_implies_first_migratable) && is_set(filter, pe_action_optional)) { if (((then->flags & pe_action_migrate_runnable) == FALSE) || ((then->flags & pe_action_runnable) == FALSE)) { pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_runnable); } if ((then->flags & pe_action_optional) == 0) { pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_optional); } } if ((type & pe_order_pseudo_left) && is_set(filter, pe_action_optional)) { if ((first->flags & pe_action_runnable) == FALSE) { pe_action_implies(then, first, pe_action_migrate_runnable); pe_clear_action_bit(then, pe_action_pseudo); pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid); } } if (is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(then->flags, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); 
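/* 'first' cannot run, so 'then' must neither run nor be part of a migration */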
pe_action_implies(then, first, pe_action_runnable); pe_action_implies(then, first, pe_action_migrate_runnable); } if (is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(then->flags, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { /* in this case, treat migrate_runnable as if first is optional */ if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); pe_action_implies(then, first, pe_action_optional); } } if (is_set(type, pe_order_restart)) { handle_restart_ordering(first, then, filter); } if (then_flags != then->flags) { changed |= pe_graph_updated_then; pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); if(then->rsc && then->rsc->parent) { /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */ update_action(then, data_set); } } if (first_flags != first->flags) { changed |= pe_graph_updated_first; pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; if (constraint == NULL) { pe_err("Constraint is NULL"); return; } else if (rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) { pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)", constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role)); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id); return; } for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *other_node = NULL; other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights(other_node->weight, node->weight); } else { other_node = node_copy(node); pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode); g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node); } if (other_node->rsc_discover_mode < constraint->discover_mode) { if (constraint->discover_mode == pe_discover_exclusive) { rsc->exclusive_discover = TRUE; } /* exclusive > never > always... 
always is default */ other_node->rsc_discover_mode = constraint->discover_mode; } } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } #define log_change(a, fmt, args...) do { \ if(a && a->reason && terminal) { \ printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \ } else if(a && a->reason) { \ crm_notice(fmt" \tdue to %s", ##args, a->reason); \ } else if(terminal) { \ printf(" * "fmt"\n", ##args); \ } else { \ crm_notice(fmt, ##args); \ } \ } while(0) #define STOP_SANITY_ASSERT(lineno) do { \ if(current && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if(stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if(is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \ CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \ } \ } while(0) static int rsc_width = 5; static int detail_width = 5; static void LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) { int len = 0; char *reason = NULL; char *details = NULL; bool same_host = FALSE; bool same_role = FALSE; bool need_role = FALSE; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if(source == NULL) { source = action; } len = strlen(rsc->id); if(len > rsc_width) { rsc_width = len + 2; } if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) { need_role = TRUE; } if(origin != NULL && destination != NULL && origin->details == destination->details) { same_host = TRUE; } if(rsc->role == rsc->next_role) { same_role = TRUE; } if(need_role && origin == NULL) { /* Promoting from Stopped */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); } else if(need_role && destination == NULL) { /* Demoting a Master or Stopping a Slave */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); } else if(origin == NULL || destination == NULL) { /* Starting or stopping a resource */ details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname); } else if(need_role && same_role && same_host) { /* Recovering or restarting a promotable clone resource */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); } else if(same_role && same_host) { /* Recovering or Restarting a normal resource */ details = crm_strdup_printf("%s", origin->details->uname); } else if(same_role && need_role) { /* Moving a promotable clone resource */ details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role)); } else if(same_role) { /* Moving a normal resource */ details = 
crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname); } else if(same_host) { /* Promoting or demoting a promotable clone resource */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); } else { /* Moving and promoting/demoting */ details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname); } len = strlen(details); if(len > detail_width) { detail_width = len; } if(source->reason && is_not_set(action->flags, pe_action_runnable)) { reason = crm_strdup_printf(" due to %s (blocked)", source->reason); } else if(source->reason) { reason = crm_strdup_printf(" due to %s", source->reason); } else if(is_not_set(action->flags, pe_action_runnable)) { reason = strdup(" blocked"); } else { reason = strdup(""); } if(terminal) { printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason); } else { crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason); } free(details); free(reason); } void LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { node_t *next = NULL; node_t *current = NULL; pe_node_t *start_node = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *demote = NULL; action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if(rsc->variant == pe_container) { pcmk__bundle_log_actions(rsc, data_set, terminal); return; } if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { current = pe__current_node(rsc); if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed) ? 
" unmanaged" : ""); return; } if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) { start_node = NULL; } else { start_node = current; } possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { action_t *migrate_op = NULL; possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE); if (possible_matches) { migrate_op = possible_matches->data; } CRM_CHECK(next != NULL,); if (next == NULL) { } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) { LogAction("Migrate", rsc, current, next, start, NULL, terminal); } else if (is_set(rsc->flags, pe_rsc_reload)) { LogAction("Reload", rsc, current, next, start, NULL, terminal); } else if (start == NULL || is_set(start->flags, pe_action_optional)) { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { LogAction("Stop", rsc, current, NULL, stop, (stop && stop->reason)? stop : start, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move", rsc, current, next, stop, NULL, terminal); } else if (is_set(rsc->flags, pe_rsc_failed)) { LogAction("Recover", rsc, current, NULL, stop, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else { LogAction("Restart", rsc, current, next, start, NULL, terminal); /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */ } g_list_free(possible_matches); return; } if(stop && (rsc->next_role == RSC_ROLE_STOPPED || (start && is_not_set(start->flags, pe_action_runnable)))) { GListPtr gIter = NULL; key = stop_key(rsc); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; action_t *stop_op = NULL; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op && (stop_op->flags & pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); } LogAction("Stop", rsc, node, NULL, stop_op, (stop_op && stop_op->reason)? 
stop_op : start, terminal); } free(key); } else if (stop && is_set(rsc->flags, pe_rsc_failed)) { /* 'stop' may be NULL if the failure was ignored */ LogAction("Recover", rsc, current, next, stop, start, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (moving) { LogAction("Move", rsc, current, next, stop, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (is_set(rsc->flags, pe_rsc_reload)) { LogAction("Reload", rsc, current, next, start, NULL, terminal); } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) { LogAction("Restart", rsc, current, next, start, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (rsc->role == RSC_ROLE_MASTER) { CRM_LOG_ASSERT(current != NULL); LogAction("Demote", rsc, current, next, demote, NULL, terminal); } else if(rsc->next_role == RSC_ROLE_MASTER) { CRM_LOG_ASSERT(next); LogAction("Promote", rsc, current, next, promote, NULL, terminal); } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) { LogAction("Start", rsc, current, next, start, NULL, terminal); } } gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop; if (rsc->partial_migration_target) { if (rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if(rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", TRUE); } if (is_not_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set); order_actions(stop, unfence, pe_order_implies_first); if (!node_has_been_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname); } } } return TRUE; } static void order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action, enum pe_ordering order, pe_working_set_t *data_set) { /* When unfencing is in use, we order unfence actions before any probe or * start of resources that require unfencing, and also of fence devices. * * This might seem to violate the principle that fence devices require * only quorum. However, fence agents that unfence often don't have enough * information to even probe or start unless the node is first unfenced. */ if (is_unfence_device(rsc, data_set) || is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Start with an optional ordering. Requiring unfencing would result in * the node being unfenced, and all its resources being stopped, * whenever a new resource is added -- which would be highly suboptimal. 
*/ action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); order_actions(unfence, action, order); if (!node_has_been_unfenced(node)) { // But unfencing is required if it has never been done char *reason = crm_strdup_printf("required by %s %s", rsc->id, action->task); trigger_unfencing(NULL, node, reason, NULL, data_set); free(reason); } } } gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { action_t *start = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0); start = start_action(rsc, next, TRUE); order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set); if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } return TRUE; } gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; CRM_ASSERT(rsc); CRM_CHECK(next != NULL, return FALSE); pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname); action_list = pe__resource_actions(rsc, next, RSC_START, TRUE); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t *) gIter->data; if (is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if (runnable) { promote_action(rsc, next, optional); return TRUE; } pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id); action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t *) gIter->data; update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A"); demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A"); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? 
pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? pe_order_implies_then : pe_order_optional, data_set); return TRUE; } gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { enum pe_ordering flags = pe_order_optional; char *key = NULL; action_t *probe = NULL; node_t *running = NULL; node_t *allowed = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } if (pe__is_guest_or_remote_node(node)) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents", rsc->id, node->details->id); return FALSE; } else if (pe__is_guest_node(node) && pe__resource_contains_guest_node(data_set, rsc)) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes", rsc->id, node->details->id); return FALSE; } else if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections", rsc->id, node->details->id); return FALSE; } } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return any_created; } else if ((rsc->container) && (!rsc->is_remote_node)) { pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id); return FALSE; } if (is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } // Check whether resource is already known on node if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) { pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname); return FALSE; } allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (rsc->exclusive_discover || top->exclusive_discover) { if (allowed == NULL) { /* exclusive discover is enabled and this node is not in the allowed list. */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s: not in allowed node list (exclusive discovery)", rsc->id, node->details->id); return FALSE; } else if (allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s: not marked for discovery here (exclusive discovery)", rsc->id, node->details->id); return FALSE; } } if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) { /* If this node was allowed to host this resource it would * have been explicitly added to the 'allowed_nodes' list. * However it wasn't and the node has discovery disabled, so * no need to probe for this resource. 
*/ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id); return FALSE; } if (allowed && allowed->rsc_discover_mode == pe_discover_never) { /* this resource is marked as not needing to be discovered on this node */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id); return FALSE; } if (pe__is_guest_node(node)) { resource_t *remote = node->details->remote_rsc->container; if(remote->role == RSC_ROLE_STOPPED) { /* If the container is stopped, then we know anything that * might have been inside it is also stopped and there is * no need to probe. * * If we don't know the container's state on the target * either: * * - the container is running, the transition will abort * and we'll end up in a different case next time, or * * - the container is stopped * * Either way there is no need to probe. * */ if(remote->allocated_to && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) { /* For safety, we order the 'rsc' start after 'remote' * has been probed. * * Using 'top' helps for groups, but we may need to * follow the start's ordering chain backwards. */ custom_action_order(remote, generate_op_key(remote->id, RSC_STATUS, 0), NULL, top, generate_op_key(top->id, RSC_START, 0), NULL, pe_order_optional, data_set); } pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped", rsc->id, node->details->id, remote->id); return FALSE; /* Here we really want to check if remote->stop is required, * but that information doesn't exist yet */ } else if(node->details->remote_requires_reset || node->details->unclean || is_set(remote->flags, pe_rsc_failed) || remote->next_role == RSC_ROLE_STOPPED || (remote->allocated_to && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL) ) { /* The container is stopping or restarting, don't start * 'rsc' until 'remote' stops as this also implies that * 'rsc' is stopped - avoiding the need to probe */ custom_action_order(remote, generate_op_key(remote->id, RSC_STOP, 0), NULL, top, generate_op_key(top->id, RSC_START, 0), NULL, pe_order_optional, data_set); pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving", rsc->id, node->details->id, remote->id); return FALSE; /* } else { * The container is running so there is no problem probing it */ } } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); order_after_unfencing(rsc, node, probe, pe_order_optional, data_set); /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc.
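* * known_on only records that the resource's state was once probed on this node, while running_on means it is active there now. The code below sets the probe's expected result accordingly: PCMK_OCF_NOT_RUNNING when the resource is inactive, or PCMK_OCF_RUNNING_MASTER when it is active in the master role.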
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role), is_set(probe->flags, pe_action_runnable), rsc->running_on); if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) { top = rsc; } else { crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id); } if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) { /* Prevent the start from occurring if rsc isn't active, but * don't cause it to stop if it was active already */ flags |= pe_order_runnable_left; } custom_action_order(rsc, NULL, probe, top, generate_op_key(top->id, RSC_START, 0), NULL, flags, data_set); /* Before any reloads, if they exist */ custom_action_order(rsc, NULL, probe, top, reload_key(rsc), NULL, pe_order_optional, data_set); #if 0 // complete is always null currently if (!is_unfence_device(rsc, data_set)) { /* Normally rsc.start depends on probe complete which depends * on rsc.probe. But this can't be the case for fence devices * with unfencing, as it would create graph loops. * * So instead we explicitly order 'rsc.probe then rsc.start' */ order_actions(probe, complete, pe_order_implies_then); } #endif return TRUE; } /*! * \internal * \brief Check whether a resource is known on a particular node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return TRUE if resource (or parent if an anonymous clone) is known */ static bool rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node) { if (pe_hash_table_lookup(rsc->known_on, node->details->id)) { return TRUE; } else if ((rsc->variant == pe_native) && pe_rsc_is_anon_clone(rsc->parent) && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) { /* We check only the parent, not the uber-parent, because we cannot * assume that the resource is known if it is in an anonymously cloned * group (which may be only partially known). */ return TRUE; } return FALSE; } /*! * \internal * \brief Order a resource's start and promote actions relative to fencing * * \param[in] rsc Resource to be ordered * \param[in] stonith_op Fence action * \param[in] data_set Cluster information */ static void native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { node_t *target; GListPtr gIter = NULL; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; switch (action->needs) { case rsc_req_nothing: // Anything other than start or promote requires nothing break; case rsc_req_stonith: order_actions(stonith_op, action, pe_order_optional); break; case rsc_req_quorum: if (safe_str_eq(action->task, RSC_START) && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id) && !rsc_is_known_on(rsc, target)) { /* If we don't know the status of the resource on the node * we're about to shoot, we have to assume it may be active * there. Order the resource start after the fencing. This * is analogous to waiting for all the probes for a resource * to complete before starting it. * * The most likely explanation is that the DC died and took * its status with it. 
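* * As a (hypothetical) example: if the resource's only probe results were held by a DC that is now the fencing target, the resource may still be active there; starting it on another node before the fencing completes could produce two active copies.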
*/ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(stonith_op, action, pe_order_optional | pe_order_runnable_left); } break; } } } static void native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { GListPtr gIter = NULL; GListPtr action_list = NULL; bool order_implicit = false; resource_t *top = uber_parent(rsc); pe_action_t *parent_stop = NULL; node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Get a list of stop actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE); /* If resource requires fencing, implicit actions must occur after fencing. * * Implied stops and demotes of resources running on guest nodes are always * ordered after fencing, even if the resource does not require fencing, * because guest node "fencing" is actually just a resource stop. */ if (is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) { order_implicit = true; } if (action_list && order_implicit) { parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); } for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; // The stop would never complete, so convert it into a pseudo-action. update_action_flags(action, pe_action_pseudo|pe_action_runnable, __FUNCTION__, __LINE__); if (order_implicit) { update_action_flags(action, pe_action_implied_by_stonith, __FUNCTION__, __LINE__); /* Order the stonith before the parent stop (if any). * * Also order the stonith before the resource stop, unless the * resource is inside a bundle -- that would cause a graph loop. * We can rely on the parent stop's ordering instead. * * User constraints must not order a resource in a guest node * relative to the guest node container resource. The * pe_order_preserve flag marks constraints as generated by the * cluster and thus immune to that check (and is irrelevant if * target is not a guest). */ if (!pe_rsc_is_bundled(rsc)) { order_actions(stonith_op, action, pe_order_preserve); } order_actions(stonith_op, parent_stop, pe_order_preserve); } if (is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is implicit %s %s is fenced", rsc->id, (order_implicit? "after" : "because"), target->details->uname); } else { crm_info("%s is implicit %s %s is fenced", action->uuid, (order_implicit? "after" : "because"), target->details->uname); } if (is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ create_secondary_notification(action, rsc, stonith_op, data_set); } /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
However, given group(rA, rB) running on nodeX where rB.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation rB.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. */ update_action_flags(action, pe_action_pseudo|pe_action_runnable, __FUNCTION__, __LINE__); if (pe_rsc_is_bundled(rsc)) { /* Do nothing, let the recovery be ordered after the parent's implied stop */ } else if (order_implicit) { order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); } else { native_start_constraints(rsc, stonith_op, data_set); native_stop_constraints(rsc, stonith_op, data_set); } } void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *reload = NULL; if (rsc->children) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; ReloadRsc(child_rsc, node, data_set); } return; } else if (rsc->variant > pe_native) { /* Complex resource with no children */ return; } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); return; } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); stop_action(rsc, node, FALSE); /* Force a full restart, overkill?
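* (A resource that has failed or has a start pending cannot be reloaded safely in place, so the stop scheduled here forces a full stop/start cycle that picks up the new definition.)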
*/ return; } else if (node == NULL) { pe_rsc_trace(rsc, "%s: not active", rsc->id); return; } pe_rsc_trace(rsc, "Processing %s", rsc->id); set_bit(rsc->flags, pe_rsc_reload); reload = custom_action( rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set); pe_action_set_reason(reload, "resource definition change", FALSE); custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, data_set); custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, data_set); } void native_append_meta(resource_t * rsc, xmlNode * xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); resource_t *parent; if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container) { crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id); } } } diff --git a/lib/pacemaker/pcmk_sched_notif.c b/lib/pacemaker/pcmk_sched_notif.c index f3a1476d37..2b73643c96 100644 --- a/lib/pacemaker/pcmk_sched_notif.c +++ b/lib/pacemaker/pcmk_sched_notif.c @@ -1,835 +1,837 @@ /* - * Copyright 2004-2019 Andrew Beekhof + * Copyright 2004-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include typedef struct notify_entry_s { resource_t *rsc; node_t *node; } notify_entry_t; static gint sort_notify_entries(gconstpointer a, gconstpointer b) { int tmp; const notify_entry_t *entry_a = a; const notify_entry_t *entry_b = b; if (entry_a == NULL && entry_b == NULL) { return 0; } if (entry_a == NULL) { return 1; } if (entry_b == NULL) { return -1; } if (entry_a->rsc == NULL && entry_b->rsc == NULL) { return 0; } if (entry_a->rsc == NULL) { return 1; } if (entry_b->rsc == NULL) { return -1; } tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id); if (tmp != 0) { return tmp; } if (entry_a->node == NULL && entry_b->node == NULL) { return 0; } if (entry_a->node == NULL) { return 1; } if (entry_b->node == NULL) { return -1; } return strcmp(entry_a->node->details->id, entry_b->node->details->id); } static notify_entry_t *dup_notify_entry(notify_entry_t *entry) { notify_entry_t *dup = malloc(sizeof(notify_entry_t)); CRM_ASSERT(dup != NULL); dup->rsc = entry->rsc; dup->node = entry->node; return dup; } static void expand_node_list(GListPtr list, char **uname, char **metal) { GListPtr gIter = NULL; char *node_list = NULL; char *metal_list = NULL; CRM_ASSERT(uname != NULL); if (list == NULL) { *uname = strdup(" "); if(metal) { *metal = strdup(" "); } return; } for (gIter = list; gIter != NULL; gIter = gIter->next) { int len = 0; int existing_len = 0; node_t *node = (node_t *) gIter->data; if (node->details->uname == NULL) { continue; } len = 2 + strlen(node->details->uname); if(node_list) { existing_len = strlen(node_list); } // crm_trace("Adding %s (%dc) at offset %d", node->details->uname, len - 2, existing_len); node_list = realloc_safe(node_list, len + existing_len); sprintf(node_list + existing_len, "%s%s", existing_len == 0 ? 
"":" ", node->details->uname); if(metal) { existing_len = 0; if(metal_list) { existing_len = strlen(metal_list); } if(node->details->remote_rsc && node->details->remote_rsc->container && node->details->remote_rsc->container->running_on) { node = pe__current_node(node->details->remote_rsc->container); } if (node->details->uname == NULL) { continue; } len = 2 + strlen(node->details->uname); metal_list = realloc_safe(metal_list, len + existing_len); sprintf(metal_list + existing_len, "%s%s", existing_len == 0 ? "":" ", node->details->uname); } } *uname = node_list; if(metal) { *metal = metal_list; } } static void expand_list(GListPtr list, char **rsc_list, char **node_list) { GListPtr gIter = NULL; const char *uname = NULL; const char *rsc_id = NULL; const char *last_rsc_id = NULL; if (rsc_list) { *rsc_list = NULL; } if (list == NULL) { if (rsc_list) { *rsc_list = strdup(" "); } if (node_list) { *node_list = strdup(" "); } return; } if (node_list) { *node_list = NULL; } for (gIter = list; gIter != NULL; gIter = gIter->next) { notify_entry_t *entry = (notify_entry_t *) gIter->data; CRM_LOG_ASSERT(entry != NULL); CRM_LOG_ASSERT(entry && entry->rsc != NULL); if(entry == NULL || entry->rsc == NULL) { continue; } /* Uh, why? */ CRM_LOG_ASSERT(node_list == NULL || entry->node != NULL); if(node_list != NULL && entry->node == NULL) { continue; } uname = NULL; rsc_id = entry->rsc->id; CRM_ASSERT(rsc_id != NULL); /* filter dups */ if (safe_str_eq(rsc_id, last_rsc_id)) { continue; } last_rsc_id = rsc_id; if (rsc_list != NULL) { int existing_len = 0; int len = 2 + strlen(rsc_id); /* +1 space, +1 EOS */ if (*rsc_list) { existing_len = strlen(*rsc_list); } crm_trace("Adding %s (%dc) at offset %d", rsc_id, len - 2, existing_len); *rsc_list = realloc_safe(*rsc_list, len + existing_len); sprintf(*rsc_list + existing_len, "%s%s", existing_len == 0 ? "":" ", rsc_id); } if (entry->node != NULL) { uname = entry->node->details->uname; } if (node_list != NULL && uname) { int existing_len = 0; int len = 2 + strlen(uname); if (*node_list) { existing_len = strlen(*node_list); } crm_trace("Adding %s (%dc) at offset %d", uname, len - 2, existing_len); *node_list = realloc_safe(*node_list, len + existing_len); sprintf(*node_list + existing_len, "%s%s", existing_len == 0 ? 
"":" ", uname); } } } static void dup_attr(gpointer key, gpointer value, gpointer user_data) { add_hash_param(user_data, key, value); } static void add_notify_data_to_action_meta(notify_data_t *n_data, pe_action_t *action) { for (GSList *item = n_data->keys; item; item = item->next) { pcmk_nvpair_t *nvpair = item->data; add_hash_param(action->meta, nvpair->name, nvpair->value); } } static action_t * pe_notify(resource_t * rsc, node_t * node, action_t * op, action_t * confirm, notify_data_t * n_data, pe_working_set_t * data_set) { char *key = NULL; action_t *trigger = NULL; const char *value = NULL; const char *task = NULL; if (op == NULL || confirm == NULL) { pe_rsc_trace(rsc, "Op=%p confirm=%p", op, confirm); return NULL; } CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(node != NULL, return NULL); if (node->details->online == FALSE) { pe_rsc_trace(rsc, "Skipping notification for %s: node offline", rsc->id); return NULL; } else if (is_set(op->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping notification for %s: not runnable", op->uuid); return NULL; } value = g_hash_table_lookup(op->meta, "notify_type"); task = g_hash_table_lookup(op->meta, "notify_operation"); pe_rsc_trace(rsc, "Creating notify actions for %s: %s (%s-%s)", op->uuid, rsc->id, value, task); key = generate_notify_key(rsc->id, value, task); trigger = custom_action(rsc, key, op->task, node, is_set(op->flags, pe_action_optional), TRUE, data_set); g_hash_table_foreach(op->meta, dup_attr, trigger->meta); add_notify_data_to_action_meta(n_data, trigger); /* pseudo_notify before notify */ pe_rsc_trace(rsc, "Ordering %s before %s (%d->%d)", op->uuid, trigger->uuid, trigger->id, op->id); order_actions(op, trigger, pe_order_optional); order_actions(trigger, confirm, pe_order_optional); return trigger; } static void pe_post_notify(resource_t * rsc, node_t * node, notify_data_t * n_data, pe_working_set_t * data_set) { action_t *notify = NULL; CRM_CHECK(rsc != NULL, return); if (n_data->post == NULL) { return; /* Nothing to do */ } notify = pe_notify(rsc, node, n_data->post, n_data->post_done, n_data, data_set); if (notify != NULL) { notify->priority = INFINITY; } if (n_data->post_done) { GListPtr gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *mon = (action_t *) gIter->data; const char *interval_ms_s = g_hash_table_lookup(mon->meta, XML_LRM_ATTR_INTERVAL_MS); if ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0")) { pe_rsc_trace(rsc, "Skipping %s: interval", mon->uuid); continue; } else if (safe_str_eq(mon->task, RSC_CANCEL)) { pe_rsc_trace(rsc, "Skipping %s: cancel", mon->uuid); continue; } order_actions(n_data->post_done, mon, pe_order_optional); } } } notify_data_t * create_notification_boundaries(resource_t * rsc, const char *action, action_t * start, action_t * end, pe_working_set_t * data_set) { /* Create the pseudo ops that precede and follow the actual notifications */ /* * Creates two sequences (conditional on start and end being supplied): * pre_notify -> pre_notify_complete -> start, and * end -> post_notify -> post_notify_complete * * 'start' and 'end' may be the same event or ${X} and ${X}ed as per clones */ char *key = NULL; notify_data_t *n_data = NULL; if (is_not_set(rsc->flags, pe_rsc_notify)) { return NULL; } n_data = calloc(1, sizeof(notify_data_t)); n_data->action = action; if (start) { /* create pre-event notification wrappers */ key = generate_notify_key(rsc->id, "pre", start->task); n_data->pre = custom_action(rsc, key, RSC_NOTIFY, NULL, is_set(start->flags, 
pe_action_optional), TRUE, data_set); update_action_flags(n_data->pre, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(n_data->pre, pe_action_runnable, __FUNCTION__, __LINE__); add_hash_param(n_data->pre->meta, "notify_type", "pre"); add_hash_param(n_data->pre->meta, "notify_operation", n_data->action); add_hash_param(n_data->pre->meta, "notify_key_type", "pre"); add_hash_param(n_data->pre->meta, "notify_key_operation", start->task); /* create pre_notify_complete */ key = generate_notify_key(rsc->id, "confirmed-pre", start->task); n_data->pre_done = custom_action(rsc, key, RSC_NOTIFIED, NULL, is_set(start->flags, pe_action_optional), TRUE, data_set); update_action_flags(n_data->pre_done, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(n_data->pre_done, pe_action_runnable, __FUNCTION__, __LINE__); add_hash_param(n_data->pre_done->meta, "notify_type", "pre"); add_hash_param(n_data->pre_done->meta, "notify_operation", n_data->action); add_hash_param(n_data->pre_done->meta, "notify_key_type", "confirmed-pre"); add_hash_param(n_data->pre_done->meta, "notify_key_operation", start->task); order_actions(n_data->pre_done, start, pe_order_optional); order_actions(n_data->pre, n_data->pre_done, pe_order_optional); } if (end) { /* create post-event notification wrappers */ key = generate_notify_key(rsc->id, "post", end->task); n_data->post = custom_action(rsc, key, RSC_NOTIFY, NULL, is_set(end->flags, pe_action_optional), TRUE, data_set); n_data->post->priority = INFINITY; update_action_flags(n_data->post, pe_action_pseudo, __FUNCTION__, __LINE__); if (is_set(end->flags, pe_action_runnable)) { update_action_flags(n_data->post, pe_action_runnable, __FUNCTION__, __LINE__); } else { update_action_flags(n_data->post, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } add_hash_param(n_data->post->meta, "notify_type", "post"); add_hash_param(n_data->post->meta, "notify_operation", n_data->action); add_hash_param(n_data->post->meta, "notify_key_type", "post"); add_hash_param(n_data->post->meta, "notify_key_operation", end->task); /* create post_notify_complete */ key = generate_notify_key(rsc->id, "confirmed-post", end->task); n_data->post_done = custom_action(rsc, key, RSC_NOTIFIED, NULL, is_set(end->flags, pe_action_optional), TRUE, data_set); n_data->post_done->priority = INFINITY; update_action_flags(n_data->post_done, pe_action_pseudo, __FUNCTION__, __LINE__); if (is_set(end->flags, pe_action_runnable)) { update_action_flags(n_data->post_done, pe_action_runnable, __FUNCTION__, __LINE__); } else { update_action_flags(n_data->post_done, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } add_hash_param(n_data->post_done->meta, "notify_type", "post"); add_hash_param(n_data->post_done->meta, "notify_operation", n_data->action); add_hash_param(n_data->post_done->meta, "notify_key_type", "confirmed-post"); add_hash_param(n_data->post_done->meta, "notify_key_operation", end->task); order_actions(end, n_data->post, pe_order_implies_then); order_actions(n_data->post, n_data->post_done, pe_order_implies_then); } if (start && end) { order_actions(n_data->pre_done, n_data->post, pe_order_optional); } return n_data; } void collect_notification_data(resource_t * rsc, gboolean state, gboolean activity, notify_data_t * n_data) { if(n_data->allowed_nodes == NULL) { n_data->allowed_nodes = rsc->allowed_nodes; } if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; 
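/* Recurse so the notification data covers every instance of the clone, not just the top-level resource */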
collect_notification_data(child, state, activity, n_data); } return; } if (state) { notify_entry_t *entry = NULL; entry = calloc(1, sizeof(notify_entry_t)); entry->rsc = rsc; if (rsc->running_on) { /* we only take the first one */ entry->node = rsc->running_on->data; } pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(rsc->role)); switch (rsc->role) { case RSC_ROLE_STOPPED: n_data->inactive = g_list_prepend(n_data->inactive, entry); break; case RSC_ROLE_STARTED: n_data->active = g_list_prepend(n_data->active, entry); break; case RSC_ROLE_SLAVE: n_data->slave = g_list_prepend(n_data->slave, entry); n_data->active = g_list_prepend(n_data->active, dup_notify_entry(entry)); break; case RSC_ROLE_MASTER: n_data->master = g_list_prepend(n_data->master, entry); n_data->active = g_list_prepend(n_data->active, dup_notify_entry(entry)); break; default: crm_err("Unsupported notify role"); free(entry); break; } } if (activity) { notify_entry_t *entry = NULL; enum action_tasks task; GListPtr gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) { task = text2task(op->task); if(task == stop_rsc && op->node->details->unclean) { // Create anyway (additional noise if node can't be fenced) } else if(is_not_set(op->flags, pe_action_runnable)) { continue; } entry = calloc(1, sizeof(notify_entry_t)); entry->node = op->node; entry->rsc = rsc; switch (task) { case start_rsc: n_data->start = g_list_prepend(n_data->start, entry); break; case stop_rsc: n_data->stop = g_list_prepend(n_data->stop, entry); break; case action_promote: n_data->promote = g_list_prepend(n_data->promote, entry); break; case action_demote: n_data->demote = g_list_prepend(n_data->demote, entry); break; default: free(entry); break; } } } } } #define add_notify_env(n_data, key, value) do { \ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value); \ } while (0) #define add_notify_env_free(n_data, key, value) do { \ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value); \ free(value); value = NULL; \ } while (0) gboolean expand_notification_data(resource_t *rsc, notify_data_t * n_data, pe_working_set_t * data_set) { /* Expand the notification entries into a key=value hashtable * This hashtable is later used in action2xml() */ gboolean required = FALSE; char *rsc_list = NULL; char *node_list = NULL; char *metal_list = NULL; const char *source = NULL; GListPtr nodes = NULL; if (n_data->stop) { n_data->stop = g_list_sort(n_data->stop, sort_notify_entries); } expand_list(n_data->stop, &rsc_list, &node_list); if (rsc_list != NULL && safe_str_neq(" ", rsc_list)) { if (safe_str_eq(n_data->action, RSC_STOP)) { required = TRUE; } } add_notify_env_free(n_data, "notify_stop_resource", rsc_list); add_notify_env_free(n_data, "notify_stop_uname", node_list); if (n_data->start) { n_data->start = g_list_sort(n_data->start, sort_notify_entries); if (rsc_list && safe_str_eq(n_data->action, RSC_START)) { required = TRUE; } } expand_list(n_data->start, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_start_resource", rsc_list); add_notify_env_free(n_data, "notify_start_uname", node_list); if (n_data->demote) { n_data->demote = g_list_sort(n_data->demote, sort_notify_entries); if (safe_str_eq(n_data->action, RSC_DEMOTE)) { required = TRUE; } } expand_list(n_data->demote, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_demote_resource", rsc_list); add_notify_env_free(n_data, "notify_demote_uname", 
node_list); if (n_data->promote) { n_data->promote = g_list_sort(n_data->promote, sort_notify_entries); if (safe_str_eq(n_data->action, RSC_PROMOTE)) { required = TRUE; } } expand_list(n_data->promote, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_promote_resource", rsc_list); add_notify_env_free(n_data, "notify_promote_uname", node_list); if (n_data->active) { n_data->active = g_list_sort(n_data->active, sort_notify_entries); } expand_list(n_data->active, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_active_resource", rsc_list); add_notify_env_free(n_data, "notify_active_uname", node_list); if (n_data->slave) { n_data->slave = g_list_sort(n_data->slave, sort_notify_entries); } expand_list(n_data->slave, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_slave_resource", rsc_list); add_notify_env_free(n_data, "notify_slave_uname", node_list); if (n_data->master) { n_data->master = g_list_sort(n_data->master, sort_notify_entries); } expand_list(n_data->master, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_master_resource", rsc_list); add_notify_env_free(n_data, "notify_master_uname", node_list); if (n_data->inactive) { n_data->inactive = g_list_sort(n_data->inactive, sort_notify_entries); } expand_list(n_data->inactive, &rsc_list, NULL); add_notify_env_free(n_data, "notify_inactive_resource", rsc_list); nodes = g_hash_table_get_values(n_data->allowed_nodes); if (is_set(data_set->flags, pe_flag_stdout)) { /* If printing to stdout, sort the node list, for consistent * regression test output (while avoiding the performance hit * for the live cluster). */ nodes = g_list_sort(nodes, sort_node_uname); } expand_node_list(nodes, &node_list, NULL); add_notify_env_free(n_data, "notify_available_uname", node_list); g_list_free(nodes); source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET); if (safe_str_eq("host", source)) { expand_node_list(data_set->nodes, &node_list, &metal_list); add_notify_env_free(n_data, "notify_all_hosts", metal_list); } else { expand_node_list(data_set->nodes, &node_list, NULL); } add_notify_env_free(n_data, "notify_all_uname", node_list); if (required && n_data->pre) { update_action_flags(n_data->pre, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); update_action_flags(n_data->pre_done, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } if (required && n_data->post) { update_action_flags(n_data->post, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); update_action_flags(n_data->post_done, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } return required; } /*! * \internal * \brief Find any remote connection start relevant to an action * * \param[in] action Action to check * * \return If action is behind a remote connection, connection's start */ static pe_action_t * find_remote_start(pe_action_t *action) { if (action && action->node) { pe_resource_t *remote_rsc = action->node->details->remote_rsc; if (remote_rsc) { return find_first_action(remote_rsc->actions, NULL, RSC_START, NULL); } } return NULL; } void create_notifications(resource_t * rsc, notify_data_t * n_data, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *stop = NULL; action_t *start = NULL; enum action_tasks task = text2task(n_data->action); if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; create_notifications(child, n_data, data_set); } return; } /* Copy notification details into standard ops */ for
(gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) { enum action_tasks t = text2task(op->task); switch (t) { case start_rsc: case stop_rsc: case action_promote: case action_demote: add_notify_data_to_action_meta(n_data, op); break; default: break; } } } switch (task) { case start_rsc: - if(g_list_length(n_data->start) == 0) { + if (n_data->start == NULL) { pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)", n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role)); return; } break; case action_promote: - if(g_list_length(n_data->promote) == 0) { + if (n_data->promote == NULL) { pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)", n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role)); return; } break; case action_demote: - if(g_list_length(n_data->demote) == 0) { + if (n_data->demote == NULL) { pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)", n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role)); return; } break; default: /* We cannot do the same for stop_rsc/n_data->stop as it * might be implied by fencing */ break; } pe_rsc_trace(rsc, "Creating notifications for: %s.%s (%s->%s)", n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role)); stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL); start = find_first_action(rsc->actions, NULL, RSC_START, NULL); /* stop / demote */ if (rsc->role != RSC_ROLE_STOPPED) { if (task == stop_rsc || task == action_demote) { gIter = rsc->running_on; for (; gIter != NULL; gIter = gIter->next) { node_t *current_node = (node_t *) gIter->data; /* if this stop action is a pseudo action as a result of the current * node being fenced, this stop action is implied by the fencing * action. There's no reason to send the fenced node a stop notification */ if (stop && is_set(stop->flags, pe_action_pseudo) && (current_node->details->unclean || current_node->details->remote_requires_reset) ) { continue; } pe_notify(rsc, current_node, n_data->pre, n_data->pre_done, n_data, data_set); if (task == action_demote || stop == NULL || is_set(stop->flags, pe_action_optional)) { pe_post_notify(rsc, current_node, n_data, data_set); } } } } /* start / promote */ if (rsc->next_role != RSC_ROLE_STOPPED) { if (rsc->allocated_to == NULL) { pe_proc_err("Next role '%s' but %s is not allocated", role2text(rsc->next_role), rsc->id); } else if (task == start_rsc || task == action_promote) { if (start) { pe_action_t *remote_start = find_remote_start(start); if (remote_start && is_not_set(remote_start->flags, pe_action_runnable)) { /* Start and promote actions for a clone instance behind * a Pacemaker Remote connection happen after the * connection starts. If the connection start is blocked, do * not schedule notifications for these actions.
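* * (If the connection start never runs in this transition, the instance's own start or promote cannot run either, so any notifications created for them would never fire.)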
*/ return; } } if (task != start_rsc || start == NULL || is_set(start->flags, pe_action_optional)) { pe_notify(rsc, rsc->allocated_to, n_data->pre, n_data->pre_done, n_data, data_set); } pe_post_notify(rsc, rsc->allocated_to, n_data, data_set); } } } void free_notification_data(notify_data_t * n_data) { if (n_data == NULL) { return; } g_list_free_full(n_data->stop, free); g_list_free_full(n_data->start, free); g_list_free_full(n_data->demote, free); g_list_free_full(n_data->promote, free); g_list_free_full(n_data->master, free); g_list_free_full(n_data->slave, free); g_list_free_full(n_data->active, free); g_list_free_full(n_data->inactive, free); pcmk_free_nvpairs(n_data->keys); free(n_data); } void create_secondary_notification(pe_action_t *action, resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set) { notify_data_t *n_data; crm_info("Creating secondary notification for %s", action->uuid); n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); collect_notification_data(rsc, TRUE, FALSE, n_data); add_notify_env(n_data, "notify_stop_resource", rsc->id); add_notify_env(n_data, "notify_stop_uname", action->node->details->uname); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index 2eb6d8e765..0ab6612521 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -1,1003 +1,1005 @@ /* - * Copyright 2004-2018 Andrew Beekhof + * Copyright 2004-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #define VARIANT_CLONE 1 #include extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); static void child_promoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type, resource_t * rsc, resource_t * child, resource_t * last, pe_working_set_t * data_set) { if (child == NULL) { if (clone_data->ordered && last != NULL) { pe_rsc_trace(rsc, "Ordered version (last node)"); /* last child promote before promoted started */ new_rsc_order(last, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set); } return; } /* child promote before global promoted */ new_rsc_order(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set); /* global promote before child promote */ new_rsc_order(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set); if (clone_data->ordered) { pe_rsc_trace(rsc, "Ordered version"); if (last == NULL) { /* global promote before first child promote */ last = rsc; } /* else: child/child relative promote */ order_start_start(last, child, type); new_rsc_order(last, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set); } else { pe_rsc_trace(rsc, "Un-ordered version"); } } static void child_demoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type, resource_t * rsc, resource_t * child, resource_t * last, pe_working_set_t * data_set) { if (child == NULL) { if (clone_data->ordered && last != NULL) { pe_rsc_trace(rsc, "Ordered version (last node)"); /* global demote before first child demote */ new_rsc_order(rsc, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, data_set); } return; } /* child demote before global demoted */ new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ new_rsc_order(rsc, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); if (clone_data->ordered && last != NULL) { pe_rsc_trace(rsc, "Ordered version"); /* child/child relative demote */ new_rsc_order(child, RSC_DEMOTE, last, RSC_DEMOTE, type, data_set); } else if (clone_data->ordered) { pe_rsc_trace(rsc, "Ordered version (1st node)"); /* first child demote before global demoted */ new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type, data_set); } else { pe_rsc_trace(rsc, "Un-ordered version"); } } static void check_promotable_actions(resource_t *rsc, gboolean *demoting, gboolean *promoting) { GListPtr gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; check_promotable_actions(child, demoting, promoting); } return; } CRM_ASSERT(demoting != NULL); CRM_ASSERT(promoting != NULL); gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (*promoting && *demoting) { return; } else if (is_set(action->flags, pe_action_optional)) { continue; } else if (safe_str_eq(RSC_DEMOTE, action->task)) { *demoting = TRUE; } else if (safe_str_eq(RSC_PROMOTE, action->task)) { *promoting = TRUE; } } } static void apply_master_location(resource_t *child, GListPtr location_constraints, pe_node_t *chosen) { CRM_CHECK(child && chosen, return); for (GListPtr gIter = location_constraints; gIter; gIter = gIter->next) { pe_node_t *cons_node = NULL; pe__location_t *cons = gIter->data; if (cons->role_filter == RSC_ROLE_MASTER) { pe_rsc_trace(child, "Applying %s to %s", cons->id, child->id); cons_node = pe_find_node_id(cons->node_list_rh, chosen->details->id); } if (cons_node != NULL) { int new_priority =
merge_weights(child->priority, cons_node->weight); pe_rsc_trace(child, "\t%s[%s]: %d -> %d (%d)", child->id, cons_node->details->uname, child->priority, new_priority, cons_node->weight); child->priority = new_priority; } } } static node_t * can_be_master(resource_t * rsc) { node_t *node = NULL; node_t *local_node = NULL; resource_t *parent = uber_parent(rsc); clone_variant_data_t *clone_data = NULL; #if 0 enum rsc_role_e role = RSC_ROLE_UNKNOWN; role = rsc->fns->state(rsc, FALSE); crm_info("%s role: %s", rsc->id, role2text(role)); #endif if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (can_be_master(child) == NULL) { pe_rsc_trace(rsc, "Child %s of %s can't be promoted", child->id, rsc->id); return NULL; } } } node = rsc->fns->location(rsc, NULL, FALSE); if (node == NULL) { pe_rsc_trace(rsc, "%s cannot be master: not allocated", rsc->id); return NULL; } else if (is_not_set(rsc->flags, pe_rsc_managed)) { if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { crm_notice("Forcing unmanaged master %s to remain promoted on %s", rsc->id, node->details->uname); } else { return NULL; } } else if (rsc->priority < 0) { pe_rsc_trace(rsc, "%s cannot be master: preference: %d", rsc->id, rsc->priority); return NULL; } else if (can_run_resources(node) == FALSE) { crm_trace("Node can't run any resources: %s", node->details->uname); return NULL; } get_clone_variant_data(clone_data, parent); local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id); if (local_node == NULL) { crm_err("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); return NULL; } else if ((local_node->count < clone_data->promoted_node_max) || is_not_set(rsc->flags, pe_rsc_managed)) { return local_node; } else { pe_rsc_trace(rsc, "%s cannot be master on %s: node full", rsc->id, node->details->uname); } return NULL; } static gint sort_promotable_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc; enum rsc_role_e role1 = RSC_ROLE_UNKNOWN; enum rsc_role_e role2 = RSC_ROLE_UNKNOWN; const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); role1 = resource1->fns->state(resource1, TRUE); role2 = resource2->fns->state(resource2, TRUE); rc = sort_rsc_index(a, b); if (rc != 0) { crm_trace("%s %c %s (index)", resource1->id, rc < 0 ? 
'<' : '>', resource2->id); return rc; } if (role1 > role2) { crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id); return -1; } else if (role1 < role2) { crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id); return 1; } return sort_clone_instance(a, b, data_set); } static void promotion_order(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = NULL; node_t *node = NULL; node_t *chosen = NULL; clone_variant_data_t *clone_data = NULL; char score[33]; size_t len = sizeof(score); get_clone_variant_data(clone_data, rsc); if (clone_data->merged_master_weights) { return; } clone_data->merged_master_weights = TRUE; pe_rsc_trace(rsc, "Merging weights for %s", rsc->id); set_bit(rsc->flags, pe_rsc_merging); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; pe_rsc_trace(rsc, "Sort index: %s = %d", child->id, child->sort_index); } dump_node_scores(LOG_TRACE, rsc, "Before", rsc->allowed_nodes); gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; chosen = child->fns->location(child, NULL, FALSE); if (chosen == NULL || child->sort_index < 0) { pe_rsc_trace(rsc, "Skipping %s", child->id); continue; } node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); /* adds in master preferences and rsc_location.role=Master */ score2char_stack(child->sort_index, score, len); pe_rsc_trace(rsc, "Adding %s to %s from %s", score, node->details->uname, child->id); node->weight = merge_weights(child->sort_index, node->weight); } dump_node_scores(LOG_TRACE, rsc, "Middle", rsc->allowed_nodes); gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; /* (re-)adds location preferences of resources that the * master instance should/must be colocated with */ if (constraint->role_lh == RSC_ROLE_MASTER) { enum pe_weights flags = constraint->score == INFINITY ? 
0 : pe_weights_rollback; pe_rsc_trace(rsc, "RHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, constraint->score); rsc->allowed_nodes = constraint->rsc_rh->cmds->merge_weights(constraint->rsc_rh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, flags); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; /* (re-)adds location preferences of resource that wish to be * colocated with the master instance */ if (constraint->role_rh == RSC_ROLE_MASTER) { pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, constraint->score); rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, (pe_weights_rollback | pe_weights_positive)); } } gIter = rsc->rsc_tickets; for (; gIter != NULL; gIter = gIter->next) { rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data; if (rsc_ticket->role_lh == RSC_ROLE_MASTER && (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby)) { resource_location(rsc, NULL, -INFINITY, "__stateful_without_ticket__", data_set); } } dump_node_scores(LOG_TRACE, rsc, "After", rsc->allowed_nodes); /* write them back and sort */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; chosen = child->fns->location(child, NULL, FALSE); if (is_not_set(child->flags, pe_rsc_managed) && child->next_role == RSC_ROLE_MASTER) { child->sort_index = INFINITY; } else if (chosen == NULL || child->sort_index < 0) { pe_rsc_trace(rsc, "%s: %d", child->id, child->sort_index); } else { node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); child->sort_index = node->weight; } pe_rsc_trace(rsc, "Set sort index: %s = %d", child->id, child->sort_index); } rsc->children = g_list_sort_with_data(rsc->children, sort_promotable_instance, data_set); clear_bit(rsc->flags, pe_rsc_merging); } static gboolean filter_anonymous_instance(resource_t *rsc, const node_t *node) { GListPtr rIter = NULL; char *key = clone_strip(rsc->id); resource_t *parent = uber_parent(rsc); for (rIter = parent->children; rIter; rIter = rIter->next) { /* If there is an active instance on the node, only it receives the * promotion score. Use ->find_rsc() in case this is a cloned group. 
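* * (With a cloned group, the instance named by 'key' is nested inside a group child, so a direct scan of parent->children would not find it.)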
*/ resource_t *child = rIter->data; resource_t *active = parent->fns->find_rsc(child, key, node, pe_find_clone|pe_find_current); if(rsc == active) { pe_rsc_trace(rsc, "Found %s for %s active on %s: done", active->id, key, node->details->uname); free(key); return TRUE; } else if(active) { pe_rsc_trace(rsc, "Found %s for %s on %s: not %s", active->id, key, node->details->uname, rsc->id); free(key); return FALSE; } else { pe_rsc_trace(rsc, "%s on %s: not active", key, node->details->uname); } } for (rIter = parent->children; rIter; rIter = rIter->next) { resource_t *child = rIter->data; /* * We know it's not running, but any score will still count if * the instance has been probed on $node * * Again use ->find_rsc() because we might be a cloned group * and knowing that other members of the group are known here * implies nothing */ rsc = parent->fns->find_rsc(child, key, NULL, pe_find_clone); CRM_LOG_ASSERT(rsc); if(rsc) { pe_rsc_trace(rsc, "Checking %s for %s on %s", rsc->id, key, node->details->uname); if (g_hash_table_lookup(rsc->known_on, node->details->id)) { free(key); return TRUE; } } } free(key); return FALSE; } static const char * lookup_promotion_score(resource_t *rsc, const node_t *node, const char *name) { const char *attr_value = NULL; if (node && name) { char *attr_name = crm_strdup_printf("master-%s", name); attr_value = pe_node_attribute_calculated(node, attr_name, rsc); free(attr_name); } return attr_value; } static int promotion_score(resource_t *rsc, const node_t *node, int not_set_value) { char *name = rsc->id; const char *attr_value = NULL; int score = not_set_value; node_t *match = NULL; CRM_CHECK(node != NULL, return not_set_value); if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; int c_score = promotion_score(child, node, not_set_value); if (score == not_set_value) { score = c_score; } else { score += c_score; } } return score; } if (is_not_set(rsc->flags, pe_rsc_unique) && filter_anonymous_instance(rsc, node)) { pe_rsc_trace(rsc, "Anonymous clone %s is allowed on %s", rsc->id, node->details->uname); } else if (rsc->running_on || g_hash_table_size(rsc->known_on)) { /* If we've probed and/or started the resource anywhere, consider * promotion scores only from nodes where we know the status. However, * if the status of all nodes is unknown (e.g. cluster startup), * skip this code, to make sure we take into account any permanent * promotion scores set previously. */ node_t *known = pe_hash_table_lookup(rsc->known_on, node->details->id); match = pe_find_node_id(rsc->running_on, node->details->id); if ((match == NULL) && (known == NULL)) { pe_rsc_trace(rsc, "skipping %s (aka. 
%s) promotion score on %s because inactive", rsc->id, rsc->clone_name, node->details->uname); return score; } } match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { return score; } else if (match->weight < 0) { pe_rsc_trace(rsc, "%s on %s has score: %d - ignoring", rsc->id, match->details->uname, match->weight); return score; } if (rsc->clone_name) { /* Use the name the lrm knows this resource as, * since that's what crm_master would have used too */ name = rsc->clone_name; } attr_value = lookup_promotion_score(rsc, node, name); pe_rsc_trace(rsc, "promotion score for %s on %s = %s", name, node->details->uname, crm_str(attr_value)); if ((attr_value == NULL) && is_not_set(rsc->flags, pe_rsc_unique)) { /* If we don't have any LRM history yet, we won't have clone_name -- in * that case, for anonymous clones, try the resource name without any * instance number. */ name = clone_strip(rsc->id); if (strcmp(rsc->id, name)) { attr_value = lookup_promotion_score(rsc, node, name); pe_rsc_trace(rsc, "stripped promotion score for %s on %s = %s", name, node->details->uname, crm_str(attr_value)); } free(name); } if (attr_value != NULL) { score = char2score(attr_value); } return score; } void apply_master_prefs(resource_t *rsc) { int score, new_score; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->applied_master_prefs) { /* Make sure we only do this once */ return; } clone_data->applied_master_prefs = TRUE; for (; gIter != NULL; gIter = gIter->next) { GHashTableIter iter; node_t *node = NULL; resource_t *child_rsc = (resource_t *) gIter->data; g_hash_table_iter_init(&iter, child_rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (can_run_resources(node) == FALSE) { /* This node will never be promoted to master, * so don't apply the promotion score as that may * lead to clone shuffling */ continue; } score = promotion_score(child_rsc, node, 0); if (score > 0) { new_score = merge_weights(node->weight, score); if (new_score != node->weight) { pe_rsc_trace(rsc, "\t%s: Updating preference for %s (%d->%d)", child_rsc->id, node->details->uname, node->weight, new_score); node->weight = new_score; } } new_score = QB_MAX(child_rsc->priority, score); if (new_score != child_rsc->priority) { pe_rsc_trace(rsc, "\t%s: Updating priority (%d->%d)", child_rsc->id, child_rsc->priority, new_score); child_rsc->priority = new_score; } } } } static void set_role_slave(resource_t * rsc, gboolean current) { GListPtr gIter = rsc->children; if (current) { if (rsc->role == RSC_ROLE_STARTED) { rsc->role = RSC_ROLE_SLAVE; } } else { GListPtr allocated = NULL; rsc->fns->location(rsc, &allocated, FALSE); if (allocated) { rsc->next_role = RSC_ROLE_SLAVE; } else { rsc->next_role = RSC_ROLE_STOPPED; } g_list_free(allocated); } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_role_slave(child_rsc, current); } } static void set_role_master(resource_t * rsc) { GListPtr gIter = rsc->children; if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_MASTER; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_role_master(child_rsc); } } node_t * color_promotable(resource_t *rsc, pe_working_set_t *data_set) { int promoted = 0; GListPtr gIter = NULL; GListPtr gIter2 = NULL; GHashTableIter iter; node_t *node = NULL; node_t *chosen = NULL; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; 
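/* The promotion allocation below proceeds in three passes: reset each node's master count, derive every instance's priority from its promotion score, role and colocations, then sort the instances and promote at most promoted_max of them */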
char score[33]; size_t len = sizeof(score); clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); /* count now tracks the number of masters allocated */ g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { node->count = 0; } /* * assign priority */ for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { GListPtr list = NULL; resource_t *child_rsc = (resource_t *) gIter->data; pe_rsc_trace(rsc, "Assigning priority for %s: %s", child_rsc->id, role2text(child_rsc->next_role)); if (child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) { set_role_slave(child_rsc, TRUE); } chosen = child_rsc->fns->location(child_rsc, &list, FALSE); - if (g_list_length(list) > 1) { + if (pcmk__list_of_multiple(list)) { crm_config_err("Cannot promote non-colocated child %s", child_rsc->id); } g_list_free(list); if (chosen == NULL) { continue; } next_role = child_rsc->fns->state(child_rsc, FALSE); switch (next_role) { case RSC_ROLE_STARTED: case RSC_ROLE_UNKNOWN: /* * Default to -1 if no value is set * * This allows master locations to be specified * based solely on rsc_location constraints, * but prevents anyone from being promoted if * neither a constraint nor a promotion score is present */ child_rsc->priority = promotion_score(child_rsc, chosen, -1); break; case RSC_ROLE_SLAVE: case RSC_ROLE_STOPPED: child_rsc->priority = -INFINITY; break; case RSC_ROLE_MASTER: /* We will arrive here if we're re-creating actions after a stonith */ break; default: CRM_CHECK(FALSE /* unhandled */ , crm_err("Unknown resource role: %d for %s", next_role, child_rsc->id)); } apply_master_location(child_rsc, child_rsc->rsc_location, chosen); apply_master_location(child_rsc, rsc->rsc_location, chosen); for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter2->data; child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons, data_set); } child_rsc->sort_index = child_rsc->priority; pe_rsc_trace(rsc, "Assigning priority for %s: %d", child_rsc->id, child_rsc->priority); if (next_role == RSC_ROLE_MASTER) { child_rsc->sort_index = INFINITY; } } dump_node_scores(LOG_TRACE, rsc, "Pre merge", rsc->allowed_nodes); promotion_order(rsc, data_set); /* mark the first N as masters */ for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; score2char_stack(child_rsc->sort_index, score, len); chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (show_scores) { if (is_set(data_set->flags, pe_flag_stdout)) { printf("%s promotion score on %s: %s\n", child_rsc->id, (chosen? chosen->details->uname : "none"), score); } } else { do_crm_log(scores_log_level, "%s promotion score on %s: %s", child_rsc->id, chosen ? 
chosen->details->uname : "none", score); } chosen = NULL; /* nuke 'chosen' so that we don't promote more than the * required number of instances */ if (child_rsc->sort_index < 0) { pe_rsc_trace(rsc, "Not supposed to promote child: %s", child_rsc->id); } else if ((promoted < clone_data->promoted_max) || is_not_set(rsc->flags, pe_rsc_managed)) { chosen = can_be_master(child_rsc); } pe_rsc_debug(rsc, "%s promotion score: %d", child_rsc->id, child_rsc->priority); if (chosen == NULL) { set_role_slave(child_rsc, FALSE); continue; } else if(child_rsc->role < RSC_ROLE_MASTER && is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", child_rsc->id, role2text(child_rsc->role), role2text(child_rsc->next_role)); set_role_slave(child_rsc, FALSE); continue; } chosen->count++; pe_rsc_info(rsc, "Promoting %s (%s %s)", child_rsc->id, role2text(child_rsc->role), chosen->details->uname); set_role_master(child_rsc); promoted++; } pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d to master", rsc->id, promoted, clone_data->promoted_max); return NULL; } void create_promotable_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr gIter = rsc->children; action_t *action_complete = NULL; gboolean any_promoting = FALSE; gboolean any_demoting = FALSE; resource_t *last_promote_rsc = NULL; resource_t *last_demote_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_debug(rsc, "Creating actions for %s", rsc->id); for (; gIter != NULL; gIter = gIter->next) { gboolean child_promoting = FALSE; gboolean child_demoting = FALSE; resource_t *child_rsc = (resource_t *) gIter->data; pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id); child_rsc->cmds->create_actions(child_rsc, data_set); check_promotable_actions(child_rsc, &child_demoting, &child_promoting); any_demoting = any_demoting || child_demoting; any_promoting = any_promoting || child_promoting; pe_rsc_trace(rsc, "Created actions for %s: %d %d", child_rsc->id, child_promoting, child_demoting); } /* promote */ action = create_pseudo_resource_op(rsc, RSC_PROMOTE, !any_promoting, TRUE, data_set); action_complete = create_pseudo_resource_op(rsc, RSC_PROMOTED, !any_promoting, TRUE, data_set); action_complete->priority = INFINITY; child_promoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_promote_rsc, data_set); if (clone_data->promote_notify == NULL) { clone_data->promote_notify = create_notification_boundaries(rsc, RSC_PROMOTE, action, action_complete, data_set); } /* demote */ action = create_pseudo_resource_op(rsc, RSC_DEMOTE, !any_demoting, TRUE, data_set); action_complete = create_pseudo_resource_op(rsc, RSC_DEMOTED, !any_demoting, TRUE, data_set); action_complete->priority = INFINITY; child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_demote_rsc, data_set); if (clone_data->demote_notify == NULL) { clone_data->demote_notify = create_notification_boundaries(rsc, RSC_DEMOTE, action, action_complete, data_set); if (clone_data->promote_notify) { /* If we ever wanted groups to have notifications we'd need to move this to native_internal_constraints() one day * Requires exposing *_notify */ order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); 
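/* Editor's aside, not part of the patch: the pcmk__list_of_multiple() hunk
 * above, like the destroy_synapse()/destroy_graph() hunks later in this
 * patch, swaps g_list_length() comparisons for constant-time pointer tests;
 * g_list_length() walks the entire list just to produce a count. A sketch of
 * the shape such helpers take (the exact definitions live in Pacemaker's
 * internal headers and may differ; requires <glib.h> and <stdbool.h>):
 */
static inline bool
sketch_list_is_empty(const GList *list)
{
    return list == NULL;    /* replaces g_list_length(list) == 0 */
}

static inline bool
sketch_list_of_multiple(const GList *list)
{
    /* "length greater than 1" without traversal: a first and a second node */
    return (list != NULL) && (list->next != NULL);
}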
order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre, pe_order_optional); } } /* restore the correct priority */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->priority = rsc->priority; } } void promote_demote_constraints(resource_t *rsc, pe_working_set_t *data_set) { /* global stopped before start */ new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); /* global stopped before promote */ new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before start */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set); /* global started before promote */ new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before stop */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); /* global demote before demoted */ new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set); /* global demoted before promote */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); } void promotable_constraints(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = rsc->children; resource_t *last_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); promote_demote_constraints(rsc, data_set); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; /* child demote before promote */ new_rsc_order(child_rsc, RSC_DEMOTE, child_rsc, RSC_PROMOTE, pe_order_optional, data_set); child_promoting_constraints(clone_data, pe_order_optional, rsc, child_rsc, last_rsc, data_set); child_demoting_constraints(clone_data, pe_order_optional, rsc, child_rsc, last_rsc, data_set); last_rsc = child_rsc; } } static void node_hash_update_one(GHashTable * hash, node_t * other, const char *attr, int score) { GHashTableIter iter; node_t *node = NULL; const char *value = NULL; if (other == NULL) { return; } else if (attr == NULL) { attr = CRM_ATTR_UNAME; } value = pe_node_attribute_raw(other, attr); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { const char *tmp = pe_node_attribute_raw(node, attr); if (safe_str_eq(value, tmp)) { crm_trace("%s: %d + %d", node->details->uname, node->weight, other->weight); node->weight = merge_weights(node->weight, score); } } } void promotable_colocation_rh(resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set) { GListPtr gIter = NULL; if (is_set(rsc_lh->flags, pe_rsc_provisional)) { GListPtr rhs = NULL; for (gIter = rsc_rh->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE); pe_rsc_trace(rsc_rh, "Processing: %s", child_rsc->id); if (chosen != NULL && next_role == constraint->role_rh) { pe_rsc_trace(rsc_rh, "Applying: %s %s %s %d", child_rsc->id, role2text(next_role), chosen->details->uname, constraint->score); if (constraint->score < INFINITY) { node_hash_update_one(rsc_lh->allowed_nodes, chosen, constraint->node_attribute, constraint->score); 
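/* Editor's aside, not part of the patch: node_hash_update_one() above and
 * the rsc_lh->priority updates that follow lean on merge_weights(), which
 * adds two scores while saturating at Pacemaker's +/-INFINITY (1000000), so
 * a -INFINITY ban is never cancelled out by finite positive scores. An
 * illustrative sketch of that behaviour; the real implementation lives in
 * the scheduler library and may differ in detail:
 */
static int
sketch_merge_weights(int w1, int w2)
{
    const int max_score = 1000000;  /* Pacemaker's INFINITY */
    long long sum;

    if ((w1 <= -max_score) || (w2 <= -max_score)) {
        return -max_score;          /* -INFINITY is sticky */
    }
    if ((w1 >= max_score) || (w2 >= max_score)) {
        return max_score;
    }
    sum = (long long) w1 + w2;      /* widen to avoid int overflow */
    if (sum >= max_score) {
        return max_score;
    }
    if (sum <= -max_score) {
        return -max_score;
    }
    return (int) sum;
}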
} rhs = g_list_prepend(rhs, chosen); } } /* Only do this if it's not a master-master colocation * Doing this unconditionally would prevent the slaves from being started */ if (constraint->role_lh != RSC_ROLE_MASTER || constraint->role_rh != RSC_ROLE_MASTER) { if (constraint->score >= INFINITY) { node_list_exclude(rsc_lh->allowed_nodes, rhs, TRUE); } } g_list_free(rhs); } else if (constraint->role_lh == RSC_ROLE_MASTER) { pe_resource_t *rh_child = find_compatible_child(rsc_lh, rsc_rh, constraint->role_rh, FALSE, data_set); if (rh_child == NULL && constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s can't be promoted %s", rsc_lh->id, constraint->id); rsc_lh->priority = -INFINITY; } else if (rh_child != NULL) { int new_priority = merge_weights(rsc_lh->priority, constraint->score); pe_rsc_debug(rsc_lh, "Applying %s to %s", constraint->id, rsc_lh->id); pe_rsc_debug(rsc_lh, "\t%s: %d->%d", rsc_lh->id, rsc_lh->priority, new_priority); rsc_lh->priority = new_priority; } } return; } diff --git a/lib/pacemaker/pcmk_trans_unpack.c b/lib/pacemaker/pcmk_trans_unpack.c index 23c8ac14aa..d1ff5b73d8 100644 --- a/lib/pacemaker/pcmk_trans_unpack.c +++ b/lib/pacemaker/pcmk_trans_unpack.c @@ -1,333 +1,333 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(transitioner); static crm_action_t * unpack_action(synapse_t * parent, xmlNode * xml_action) { crm_action_t *action = NULL; const char *value = crm_element_value(xml_action, XML_ATTR_ID); if (value == NULL) { crm_err("Actions must have an id!"); crm_log_xml_trace(xml_action, "Action with missing id"); return NULL; } action = calloc(1, sizeof(crm_action_t)); if (action == NULL) { crm_perror(LOG_CRIT, "Cannot unpack action"); crm_log_xml_trace(xml_action, "Lost action"); return NULL; } action->id = crm_parse_int(value, NULL); action->type = action_type_rsc; action->xml = copy_xml(xml_action); action->synapse = parent; if (safe_str_eq(crm_element_name(action->xml), XML_GRAPH_TAG_RSC_OP)) { action->type = action_type_rsc; } else if (safe_str_eq(crm_element_name(action->xml), XML_GRAPH_TAG_PSEUDO_EVENT)) { action->type = action_type_pseudo; } else if (safe_str_eq(crm_element_name(action->xml), XML_GRAPH_TAG_CRM_EVENT)) { action->type = action_type_crm; } action->params = xml2list(action->xml); value = g_hash_table_lookup(action->params, "CRM_meta_timeout"); if (value != NULL) { action->timeout = crm_parse_int(value, NULL); } /* Take start-delay into account for the timeout of the action timer */ value = g_hash_table_lookup(action->params, "CRM_meta_start_delay"); if (value != NULL) { action->timeout += crm_parse_int(value, NULL); } value = g_hash_table_lookup(action->params, "CRM_meta_interval"); if (value != NULL) { action->interval_ms = crm_parse_ms(value); } value = g_hash_table_lookup(action->params, "CRM_meta_can_fail"); if (value != NULL) { crm_str_to_boolean(value, &(action->can_fail)); } crm_trace("Action %d has timer set to %dms", action->id, action->timeout); return action; } static synapse_t * unpack_synapse(crm_graph_t * new_graph, xmlNode * xml_synapse) { const char *value = NULL; xmlNode *inputs = NULL; xmlNode *action_set = NULL; synapse_t *new_synapse = NULL; CRM_CHECK(xml_synapse != 
NULL, return NULL); crm_trace("looking in synapse %s", ID(xml_synapse)); new_synapse = calloc(1, sizeof(synapse_t)); new_synapse->id = crm_parse_int(ID(xml_synapse), NULL); value = crm_element_value(xml_synapse, XML_CIB_ATTR_PRIORITY); if (value != NULL) { new_synapse->priority = crm_parse_int(value, NULL); } CRM_CHECK(new_synapse->id >= 0, free(new_synapse); return NULL); new_graph->num_synapses++; crm_trace("look for actions in synapse %s", crm_element_value(xml_synapse, XML_ATTR_ID)); for (action_set = __xml_first_child(xml_synapse); action_set != NULL; action_set = __xml_next(action_set)) { if (crm_str_eq((const char *)action_set->name, "action_set", TRUE)) { xmlNode *action = NULL; for (action = __xml_first_child(action_set); action != NULL; action = __xml_next(action)) { crm_action_t *new_action = unpack_action(new_synapse, action); if (new_action == NULL) { continue; } new_graph->num_actions++; crm_trace("Adding action %d to synapse %d", new_action->id, new_synapse->id); new_synapse->actions = g_list_append(new_synapse->actions, new_action); } } } crm_trace("look for inputs in synapse %s", ID(xml_synapse)); for (inputs = __xml_first_child(xml_synapse); inputs != NULL; inputs = __xml_next(inputs)) { if (crm_str_eq((const char *)inputs->name, "inputs", TRUE)) { xmlNode *trigger = NULL; for (trigger = __xml_first_child(inputs); trigger != NULL; trigger = __xml_next(trigger)) { xmlNode *input = NULL; for (input = __xml_first_child(trigger); input != NULL; input = __xml_next(input)) { crm_action_t *new_input = unpack_action(new_synapse, input); if (new_input == NULL) { continue; } crm_trace("Adding input %d to synapse %d", new_input->id, new_synapse->id); new_synapse->inputs = g_list_append(new_synapse->inputs, new_input); } } } } return new_synapse; } static void destroy_action(crm_action_t * action); crm_graph_t * unpack_graph(xmlNode * xml_graph, const char *reference) { /* id = -1; new_graph->abort_priority = 0; new_graph->network_delay = 0; new_graph->stonith_timeout = 0; new_graph->completion_action = tg_done; if (reference) { new_graph->source = strdup(reference); } else { new_graph->source = strdup("unknown"); } if (xml_graph != NULL) { t_id = crm_element_value(xml_graph, "transition_id"); CRM_CHECK(t_id != NULL, free(new_graph); return NULL); new_graph->id = crm_parse_int(t_id, "-1"); time = crm_element_value(xml_graph, "cluster-delay"); CRM_CHECK(time != NULL, free(new_graph); return NULL); new_graph->network_delay = crm_parse_interval_spec(time); time = crm_element_value(xml_graph, "stonith-timeout"); if (time == NULL) { new_graph->stonith_timeout = new_graph->network_delay; } else { new_graph->stonith_timeout = crm_parse_interval_spec(time); } t_id = crm_element_value(xml_graph, "batch-limit"); new_graph->batch_limit = crm_parse_int(t_id, "0"); t_id = crm_element_value(xml_graph, "migration-limit"); new_graph->migration_limit = crm_parse_int(t_id, "-1"); } for (synapse = __xml_first_child(xml_graph); synapse != NULL; synapse = __xml_next(synapse)) { if (crm_str_eq((const char *)synapse->name, "synapse", TRUE)) { synapse_t *new_synapse = unpack_synapse(new_graph, synapse); if (new_synapse != NULL) { new_graph->synapses = g_list_append(new_graph->synapses, new_synapse); } } } crm_debug("Unpacked transition %d: %d actions in %d synapses", new_graph->id, new_graph->num_actions, new_graph->num_synapses); return new_graph; } static void destroy_action(crm_action_t * action) { if (action->timer && action->timer->source_id != 0) { crm_warn("Cancelling timer for action %d (src=%d)", 
action->id, action->timer->source_id); g_source_remove(action->timer->source_id); } if (action->params) { g_hash_table_destroy(action->params); } free_xml(action->xml); free(action->timer); free(action); } static void destroy_synapse(synapse_t * synapse) { - while (g_list_length(synapse->actions) > 0) { + while (synapse->actions != NULL) { crm_action_t *action = g_list_nth_data(synapse->actions, 0); synapse->actions = g_list_remove(synapse->actions, action); destroy_action(action); } - while (g_list_length(synapse->inputs) > 0) { + while (synapse->inputs != NULL) { crm_action_t *action = g_list_nth_data(synapse->inputs, 0); synapse->inputs = g_list_remove(synapse->inputs, action); destroy_action(action); } free(synapse); } void destroy_graph(crm_graph_t * graph) { if (graph == NULL) { return; } - while (g_list_length(graph->synapses) > 0) { + while (graph->synapses != NULL) { synapse_t *synapse = g_list_nth_data(graph->synapses, 0); graph->synapses = g_list_remove(graph->synapses, synapse); destroy_synapse(synapse); } free(graph->source); free(graph); } lrmd_event_data_t * convert_graph_action(xmlNode * resource, crm_action_t * action, int status, int rc) { xmlNode *xop = NULL; lrmd_event_data_t *op = NULL; GHashTableIter iter; const char *name = NULL; const char *value = NULL; xmlNode *action_resource = NULL; CRM_CHECK(action != NULL, return NULL); CRM_CHECK(action->type == action_type_rsc, return NULL); action_resource = first_named_child(action->xml, XML_CIB_TAG_RESOURCE); CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "Bad"); return NULL); op = calloc(1, sizeof(lrmd_event_data_t)); op->rsc_id = strdup(ID(action_resource)); op->interval_ms = action->interval_ms; op->op_type = strdup(crm_element_value(action->xml, XML_LRM_ATTR_TASK)); op->rc = rc; op->op_status = status; op->t_run = time(NULL); op->t_rcchange = op->t_run; op->params = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free); g_hash_table_iter_init(&iter, action->params); while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) { g_hash_table_insert(op->params, strdup(name), strdup(value)); } for (xop = __xml_first_child(resource); xop != NULL; xop = __xml_next(xop)) { int tmp = 0; crm_element_value_int(xop, XML_LRM_ATTR_CALLID, &tmp); crm_debug("Got call_id=%d for %s", tmp, ID(resource)); if (tmp > op->call_id) { op->call_id = tmp; } } op->call_id++; return op; } diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index fa8d778730..700816b252 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,1939 +1,1939 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #define PE__VARIANT_BUNDLE 1 #include "./variant.h" static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static int allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, char *buffer, int max) { if(data->ip_range_start == NULL) { return 0; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { return snprintf(buffer, max, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } case PE__CONTAINER_AGENT_RKT: return snprintf(buffer, max, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); default: // PE__CONTAINER_AGENT_UNKNOWN break; } return 0; } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. */ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->nreplicas_per_host = 1; /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */ } return TRUE; } return FALSE; } static bool create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? 
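/* Editor's aside, not part of the patch: next_ip() above advances one IPv4
 * address at a time from ip-range-start, giving each replica its own
 * address, e.g.
 *   next_ip("192.168.122.5")   returns "192.168.122.6"
 *   next_ip("192.168.122.254") returns "192.168.123.1" (rolls into the next
 *                                      /24, never emitting .255 or .0)
 *   next_ip("192.168.254.9")   returns NULL (third octet exhausted; IPv6 is
 *                                      still a @TODO)
 * A usage sketch under those assumptions, with the caller owning each
 * returned string (requires <stdlib.h> and <string.h>):
 */
static int
sketch_count_usable_ips(const char *range_start, int wanted)
{
    int got = 0;
    char *ip = strdup(range_start);

    while ((ip != NULL) && (got < wanted)) {
        char *next = next_ip(ip);   /* NULL once the range is exhausted */

        got++;                      /* the current address is usable */
        free(ip);
        ip = next;
    }
    free(ip);
    return got;                     /* how many replicas can get an address */
}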
if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) { return FALSE; } parent->children = g_list_append(parent->children, replica->ip); } return TRUE; } static bool create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", PE__CONTAINER_AGENT_DOCKER_S); free(id); xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE); offset += snprintf(buffer+offset, max-offset, " --restart=no"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. */ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " -h %s-%d", data->prefix, replica->offset); } offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1"); if (data->container_network) { #if 0 offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", replica->ipaddr); #endif offset += snprintf(buffer+offset, max-offset, " --net=%s", data->container_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { pe__bundle_mount_t *mount = pIter->data; if (is_set(mount->flags, pe__bundle_mount_subdir)) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, replica->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { pe__bundle_port_t *port = pIter->data; if (replica->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", replica->ipaddr, port->source, port->target); } else if(safe_str_neq(data->container_network, "host")) { // No need to do port mapping if net=host offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if (data->launcher_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->launcher_options); } if (data->container_host_options) { offset += snprintf(buffer + offset, max - offset, " %s", data->container_host_options); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer); free(buffer); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer); 
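/* Editor's aside, not part of the patch: the run_opts strings above are
 * accumulated with "offset += snprintf(buffer+offset, max-offset, ...)".
 * snprintf() returns the length that *would* have been written, so once a
 * write truncates, offset can pass max and the next size argument goes
 * negative (wrapping to a huge size_t). The 4096-byte buffer here leaves
 * plenty of headroom for container options, but a defensive variant of the
 * same pattern would clamp (requires <stdio.h>):
 */
static int
sketch_append(char *buf, int offset, int max, const char *text)
{
    if ((offset >= 0) && (offset < max)) {
        int n = snprintf(buf + offset, (size_t) (max - offset), "%s", text);

        if (n > 0) {
            offset += n;    /* may still exceed max if truncated ... */
        }
    }
    return (offset < max)? offset : max;    /* ... so clamp before returning */
}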
free(dbuffer); if (replica->child) { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * crm_create_nvpair_xml(xml_obj, NULL, * "run_cmd", * "/usr/libexec/pacemaker/pacemaker-execd"); * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_container, "operations"); crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (!common_unpack(xml_container, &replica->container, parent, data_set)) { return FALSE; } parent->children = g_list_append(parent->children, replica->container); return TRUE; } static bool create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", PE__CONTAINER_AGENT_PODMAN_S); free(id); xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE); // FIXME: (bandini 2018-08) podman has no restart policies //offset += snprintf(buffer+offset, max-offset, " --restart=no"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. 
*/ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " -h %s-%d", data->prefix, replica->offset); } offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1"); if (data->container_network) { #if 0 // podman has no support for --link-local-ip offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", replica->ipaddr); #endif offset += snprintf(buffer+offset, max-offset, " --net=%s", data->container_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { pe__bundle_mount_t *mount = pIter->data; if (is_set(mount->flags, pe__bundle_mount_subdir)) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, replica->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { pe__bundle_port_t *port = pIter->data; if (replica->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", replica->ipaddr, port->source, port->target); } else if(safe_str_neq(data->container_network, "host")) { // No need to do port mapping if net=host offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if (data->launcher_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->launcher_options); } if (data->container_host_options) { offset += snprintf(buffer + offset, max - offset, " %s", data->container_host_options); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer); free(buffer); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer); free(dbuffer); if (replica->child) { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * crm_create_nvpair_xml(xml_obj, NULL, * "run_cmd", * "/usr/libexec/pacemaker/pacemaker-execd"); * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_container, "operations"); crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL); // TODO: Other ops? 
Timeouts and intervals from underlying resource? if (!common_unpack(xml_container, &replica->container, parent, data_set)) { return FALSE; } parent->children = g_list_append(parent->children, replica->container); return TRUE; } static bool create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; int volid = 0; id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", PE__CONTAINER_AGENT_RKT_S); free(id); xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true"); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false"); crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. */ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d", data->prefix, replica->offset); } offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1"); if (data->container_network) { #if 0 offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", replica->ipaddr); #endif offset += snprintf(buffer+offset, max-offset, " --net=%s", data->container_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { pe__bundle_mount_t *mount = pIter->data; if (is_set(mount->flags, pe__bundle_mount_subdir)) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, replica->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source); if(mount->options) { offset += snprintf(buffer+offset, max-offset, ",%s", mount->options); } offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source); if(mount->options) { offset += snprintf(buffer+offset, max-offset, ",%s", mount->options); } offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target); } volid++; } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { pe__bundle_port_t *port = pIter->data; if (replica->ipaddr) { offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s", port->target, replica->ipaddr, port->source); } else { offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source); } } if (data->launcher_options) { offset += snprintf(buffer+offset, max-offset, " %s", 
data->launcher_options); } if (data->container_host_options) { offset += snprintf(buffer + offset, max - offset, " %s", data->container_host_options); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer); free(buffer); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer); free(dbuffer); if (replica->child) { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * crm_create_nvpair_xml(xml_obj, NULL, * "run_cmd", * "/usr/libexec/pacemaker/pacemaker-execd"); * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if (data->container_command) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_container, "operations"); crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (!common_unpack(xml_container, &replica->container, parent, data_set)) { return FALSE; } parent->children = g_list_append(parent->children, replica->container); return TRUE; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pe_node_t *) match)->weight = -INFINITY; ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never; } if (rsc->children) { GListPtr child; for (child = rsc->children; child != NULL; child = child->next) { disallow_node((resource_t *) (child->data), uname); } } } static bool create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { if (replica->child && valid_network(data)) { GHashTableIter gIter; GListPtr rsc_iter = NULL; node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; if (remote_id_conflict(id, data_set)) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? 
replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = crm_itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during data set cleanup to use as * the node ID and uname. */ free(id); id = NULL; uname = ID(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(data_set->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, "remote", "-INFINITY", data_set); } else { node->weight = -INFINITY; } node->rsc_discover_mode = pe_discover_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pe_node_t entry. Ideally, it would do the same for bundle nodes. * Unfortunately, a bundle has to be mostly unpacked before it's obvious * what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when common_unpack() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) { disallow_node((resource_t *) (rsc_iter->data), uname); } replica->node = node_copy(node); replica->node->weight = 500; replica->node->rsc_discover_mode = pe_discover_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->allowed_nodes); } replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); g_hash_table_insert(replica->child->allowed_nodes, (gpointer) replica->node->details->id, node_copy(replica->node)); { node_t *copy = node_copy(replica->node); copy->weight = -INFINITY; g_hash_table_insert(replica->child->parent->allowed_nodes, (gpointer) replica->node->details->id, copy); } if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) { return FALSE; } g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pe__is_guest_or_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -INFINITY; } } replica->node->details->remote_rsc = replica->remote; // Ensure pe__is_guest_node() functions correctly immediately replica->remote->container = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). 
*/ g_hash_table_insert(replica->node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); /* One effect of this is that setup_container() will add * replica->remote to replica->container's fillers, which will make * pe__resource_contains_guest_node() true for replica->container. * * replica->child does NOT get added to replica->container's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking replica->container's migration * threshold. */ parent->children = g_list_append(parent->children, replica->remote); } return TRUE; } static bool create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, pe_working_set_t *data_set) { switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: if (!create_docker_resource(parent, data, replica, data_set)) { return FALSE; } break; case PE__CONTAINER_AGENT_PODMAN: if (!create_podman_resource(parent, data, replica, data_set)) { return FALSE; } break; case PE__CONTAINER_AGENT_RKT: if (!create_rkt_resource(parent, data, replica, data_set)) { return FALSE; } break; default: // PE__CONTAINER_AGENT_UNKNOWN return FALSE; } if (create_ip_resource(parent, data, replica, data_set) == FALSE) { return FALSE; } if(create_remote_resource(parent, data, replica, data_set) == FALSE) { return FALSE; } if (replica->child && replica->ipaddr) { add_hash_param(replica->child->meta, "external-ip", replica->ipaddr); } if (replica->remote) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with pacemaker-remoted inside in order to start * services inside those containers. 
*/ set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes); } return TRUE; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t)); mount->source = strdup(source); mount->target = strdup(target); if (options) { mount->options = strdup(options); } mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pe__bundle_replica_t * replica_for_remote(pe_resource_t *remote) { resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->parent != NULL) { top = top->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pe_resource_t *rsc) { const char *value; if (rsc == NULL) { return FALSE; } value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR); if (safe_str_eq(value, "#uname") == FALSE) { return FALSE; } else { const char *match[3][2] = { { XML_ATTR_TYPE, "remote" }, { XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF }, { XML_AGENT_ATTR_PROVIDER, "pacemaker" }, }; for (int m = 0; m < 3; m++) { value = crm_element_value(rsc->xml, match[m][0]); if (safe_str_neq(value, match[m][1])) { return FALSE; } } } return TRUE; } const char * pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pe_node_t *node = NULL; pe__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc)) { return NULL; } replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->allocated_to; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. 
*/ node = pe__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, node->details->uname); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->details->uname); } return node->details->uname; } gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t)); rsc->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_RKT; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } else { return FALSE; } } } value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX); if (value == NULL) { // @COMPAT deprecated since 2.0.0 value = crm_element_value(xml_obj, "masters"); } bundle_data->promoted_max = crm_parse_int(value, "0"); if (bundle_data->promoted_max < 0) { pe_err("%s for %s must be nonnegative integer, using 0", XML_RSC_ATTR_PROMOTED_MAX, rsc->id); bundle_data->promoted_max = 0; } value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && bundle_data->promoted_max) { bundle_data->nreplicas = bundle_data->promoted_max; } else { bundle_data->nreplicas = crm_parse_int(value, "1"); } if (bundle_data->nreplicas < 1) { pe_err("'replicas' for %s must be positive integer, using 1", rsc->id); bundle_data->nreplicas = 1; } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); bundle_data->nreplicas_per_host = crm_parse_int(value, "1"); if (bundle_data->nreplicas_per_host < 1) { pe_err("'replicas-per-host' for %s must be positive integer, using 1", rsc->id); bundle_data->nreplicas_per_host = 1; } if (bundle_data->nreplicas_per_host == 1) { clear_bit(rsc->flags, pe_rsc_unique); } bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command"); bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options"); bundle_data->image = crm_element_value_copy(xml_obj, "image"); bundle_data->container_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port"); value = crm_element_value(xml_obj, "add-host"); if (check_boolean(value) == FALSE) { bundle_data->add_host = TRUE; } else { crm_str_to_boolean(value, &bundle_data->add_host); } for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { 
pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { const char *source = crm_element_value(xml_child, "source-dir"); const char *target = crm_element_value(xml_child, "target-dir"); const char *options = crm_element_value(xml_child, "options"); int flags = pe__bundle_mount_none; if (source == NULL) { source = crm_element_value(xml_child, "source-dir-root"); set_bit(flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pe_err("Invalid mount directive %s", ID(xml_child)); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(bundle_data)) { char *value = NULL; xmlNode *xml_set = NULL; xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); /* @COMPAT We no longer use the <master> tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) */ crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix, (bundle_data->promoted_max? "master" : (const char *)xml_resource->name)); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE); value = crm_itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = crm_itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE, (bundle_data->nreplicas_per_host > 1)? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE); value = crm_itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GListPtr childIter = NULL; resource_t *new_rsc = NULL; pe__bundle_port_t *port = NULL; int offset = 0, max = 1024; char *buffer = NULL; if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", ID(rsc->xml)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } bundle_data->child = new_rsc; /* Currently, we always map the default authentication key location * into the same location inside the container.
* * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = calloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting control-port. */ port->source = crm_itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = calloc(1, max+1); for (childIter = bundle_data->child->children; childIter != NULL; childIter = childIter->next) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->child = childIter->data; replica->child->exclusive_discover = TRUE; replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (is_set(replica->child->flags, pe_rsc_notify)) { set_bit(bundle_data->child->flags, pe_rsc_notify); } offset += allocate_ip(bundle_data, replica, buffer+offset, max-offset); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta, XML_RSC_ATTR_TARGET); } bundle_data->container_host_options = buffer; if (bundle_data->attribute_target) { g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); g_hash_table_replace(bundle_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); } } else { // Just a naked container, no pacemaker-remote int offset = 0, max = 1024; char *buffer = calloc(1, max+1); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->offset = lpc; offset += allocate_ip(bundle_data, replica, buffer+offset, max-offset); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = buffer; } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (!create_container(rsc, bundle_data, replica, data_set)) { pe_err("Failed unpacking resource %s", rsc->id); rsc->fns->free(rsc); return FALSE; } } if (bundle_data->child) { rsc->children = 
g_list_append(rsc->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pe_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all) { pe__bundle_variant_data_t *bundle_data = NULL; GListPtr iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! * \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pe_resource_t * pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_ASSERT(bundle && node); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica && replica->node); if (replica->node->details == node->details) { return replica->child; } } return NULL; } static void print_rsc_in_list(resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
  • "); } rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static void bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_concat(pre_text, " ", ' '); get_bundle_variant_data(bundle_data, rsc); status_print("%sid); status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type)); status_print("image=\"%s\" ", bundle_data->image); status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print(">\n"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); status_print("%s \n", pre_text, replica->offset); print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } int pe__bundle_xml(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = 0; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6 , "id", rsc->id , "type", container_agent_str(bundle_data->agent_type) , "image", bundle_data->image , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique)) , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed)) , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))); CRM_ASSERT(rc == 0); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; char *id = crm_itoa(replica->offset); CRM_ASSERT(replica); rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id); free(id); CRM_ASSERT(rc == 0); if (replica->ip != NULL) { out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip); } if (replica->child != NULL) { out->message(out, crm_map_element_name(replica->child->xml), options, replica->child); } out->message(out, crm_map_element_name(replica->container->xml), options, replica->container); if (replica->remote != NULL) { out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote); } pcmk__output_xml_pop_parent(out); // replica } pcmk__output_xml_pop_parent(out); // bundle return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica, long options) { node_t *node = NULL; pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); 
} else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pe__current_node(replica->container); pe__common_output_html(out, rsc, buffer, node, options); } int pe__bundle_html(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe__bundle_variant_data_t *bundle_data = NULL; char buffer[LINE_MAX]; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); pcmk__output_create_xml_node(out, "br"); out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); pcmk__output_xml_create_parent(out, "li"); if (is_set(options, pe_print_implicit)) { - if(g_list_length(bundle_data->replicas) > 1) { + if (pcmk__list_of_multiple(bundle_data->replicas)) { snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset); xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer); } pcmk__output_create_xml_node(out, "br"); out->begin_list(out, NULL, NULL, NULL); if (replica->ip != NULL) { out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip); } if (replica->child != NULL) { out->message(out, crm_map_element_name(replica->child->xml), options, replica->child); } out->message(out, crm_map_element_name(replica->container->xml), options, replica->container); if (replica->remote != NULL) { out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote); } out->end_list(out); } else { pe__bundle_replica_output_html(out, replica, options); } pcmk__output_xml_pop_parent(out); } out->end_list(out); return 0; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica, long options) { node_t *node = NULL; pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pe__current_node(replica->container); pe__common_output_text(out, rsc, buffer, node, options); } int pe__bundle_text(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe__bundle_variant_data_t *bundle_data = NULL; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? 
"" : " (unmanaged)"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (is_set(options, pe_print_implicit)) { - if(g_list_length(bundle_data->replicas) > 1) { + if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (replica->ip != NULL) { out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip); } if (replica->child != NULL) { out->message(out, crm_map_element_name(replica->child->xml), options, replica->child); } out->message(out, crm_map_element_name(replica->container->xml), options, replica->container); if (replica->remote != NULL) { out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote); } out->end_list(out); } else { pe__bundle_replica_output_text(out, replica, options); } } out->end_list(out); return 0; } static void print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text, long options, void *print_data) { node_t *node = NULL; pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pe__current_node(replica->container); common_print(rsc, pre_text, buffer, node, options, print_data); } void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { bundle_print_xml(rsc, pre_text, options, print_data); return; } get_bundle_variant_data(bundle_data, rsc); if (pre_text == NULL) { pre_text = " "; } status_print("%sContainer bundle%s: %s [%s]%s%s\n", pre_text, ((bundle_data->nreplicas > 1)? " set" : ""), rsc->id, bundle_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (options & pe_print_html) { status_print("<li>"); } if (is_set(options, pe_print_implicit)) { child_text = crm_strdup_printf(" %s", pre_text); - if(g_list_length(bundle_data->replicas) > 1) { + if (pcmk__list_of_multiple(bundle_data->replicas)) { status_print(" %sReplica[%d]\n", pre_text, replica->offset); } if (options & pe_print_html) { status_print("<br />\n<ul>\n"); } print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); if (options & pe_print_html) { status_print("</ul>\n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); print_bundle_replica(replica, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("</li>\n"); } } if (options & pe_print_html) { status_print("
    \n"); } } static void free_bundle_replica(pe__bundle_replica_t *replica) { if (replica == NULL) { return; } if (replica->node) { free(replica->node); replica->node = NULL; } if (replica->ip) { free_xml(replica->ip->xml); replica->ip->xml = NULL; replica->ip->fns->free(replica->ip); replica->ip = NULL; } if (replica->container) { free_xml(replica->container->xml); replica->container->xml = NULL; replica->container->fns->free(replica->container); replica->container = NULL; } if (replica->remote) { free_xml(replica->remote->xml); replica->remote->xml = NULL; replica->remote->fns->free(replica->remote); replica->remote = NULL; } free(replica->ipaddr); free(replica); } void pe__free_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); if(bundle_data->child) { free_xml(bundle_data->child->xml); bundle_data->child->xml = NULL; bundle_data->child->fns->free(bundle_data->child); } common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current) { enum rsc_role_e container_role = RSC_ROLE_UNKNOWN; return container_role; } /*! * \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const resource_t *rsc) { if ((rsc == NULL) || (rsc->variant != pe_container)) { return 0; } else { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } } void pe__count_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pe__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->fns->count(replica->ip); } if (replica->child) { replica->child->fns->count(replica->child); } if (replica->container) { replica->container->fns->count(replica->container); } if (replica->remote) { replica->remote->fns->count(replica->remote); } } } diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c index 3d093f7fb3..452b5ad585 100644 --- a/lib/pengine/clone.c +++ b/lib/pengine/clone.c @@ -1,1043 +1,1043 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #define VARIANT_CLONE 1 #include "./variant.h" void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources " "such as %s can be used only as anonymous clones", rsc->id, standard, rid); clone_data->clone_node_max = 1; clone_data->clone_max = QB_MIN(clone_data->clone_max, g_list_length(data_set->nodes)); } } resource_t * find_clone_instance(resource_t * rsc, const char *sub_id, pe_working_set_t * data_set) { char *child_id = NULL; resource_t *child = NULL; const char *child_base = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); child_base = ID(clone_data->xml_obj_child); child_id = crm_concat(child_base, sub_id, ':'); child = pe_find_resource(rsc->children, child_id); free(child_id); return child; } pe_resource_t * pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set) { gboolean as_orphan = FALSE; char *inc_num = NULL; char *inc_max = NULL; resource_t *child_rsc = NULL; xmlNode *child_copy = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE); if (clone_data->total_clones >= clone_data->clone_max) { // If we've already used all available instances, this is an orphan as_orphan = TRUE; } // Allocate instance numbers in numerical order (starting at 0) inc_num = crm_itoa(clone_data->total_clones); inc_max = crm_itoa(clone_data->clone_max); child_copy = copy_xml(clone_data->xml_obj_child); crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num); if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID)); child_rsc = NULL; goto bail; } /* child_rsc->globally_unique = rsc->globally_unique; */ CRM_ASSERT(child_rsc); clone_data->total_clones += 1; pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id); rsc->children = g_list_append(rsc->children, child_rsc); if (as_orphan) { set_bit_recursive(child_rsc, pe_rsc_orphan); } add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max); pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id); bail: free(inc_num); free(inc_max); return child_rsc; } gboolean clone_unpack(resource_t * rsc, pe_working_set_t * data_set) { int lpc = 0; xmlNode *a_child = NULL; xmlNode *xml_obj = rsc->xml; clone_variant_data_t *clone_data = NULL; const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED); const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX); const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); clone_data = calloc(1, sizeof(clone_variant_data_t)); rsc->variant_opaque = clone_data; if (is_set(rsc->flags, pe_rsc_promotable)) { const char *promoted_max = NULL; const char *promoted_node_max = NULL; promoted_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTED_MAX); if (promoted_max == NULL) { // @COMPAT deprecated since 2.0.0 promoted_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_MASTER_MAX); } promoted_node_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTED_NODEMAX); if (promoted_node_max == NULL) { // @COMPAT deprecated since 2.0.0 
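/* Clarifying note: crm_parse_int(value, "1") returns the fallback ("1") when
 * value is NULL, so in the calls below promoted-max and promoted-node-max
 * each default to 1 if neither the current nor the deprecated
 * meta-attribute is set.
 */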
promoted_node_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_MASTER_NODEMAX); } clone_data->promoted_max = crm_parse_int(promoted_max, "1"); clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1"); } // Implied by calloc() /* clone_data->xml_obj_child = NULL; */ clone_data->clone_node_max = crm_parse_int(max_clones_node, "1"); if (max_clones) { clone_data->clone_max = crm_parse_int(max_clones, "1"); - } else if (g_list_length(data_set->nodes) > 0) { + } else if (pcmk__list_of_multiple(data_set->nodes)) { clone_data->clone_max = g_list_length(data_set->nodes); } else { clone_data->clone_max = 1; /* Handy during crm_verify */ } clone_data->ordered = crm_is_true(ordered); if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) { crm_config_err("Anonymous clones (%s) may only support one copy per node", rsc->id); clone_data->clone_node_max = 1; } pe_rsc_trace(rsc, "Options for %s", rsc->id); pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max); pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max); pe_rsc_trace(rsc, "\tClone is unique: %s", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); pe_rsc_trace(rsc, "\tClone is promotable: %s", is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false"); // Clones may contain a single group or primitive for (a_child = __xml_first_child_element(xml_obj); a_child != NULL; a_child = __xml_next_element(a_child)) { if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE) || crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) { clone_data->xml_obj_child = a_child; break; } } if (clone_data->xml_obj_child == NULL) { crm_config_err("%s has nothing to clone", rsc->id); return FALSE; } /* * Make clones ever so slightly sticky by default * * This helps ensure clone instances are not shuffled around the cluster * for no benefit in situations when pre-allocation is not appropriate */ if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) { add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1"); } /* This ensures that the globally-unique value always exists for children to * inherit when being unpacked, as well as in resource agents' environment. */ add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE); if (clone_data->clone_max <= 0) { /* Create one child instance so that unpack_find_resource() will hook up * any orphans up to the parent correctly. */ if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } else { // Create a child instance for each available instance number for (lpc = 0; lpc < clone_data->clone_max; lpc++) { if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } } pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id); return TRUE; } gboolean clone_active(resource_t * rsc, gboolean all) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; gboolean child_active = child_rsc->fns->active(child_rsc, all); if (all == FALSE && child_active) { return TRUE; } else if (all && child_active == FALSE) { return FALSE; } } if (all) { return TRUE; } else { return FALSE; } } static void short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data) { if(suffix == NULL) { suffix = ""; } if (list) { if (options & pe_print_html) { status_print("
  • "); } status_print("%s%s: [%s ]%s", prefix, type, list, suffix); if (options & pe_print_html) { status_print("
  • \n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } } } static const char * configured_role_str(resource_t * rsc) { const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if ((target_role == NULL) && rsc->children && rsc->children->data) { target_role = g_hash_table_lookup(((resource_t*)rsc->children->data)->meta, XML_RSC_ATTR_TARGET_ROLE); } return target_role; } static enum rsc_role_e configured_role(resource_t * rsc) { const char *target_role = configured_role_str(rsc); if (target_role) { return text2role(target_role); } return RSC_ROLE_UNKNOWN; } static void clone_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { char *child_text = crm_concat(pre_text, " ", ' '); const char *target_role = configured_role_str(rsc); GListPtr gIter = rsc->children; status_print("%sid); status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false"); status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print("failure_ignored=\"%s\" ", is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false"); if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->fns->print(child_rsc, child_text, options, print_data); } status_print("%s\n", pre_text); free(child_text); } bool is_set_recursive(resource_t * rsc, long long flag, bool any) { GListPtr gIter; bool all = !any; if(is_set(rsc->flags, flag)) { if(any) { return TRUE; } } else if(all) { return FALSE; } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { if(is_set_recursive(gIter->data, flag, any)) { if(any) { return TRUE; } } else if(all) { return FALSE; } } if(all) { return TRUE; } return FALSE; } void clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { char *list_text = NULL; char *child_text = NULL; char *stopped_list = NULL; GListPtr master_list = NULL; GListPtr started_list = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; if (pre_text == NULL) { pre_text = " "; } if (options & pe_print_xml) { clone_print_xml(rsc, pre_text, options, print_data); return; } get_clone_variant_data(clone_data, rsc); child_text = crm_concat(pre_text, " ", ' '); status_print("%sClone Set: %s [%s]%s%s%s", pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child), is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "", is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("\n
      \n"); } else if ((options & pe_print_log) == 0) { status_print("\n"); } for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; resource_t *child_rsc = (resource_t *) gIter->data; gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (options & pe_print_clone_details) { print_full = TRUE; } if (is_set(rsc->flags, pe_rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; } // Everything else in this block is for anonymous clones } else if (is_set(options, pe_print_pending) && (child_rsc->pending_task != NULL) && strcmp(child_rsc->pending_task, "probe")) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (is_not_set(child_rsc->flags, pe_rsc_orphan) && is_not_set(options, pe_print_clone_active)) { stopped_list = add_list_element(stopped_list, child_rsc->id); } } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { // Instance is active on a single node enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_SLAVE) { master_list = g_list_append(master_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { if (options & pe_print_html) { status_print("
    • \n"); } child_rsc->fns->print(child_rsc, child_text, options, print_data); if (options & pe_print_html) { status_print("
    • \n"); } } } /* Masters */ master_list = g_list_sort(master_list, sort_node_uname); for (gIter = master_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } short_print(list_text, child_text, "Masters", NULL, options, print_data); g_list_free(master_list); free(list_text); list_text = NULL; /* Started/Slaves */ started_list = g_list_sort(started_list, sort_node_uname); for (gIter = started_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if(role == RSC_ROLE_SLAVE) { short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data); } else { short_print(list_text, child_text, "Slaves", NULL, options, print_data); } } else { short_print(list_text, child_text, "Started", NULL, options, print_data); } g_list_free(started_list); free(list_text); list_text = NULL; if (is_not_set(options, pe_print_clone_active)) { const char *state = "Stopped"; enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (is_not_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GListPtr nIter; GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped list for non-unique clones */ free(stopped_list); stopped_list = NULL; - if (g_list_length(list) == 0) { + if (list == NULL) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, sort_node_uname); for (nIter = list; nIter != NULL; nIter = nIter->next) { node_t *node = (node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL) { stopped_list = add_list_element(stopped_list, node->details->uname); } } g_list_free(list); } short_print(stopped_list, child_text, state, NULL, options, print_data); free(stopped_list); } if (options & pe_print_html) { status_print("
    \n"); } free(child_text); } int pe__clone_xml(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GListPtr gIter = rsc->children; int rc = pe__name_and_nvpairs_xml(out, true, "clone", 7 , "id", rsc->id , "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable)) , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique)) , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed)) , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)) , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored)) , "target_role", configured_role_str(rsc)); CRM_ASSERT(rc == 0); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc); } pcmk__output_xml_pop_parent(out); return rc; } int pe__clone_html(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); char *list_text = NULL; char *stopped_list = NULL; GListPtr master_list = NULL; GListPtr started_list = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; get_clone_variant_data(clone_data, rsc); out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s", rsc->id, ID(clone_data->xml_obj_child), is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "", is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (options & pe_print_clone_details) { print_full = TRUE; } if (is_set(rsc->flags, pe_rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; } // Everything else in this block is for anonymous clones } else if (is_set(options, pe_print_pending) && (child_rsc->pending_task != NULL) && strcmp(child_rsc->pending_task, "probe")) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (is_not_set(child_rsc->flags, pe_rsc_orphan) && is_not_set(options, pe_print_clone_active)) { stopped_list = add_list_element(stopped_list, child_rsc->id); } } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { // Instance is active on a single node enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_SLAVE) { master_list = g_list_append(master_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { out->message(out, 
crm_map_element_name(child_rsc->xml), options, child_rsc); } } /* Masters */ master_list = g_list_sort(master_list, sort_node_uname); for (gIter = master_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (list_text != NULL) { out->list_item(out, NULL, " Masters: [%s ]", list_text); g_list_free(master_list); free(list_text); list_text = NULL; } /* Started/Slaves */ started_list = g_list_sort(started_list, sort_node_uname); for (gIter = started_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (list_text != NULL) { if (is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if(role == RSC_ROLE_SLAVE) { out->list_item(out, NULL, " Slaves (target-role): [%s ]", list_text); } else { out->list_item(out, NULL, " Slaves: [%s ]", list_text); } } else { out->list_item(out, NULL, " Started: [%s ]", list_text); } g_list_free(started_list); free(list_text); list_text = NULL; } if (is_not_set(options, pe_print_clone_active)) { const char *state = "Stopped"; enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (is_not_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GListPtr nIter; GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped list for non-unique clones */ free(stopped_list); stopped_list = NULL; - if (g_list_length(list) == 0) { + if (list == NULL) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, sort_node_uname); for (nIter = list; nIter != NULL; nIter = nIter->next) { node_t *node = (node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL) { stopped_list = add_list_element(stopped_list, node->details->uname); } } g_list_free(list); } if (stopped_list != NULL) { out->list_item(out, NULL, " %s: [%s ]", state, stopped_list); free(stopped_list); } } out->end_list(out); return 0; } int pe__clone_text(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); char *list_text = NULL; char *stopped_list = NULL; GListPtr master_list = NULL; GListPtr started_list = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; get_clone_variant_data(clone_data, rsc); out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s", rsc->id, ID(clone_data->xml_obj_child), is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "", is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? 
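/* Example heading rendered by this call (hypothetical names):
 *   "Clone Set: ping-clone [ping] (promotable) (unmanaged)"
 */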
"" : " (unmanaged)"); for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (options & pe_print_clone_details) { print_full = TRUE; } if (is_set(rsc->flags, pe_rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; } // Everything else in this block is for anonymous clones } else if (is_set(options, pe_print_pending) && (child_rsc->pending_task != NULL) && strcmp(child_rsc->pending_task, "probe")) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (is_not_set(child_rsc->flags, pe_rsc_orphan) && is_not_set(options, pe_print_clone_active)) { stopped_list = add_list_element(stopped_list, child_rsc->id); } } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { // Instance is active on a single node enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_SLAVE) { master_list = g_list_append(master_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc); } } /* Masters */ master_list = g_list_sort(master_list, sort_node_uname); for (gIter = master_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (list_text != NULL) { out->list_item(out, "Masters", "[%s ]", list_text); g_list_free(master_list); free(list_text); list_text = NULL; } /* Started/Slaves */ started_list = g_list_sort(started_list, sort_node_uname); for (gIter = started_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (list_text != NULL) { if (is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if(role == RSC_ROLE_SLAVE) { out->list_item(out, "Slaves (target-role)", "[%s ]", list_text); } else { out->list_item(out, "Slaves", "[%s ]", list_text); } } else { out->list_item(out, "Started", "[%s ]", list_text); } g_list_free(started_list); free(list_text); list_text = NULL; } if (is_not_set(options, pe_print_clone_active)) { const char *state = "Stopped"; enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (is_not_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GListPtr nIter; GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped list for non-unique clones */ free(stopped_list); stopped_list = NULL; - if (g_list_length(list) == 0) { + if 
(list == NULL) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, sort_node_uname); for (nIter = list; nIter != NULL; nIter = nIter->next) { node_t *node = (node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL) { stopped_list = add_list_element(stopped_list, node->details->uname); } } g_list_free(list); } if (stopped_list != NULL) { out->list_item(out, state, "[%s ]", stopped_list); free(stopped_list); } } out->end_list(out); return 0; } void clone_free(resource_t * rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; CRM_ASSERT(child_rsc); pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); free_xml(child_rsc->xml); child_rsc->xml = NULL; /* There could be a saved unexpanded xml */ free_xml(child_rsc->orig_xml); child_rsc->orig_xml = NULL; child_rsc->fns->free(child_rsc); } g_list_free(rsc->children); if (clone_data) { CRM_ASSERT(clone_data->demote_notify == NULL); CRM_ASSERT(clone_data->stop_notify == NULL); CRM_ASSERT(clone_data->start_notify == NULL); CRM_ASSERT(clone_data->promote_notify == NULL); } common_free(rsc); } enum rsc_role_e clone_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN; GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current); if (a_role > clone_role) { clone_role = a_role; } } pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role)); return clone_role; } /*! * \internal * \brief Check whether a clone has an instance for every node * * \param[in] rsc Clone to check * \param[in] data_set Cluster state */ bool pe__is_universal_clone(pe_resource_t *rsc, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->clone_max == g_list_length(data_set->nodes)) { return TRUE; } } return FALSE; } diff --git a/lib/pengine/native.c b/lib/pengine/native.c index 8fd98bcc41..b0641154dc 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -1,1383 +1,1383 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #define VARIANT_NATIVE 1 #include "./variant.h" /*! 
* \internal * \brief Check whether a resource is active on multiple nodes */ static bool is_multiply_active(pe_resource_t *rsc) { unsigned int count = 0; if (rsc->variant == pe_native) { pe__find_active_requires(rsc, &count); } return count > 1; } void native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { GListPtr gIter = rsc->running_on; CRM_CHECK(node != NULL, return); for (; gIter != NULL; gIter = gIter->next) { node_t *a_node = (node_t *) gIter->data; CRM_CHECK(a_node != NULL, return); if (safe_str_eq(a_node->details->id, node->details->id)) { return; } } pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname, is_set(rsc->flags, pe_rsc_managed)?"":"(unmanaged)"); rsc->running_on = g_list_append(rsc->running_on, node); if (rsc->variant == pe_native) { node->details->running_rsc = g_list_append(node->details->running_rsc, rsc); } if (rsc->variant == pe_native && node->details->maintenance) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { resource_t *p = rsc->parent; pe_rsc_info(rsc, "resource %s isn't managed", rsc->id); resource_location(rsc, node, INFINITY, "not_managed_default", data_set); while(p && node->details->online) { /* add without the additional location constraint */ p->running_on = g_list_append(p->running_on, node); p = p->parent; } return; } if (is_multiply_active(rsc)) { switch (rsc->recovery_type) { case recovery_stop_only: { GHashTableIter gIter; node_t *local_node = NULL; /* make sure it doesn't come up again */ if (rsc->allowed_nodes != NULL) { g_hash_table_destroy(rsc->allowed_nodes); } rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_iter_init(&gIter, rsc->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) { local_node->weight = -INFINITY; } } break; case recovery_stop_start: break; case recovery_block: clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); /* If the resource belongs to a group or bundle configured with * multiple-active=block, block the entire entity. 
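 * For example, if one member of a group configured with
 * multiple-active=block is found active on two nodes, the loop below marks
 * every member of the group unmanaged and blocked, not just the
 * multiply-active member.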
*/ if (rsc->parent && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container) && rsc->parent->recovery_type == recovery_block) { GListPtr gIter = rsc->parent->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; clear_bit(child->flags, pe_rsc_managed); set_bit(child->flags, pe_rsc_block); } } break; } crm_debug("%s is active on multiple nodes including %s: %s", rsc->id, node->details->uname, recovery2text(rsc->recovery_type)); } else { pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname); } if (rsc->parent != NULL) { native_add_running(rsc->parent, node, data_set); } } static void recursive_clear_unique(pe_resource_t *rsc) { clear_bit(rsc->flags, pe_rsc_unique); add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE); for (GList *child = rsc->children; child != NULL; child = child->next) { recursive_clear_unique((pe_resource_t *) child->data); } } gboolean native_unpack(resource_t * rsc, pe_working_set_t * data_set) { resource_t *parent = uber_parent(rsc); native_variant_data_t *native_data = NULL; const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); uint32_t ra_caps = pcmk_get_ra_caps(standard); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); native_data = calloc(1, sizeof(native_variant_data_t)); rsc->variant_opaque = native_data; // Only some agent standards support unique and promotable clones if (is_not_set(ra_caps, pcmk_ra_cap_unique) && is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) { /* @COMPAT We should probably reject this situation as an error (as we * do for promotable below) rather than warn and convert, but that would * be a backward-incompatible change that we should probably do with a * transform at a schema major version bump. */ pe__force_anon(standard, parent, rsc->id, data_set); /* Clear globally-unique on the parent and all its descendents unpacked * so far (clearing the parent should make any future children unpacking * correct). We have to clear this resource explicitly because it isn't * hooked into the parent's children yet. 
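 * Put differently: clearing the parent covers any children unpacked after
 * this point, clearing its descendents covers siblings unpacked before this
 * point, and clearing this resource covers the one instance the parent
 * cannot reach yet.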
*/ recursive_clear_unique(parent); recursive_clear_unique(rsc); } if (is_not_set(ra_caps, pcmk_ra_cap_promotable) && is_set(parent->flags, pe_rsc_promotable)) { pe_err("Resource %s is of type %s and therefore " "cannot be used as a promotable clone resource", rsc->id, standard); return FALSE; } return TRUE; } static bool rsc_is_on_node(resource_t *rsc, const node_t *node, int flags) { pe_rsc_trace(rsc, "Checking whether %s is on %s", rsc->id, node->details->uname); if (is_set(flags, pe_find_current) && rsc->running_on) { for (GListPtr iter = rsc->running_on; iter; iter = iter->next) { node_t *loc = (node_t *) iter->data; if (loc->details == node->details) { return TRUE; } } } else if (is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) { return TRUE; } else if (is_not_set(flags, pe_find_current) && rsc->allocated_to && (rsc->allocated_to->details == node->details)) { return TRUE; } return FALSE; } resource_t * native_find_rsc(resource_t * rsc, const char *id, const node_t *on_node, int flags) { bool match = FALSE; resource_t *result = NULL; CRM_CHECK(id && rsc && rsc->id, return NULL); if (flags & pe_find_clone) { const char *rid = ID(rsc->xml); if (!pe_rsc_is_clone(uber_parent(rsc))) { match = FALSE; } else if (!strcmp(id, rsc->id) || safe_str_eq(id, rid)) { match = TRUE; } } else if (!strcmp(id, rsc->id)) { match = TRUE; } else if (is_set(flags, pe_find_renamed) && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } else if (is_set(flags, pe_find_any) || (is_set(flags, pe_find_anon) && is_not_set(rsc->flags, pe_rsc_unique))) { match = pe_base_name_eq(rsc, id); } if (match && on_node) { bool match_node = rsc_is_on_node(rsc, on_node, flags); if (match_node == FALSE) { match = FALSE; } } if (match) { return rsc; } for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = rsc->fns->find_rsc(child, id, on_node, flags); if (result) { return result; } } return NULL; } char * native_parameter(resource_t * rsc, node_t * node, gboolean create, const char *name, pe_working_set_t * data_set) { char *value_copy = NULL; const char *value = NULL; GHashTable *hash = NULL; GHashTable *local_hash = NULL; CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); if (create || g_hash_table_size(rsc->parameters) == 0) { if (node != NULL) { pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname); } else { pe_rsc_trace(rsc, "Creating default hash"); } local_hash = crm_str_table_new(); get_rsc_attributes(local_hash, rsc, node, data_set); hash = local_hash; } else { hash = rsc->parameters; } value = g_hash_table_lookup(hash, name); if (value == NULL) { /* try meta attributes instead */ value = g_hash_table_lookup(rsc->meta, name); } if (value != NULL) { value_copy = strdup(value); } if (local_hash != NULL) { g_hash_table_destroy(local_hash); } return value_copy; } gboolean native_active(resource_t * rsc, gboolean all) { GListPtr gIter = rsc->running_on; for (; gIter != NULL; gIter = gIter->next) { node_t *a_node = (node_t *) gIter->data; if (a_node->details->unclean) { crm_debug("Resource %s: node %s is unclean", rsc->id, a_node->details->uname); return TRUE; } else if (a_node->details->online == FALSE) { crm_debug("Resource %s: node %s is offline", rsc->id, a_node->details->uname); } else { crm_debug("Resource %s active on %s", rsc->id, a_node->details->uname); return TRUE; } } return FALSE; } struct 
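/* Clarifying note on native_active() above: a resource on an unclean node is
 * reported active, presumably because the node may merely be unreachable
 * rather than down, so the resource must be assumed to still be running
 * there until fencing resolves the question.
 */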
print_data_s { long options; void *print_data; }; static void native_print_attr(gpointer key, gpointer value, gpointer user_data) { long options = ((struct print_data_s *)user_data)->options; void *print_data = ((struct print_data_s *)user_data)->print_data; status_print("Option: %s = %s\n", (char *)key, (char *)value); } static const char * native_pending_state(resource_t * rsc) { const char *pending_state = NULL; if (safe_str_eq(rsc->pending_task, CRMD_ACTION_START)) { pending_state = "Starting"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STOP)) { pending_state = "Stopping"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE)) { pending_state = "Migrating"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED)) { /* Work might be done in here. */ pending_state = "Migrating"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE)) { pending_state = "Promoting"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE)) { pending_state = "Demoting"; } return pending_state; } static const char * native_pending_task(resource_t * rsc) { const char *pending_task = NULL; if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STATUS)) { pending_task = "Monitoring"; /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * unpack.c:unpack_rsc_op(). */ /* } else if (safe_str_eq(rsc->pending_task, "probe")) { pending_task = "Checking"; */ } return pending_task; } static enum rsc_role_e native_displayable_role(resource_t *rsc) { enum rsc_role_e role = rsc->role; if ((role == RSC_ROLE_STARTED) && is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { role = RSC_ROLE_SLAVE; } return role; } static const char * native_displayable_state(resource_t *rsc, long options) { const char *rsc_state = NULL; if (options & pe_print_pending) { rsc_state = native_pending_state(rsc); } if (rsc_state == NULL) { rsc_state = role2text(native_displayable_role(rsc)); } return rsc_state; } static void native_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, options); const char *target_role = NULL; /* resource information. */ status_print("%sxml, XML_ATTR_TYPE)); status_print("role=\"%s\" ", rsc_state); if (rsc->meta) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print("active=\"%s\" ", rsc->fns->active(rsc, TRUE) ? "true" : "false"); status_print("orphaned=\"%s\" ", is_set(rsc->flags, pe_rsc_orphan) ? "true" : "false"); status_print("blocked=\"%s\" ", is_set(rsc->flags, pe_rsc_block) ? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print("failure_ignored=\"%s\" ", is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false"); status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on)); if (options & pe_print_pending) { const char *pending_task = native_pending_task(rsc); if (pending_task) { status_print("pending=\"%s\" ", pending_task); } } if (options & pe_print_dev) { status_print("provisional=\"%s\" ", is_set(rsc->flags, pe_rsc_provisional) ? 
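/* Reference for the pending-state mapping above: "start" -> Starting,
 * "stop" -> Stopping, "migrate_to"/"migrate_from" -> Migrating,
 * "promote" -> Promoting, "demote" -> Demoting, "monitor" -> Monitoring;
 * pending probes are intentionally not displayed.
 */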
"true" : "false"); status_print("runnable=\"%s\" ", is_set(rsc->flags, pe_rsc_runnable) ? "true" : "false"); status_print("priority=\"%f\" ", (double)rsc->priority); status_print("variant=\"%s\" ", crm_element_name(rsc->xml)); } /* print out the nodes this resource is running on */ if (options & pe_print_rsconly) { status_print("/>\n"); /* do nothing */ } else if (rsc->running_on != NULL) { GListPtr gIter = rsc->running_on; status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; status_print("%s \n", pre_text, node->details->uname, node->details->id, node->details->online ? "false" : "true"); } status_print("%s\n", pre_text); } else { status_print("/>\n"); } } // Append a flag to resource description string's flags list static bool add_output_flag(GString *s, const char *flag_desc, bool have_flags) { g_string_append(s, (have_flags? ", " : " (")); g_string_append(s, flag_desc); return true; } // Append a node name to resource description string's node list static bool add_output_node(GString *s, const char *node, bool have_nodes) { g_string_append(s, (have_nodes? " " : " [ ")); g_string_append(s, node); return true; } /*! * \internal * \brief Create a string description of a resource * * \param[in] rsc Resource to describe * \param[in] name Desired identifier for the resource * \param[in] node If not NULL, node that resource is "on" * \param[in] options Bitmask of pe_print_* * \param[in] target_role Resource's target role * \param[in] show_nodes Whether to display nodes when multiply active * * \return Newly allocated string description of resource * \note Caller must free the result with g_free(). */ static gchar * native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, long options, const char *target_role, bool show_nodes) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *provider = NULL; const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); char *retval = NULL; GString *outstr = NULL; bool have_flags = false; CRM_CHECK(name != NULL, name = "unknown"); CRM_CHECK(kind != NULL, kind = "unknown"); CRM_CHECK(class != NULL, class = "unknown"); if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); } if (is_set(options, pe_print_rsconly) || pcmk__list_of_multiple(rsc->running_on)) { node = NULL; } // We need a string of at least this size outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind) + (provider? (strlen(provider) + 2) : 0) + (node? strlen(node->details->uname) + 1 : 0) + 11); // Resource name and agent g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class, /* @COMPAT This should be a single ':' (see CLBZ#5395) but * to avoid breaking anything relying on it, we're keeping * it like this until the next minor version bump. */ (provider? "::" : ""), (provider? 
provider : ""), kind); // State on node if (is_set(rsc->flags, pe_rsc_orphan)) { g_string_append(outstr, " ORPHANED"); } if (is_set(rsc->flags, pe_rsc_failed)) { enum rsc_role_e role = native_displayable_role(rsc); if (role > RSC_ROLE_SLAVE) { g_string_append_printf(outstr, " FAILED %s", role2text(role)); } else { g_string_append(outstr, " FAILED"); } } else { g_string_append(outstr, native_displayable_state(rsc, options)); } if (node) { g_string_append_printf(outstr, " %s", node->details->uname); } // Flags, as: ( [...]) if (node && !(node->details->online) && node->details->unclean) { have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); } if (is_set(options, pe_print_pending)) { const char *pending_task = native_pending_task(rsc); if (pending_task) { have_flags = add_output_flag(outstr, pending_task, have_flags); } } if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); /* Only show target role if it limits our abilities (i.e. ignore * Started, as it is the default anyways, and doesn't prevent the * resource from becoming Master). */ if (target_role_e == RSC_ROLE_STOPPED) { have_flags = add_output_flag(outstr, "disabled", have_flags); } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable) && target_role_e == RSC_ROLE_SLAVE) { have_flags = add_output_flag(outstr, "target-role:", have_flags); g_string_append(outstr, target_role); } } if (is_set(rsc->flags, pe_rsc_block)) { have_flags = add_output_flag(outstr, "blocked", have_flags); } else if (is_not_set(rsc->flags, pe_rsc_managed)) { have_flags = add_output_flag(outstr, "unmanaged", have_flags); } if (is_set(rsc->flags, pe_rsc_failure_ignored)) { have_flags = add_output_flag(outstr, "failure ignored", have_flags); } if (is_set(options, pe_print_dev)) { if (is_set(options, pe_rsc_provisional)) { have_flags = add_output_flag(outstr, "provisional", have_flags); } if (is_not_set(options, pe_rsc_runnable)) { have_flags = add_output_flag(outstr, "non-startable", have_flags); } have_flags = add_output_flag(outstr, "variant:", have_flags); g_string_append_printf(outstr, "%s priority:%f", crm_element_name(rsc->xml), (double) (rsc->priority)); } if (have_flags) { g_string_append(outstr, ")"); } // User-supplied description if (is_set(options, pe_print_rsconly) || pcmk__list_of_multiple(rsc->running_on)) { const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC); if (desc) { g_string_append_printf(outstr, " %s", desc); } } if (show_nodes && is_not_set(options, pe_print_rsconly) && pcmk__list_of_multiple(rsc->running_on)) { bool have_nodes = false; for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *n = (pe_node_t *) iter->data; have_nodes = add_output_node(outstr, n->details->uname, have_nodes); } if (have_nodes) { g_string_append(outstr, " ]"); } } retval = outstr->str; g_string_free(outstr, FALSE); return retval; } void pe__common_output_html(pcmk__output_t *out, resource_t * rsc, const char *name, node_t *node, long options) { const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *target_role = NULL; xmlNodePtr list_node = NULL; const char *cl = NULL; CRM_ASSERT(rsc->variant == pe_native); CRM_ASSERT(kind != NULL); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (is_not_set(rsc->flags, 
pe_rsc_managed)) { cl = "rsc-managed"; } else if (is_set(rsc->flags, pe_rsc_failed)) { cl = "rsc-failed"; } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) { cl = "rsc-failed"; - } else if (g_list_length(rsc->running_on) > 1) { + } else if (pcmk__list_of_multiple(rsc->running_on)) { cl = "rsc-multiple"; } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) { cl = "rsc-failure-ignored"; } else { cl = "rsc-ok"; } { gchar *s = native_output_string(rsc, name, node, options, target_role, true); list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); pcmk_create_html_node(list_node, "span", NULL, cl, s); g_free(s); } if (is_set(options, pe_print_details)) { GHashTableIter iter; gpointer key, value; out->begin_list(out, NULL, NULL, "Options"); g_hash_table_iter_init(&iter, rsc->parameters); while (g_hash_table_iter_next(&iter, &key, &value)) { out->list_item(out, NULL, "Option: %s = %s", (char *) key, (char *) value); } out->end_list(out); } if (is_set(options, pe_print_dev)) { GHashTableIter iter; node_t *n = NULL; out->begin_list(out, NULL, NULL, "Allowed Nodes"); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { out->list_item(out, NULL, "%s %d", n->details->uname, n->weight); } out->end_list(out); } if (is_set(options, pe_print_max_details)) { GHashTableIter iter; node_t *n = NULL; out->begin_list(out, NULL, NULL, "=== Allowed Nodes"); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { pe__output_node(n, FALSE, out); } out->end_list(out); } } void pe__common_output_text(pcmk__output_t *out, resource_t * rsc, const char *name, node_t *node, long options) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pe_native); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } { gchar *s = native_output_string(rsc, name, node, options, target_role, true); out->list_item(out, NULL, "%s", s); g_free(s); } if (is_set(options, pe_print_details)) { GHashTableIter iter; gpointer key, value; out->begin_list(out, NULL, NULL, "Options"); g_hash_table_iter_init(&iter, rsc->parameters); while (g_hash_table_iter_next(&iter, &key, &value)) { out->list_item(out, NULL, "Option: %s = %s", (char *) key, (char *) value); } out->end_list(out); } if (is_set(options, pe_print_dev)) { GHashTableIter iter; node_t *n = NULL; out->begin_list(out, NULL, NULL, "Allowed Nodes"); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { out->list_item(out, NULL, "%s %d", n->details->uname, n->weight); } out->end_list(out); } if (is_set(options, pe_print_max_details)) { GHashTableIter iter; node_t *n = NULL; out->begin_list(out, NULL, NULL, "=== Allowed Nodes"); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { pe__output_node(n, FALSE, out); } out->end_list(out); } } void common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pe_native); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && 
is_not_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } if ((pre_text == NULL) && (options & pe_print_printf)) { pre_text = " "; } if (options & pe_print_html) { if (is_not_set(rsc->flags, pe_rsc_managed)) { status_print("<font color=\"yellow\">"); } else if (is_set(rsc->flags, pe_rsc_failed)) { status_print("<font color=\"red\">"); } else if (rsc->running_on == NULL) { status_print("<font color=\"red\">"); } else if (pcmk__list_of_multiple(rsc->running_on)) { status_print("<font color=\"orange\">"); } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) { status_print("<font color=\"yellow\">"); } else { status_print("<font color=\"green\">"); } } { gchar *resource_s = native_output_string(rsc, name, node, options, target_role, false); status_print("%s%s", (pre_text? pre_text : ""), resource_s); g_free(resource_s); } #if CURSES_ENABLED if (is_set(options, pe_print_ncurses) && is_not_set(options, pe_print_rsconly) && !pcmk__list_of_multiple(rsc->running_on)) { /* coverity[negative_returns] False positive */ move(-1, 0); } #endif if (is_set(options, pe_print_html)) { status_print(" </font> "); } if (is_not_set(options, pe_print_rsconly) && pcmk__list_of_multiple(rsc->running_on)) { GListPtr gIter = rsc->running_on; int counter = 0; if (options & pe_print_html) { status_print("<ul>
\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("["); } for (; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; counter++; if (options & pe_print_html) { status_print("<li>
\n%s", n->details->uname); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" %s", n->details->uname); } else if ((options & pe_print_log)) { status_print("\t%d : %s", counter, n->details->uname); } else { status_print("%s", n->details->uname); } if (options & pe_print_html) { status_print("</li>
\n"); } } if (options & pe_print_html) { status_print("</ul>
\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" ]"); } } if (options & pe_print_html) { status_print("<br/>
\n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } if (options & pe_print_details) { struct print_data_s pdata; pdata.options = options; pdata.print_data = print_data; g_hash_table_foreach(rsc->parameters, native_print_attr, &pdata); } if (options & pe_print_dev) { GHashTableIter iter; node_t *n = NULL; status_print("%s\tAllowed Nodes", pre_text); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { status_print("%s\t * %s %d", pre_text, n->details->uname, n->weight); } } if (options & pe_print_max_details) { GHashTableIter iter; node_t *n = NULL; status_print("%s\t=== Allowed Nodes\n", pre_text); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { print_node("\t", n, FALSE); } } } void native_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { node_t *node = NULL; CRM_ASSERT(rsc->variant == pe_native); if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } node = pe__current_node(rsc); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data); } int pe__resource_xml(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, options); long is_print_pending = options & pe_print_pending; long is_print_dev = options & pe_print_dev; char ra_name[LINE_MAX]; char *nodes_running_on = NULL; char *priority = NULL; int rc = 0; CRM_ASSERT(rsc->variant == pe_native); /* resource information. */ sprintf(ra_name, "%s%s%s:%s", class, prov ? "::" : "", prov ? prov : "" , crm_element_value(rsc->xml, XML_ATTR_TYPE)); nodes_running_on = crm_itoa(g_list_length(rsc->running_on)); priority = crm_ftoa(rsc->priority); rc = pe__name_and_nvpairs_xml(out, true, "resource", 16 , "id", rsc_printable_id(rsc) , "resource_agent", ra_name , "role", rsc_state , "target_role", (rsc->meta ? g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE) : NULL) , "active", BOOL2STR(rsc->fns->active(rsc, TRUE)) , "orphaned", BOOL2STR(is_set(rsc->flags, pe_rsc_orphan)) , "blocked", BOOL2STR(is_set(rsc->flags, pe_rsc_block)) , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed)) , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)) , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored)) , "nodes_running_on", nodes_running_on , "pending", (is_print_pending ? native_pending_task(rsc) : NULL) , "provisional", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_provisional)) : NULL) , "runnable", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_runnable)) : NULL) , "priority", (is_print_dev ? priority : NULL) , "variant", (is_print_dev ?
crm_element_name(rsc->xml) : NULL)); free(priority); free(nodes_running_on); CRM_ASSERT(rc == 0); if (rsc->running_on != NULL) { GListPtr gIter = rsc->running_on; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; rc = pe__name_and_nvpairs_xml(out, false, "node", 3 , "name", node->details->uname , "id", node->details->id , "cached", BOOL2STR(node->details->online)); CRM_ASSERT(rc == 0); } } pcmk__output_xml_pop_parent(out); return rc; } int pe__resource_html(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); node_t *node = pe__current_node(rsc); CRM_ASSERT(rsc->variant == pe_native); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, options); return 0; } int pe__resource_text(pcmk__output_t *out, va_list args) { unsigned int options = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); node_t *node = pe__current_node(rsc); CRM_ASSERT(rsc->variant == pe_native); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, options); return 0; } void native_free(resource_t * rsc) { pe_rsc_trace(rsc, "Freeing resource action list (not the data)"); common_free(rsc); } enum rsc_role_e native_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e role = rsc->next_role; if (current) { role = rsc->role; } pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role)); return role; } /*! * \internal * \brief List nodes where a resource (or any of its children) is * * \param[in] rsc Resource to check * \param[out] list List to add result to * \param[in] current 0 = where known, 1 = running, 2 = running or pending * * \return If list contains only one node, that node */ pe_node_t * native_location(const pe_resource_t *rsc, GList **list, int current) { node_t *one = NULL; GListPtr result = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; child->fns->location(child, &result, current); } } else if (current) { if (rsc->running_on) { result = g_list_copy(rsc->running_on); } if ((current == 2) && rsc->pending_node && !pe_find_node_id(result, rsc->pending_node->details->id)) { result = g_list_append(result, rsc->pending_node); } } else if (current == FALSE && rsc->allocated_to) { result = g_list_append(NULL, rsc->allocated_to); } if (result && (result->next == NULL)) { one = result->data; } if (list) { GListPtr gIter = result; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) { *list = g_list_append(*list, node); } } } g_list_free(result); return one; } static void get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table) { GListPtr gIter = rsc_list; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); int offset = 0; char buffer[LINE_MAX]; int *rsc_counter = NULL; int *active_counter = NULL; if (rsc->variant != pe_native) { continue; } offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class); if 
(is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov); } offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind); CRM_LOG_ASSERT(offset > 0); if (rsc_table) { rsc_counter = g_hash_table_lookup(rsc_table, buffer); if (rsc_counter == NULL) { rsc_counter = calloc(1, sizeof(int)); *rsc_counter = 0; g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter); } (*rsc_counter)++; } if (active_table) { GListPtr gIter2 = rsc->running_on; for (; gIter2 != NULL; gIter2 = gIter2->next) { node_t *node = (node_t *) gIter2->data; GHashTable *node_table = NULL; if (node->details->unclean == FALSE && node->details->online == FALSE) { continue; } node_table = g_hash_table_lookup(active_table, node->details->uname); if (node_table == NULL) { node_table = crm_str_table_new(); g_hash_table_insert(active_table, strdup(node->details->uname), node_table); } active_counter = g_hash_table_lookup(node_table, buffer); if (active_counter == NULL) { active_counter = calloc(1, sizeof(int)); *active_counter = 0; g_hash_table_insert(node_table, strdup(buffer), active_counter); } (*active_counter)++; } } } } static void destroy_node_table(gpointer data) { GHashTable *node_table = data; if (node_table) { g_hash_table_destroy(node_table); } } void print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options, void *print_data, gboolean print_all) { GHashTable *rsc_table = crm_str_table_new(); GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_node_table); GHashTableIter hash_iter; char *type = NULL; int *rsc_counter = NULL; get_rscs_brief(rsc_list, rsc_table, active_table); g_hash_table_iter_init(&hash_iter, rsc_table); while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) { GHashTableIter hash_iter2; char *node_name = NULL; GHashTable *node_table = NULL; int active_counter_all = 0; g_hash_table_iter_init(&hash_iter2, active_table); while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) { int *active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (options & pe_print_rsconly) { node_name = NULL; } if (options & pe_print_html) { status_print("<li>
\n"); } if (print_all) { status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, rsc_counter ? *rsc_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } else { status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } if (options & pe_print_html) { status_print("</li>
\n"); } } if (print_all && active_counter_all == 0) { if (options & pe_print_html) { status_print("<li>
\n"); } status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "", active_counter_all, rsc_counter ? *rsc_counter : 0, type); if (options & pe_print_html) { status_print("</li>
\n"); } } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } } void pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all) { GHashTable *rsc_table = crm_str_table_new(); GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_node_table); GHashTableIter hash_iter; char *type = NULL; int *rsc_counter = NULL; get_rscs_brief(rsc_list, rsc_table, active_table); g_hash_table_iter_init(&hash_iter, rsc_table); while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) { GHashTableIter hash_iter2; char *node_name = NULL; GHashTable *node_table = NULL; int active_counter_all = 0; g_hash_table_iter_init(&hash_iter2, active_table); while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) { int *active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (options & pe_print_rsconly) { node_name = NULL; } if (print_all) { out->list_item(out, NULL, " %d/%d\t(%s):\tActive %s", *active_counter, rsc_counter ? *rsc_counter : 0, type, (*active_counter > 0) && node_name ? node_name : ""); } else { out->list_item(out, NULL, " %d\t(%s):\tActive %s", *active_counter, type, (*active_counter > 0) && node_name ? node_name : ""); } } if (print_all && active_counter_all == 0) { out->list_item(out, NULL, " %d/%d\t(%s):\tActive", active_counter_all, rsc_counter ? *rsc_counter : 0, type); } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index e2039aa74a..c9fc672518 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,3842 +1,3842 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); #define set_config_flag(data_set, option, flag) do { \ const char *tmp = pe_pref(data_set->config_hash, option); \ if(tmp) { \ if(crm_is_true(tmp)) { \ set_bit(data_set->flags, flag); \ } else { \ clear_bit(data_set->flags, flag); \ } \ } \ } while(0) static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *failed, pe_working_set_t *data_set); static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node); static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite, pe_working_set_t *data_set); // Bitmask for warnings we only want to print once uint32_t pe_wo = 0; static gboolean is_dangling_guest_node(node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section.
*/ if (pe__is_guest_or_remote_node(node) && node->details->remote_rsc && node->details->remote_rsc->container == NULL && is_set(node->details->remote_rsc->flags, pe_rsc_orphan_container_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] data_set Current working set of cluster * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed */ void pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (pe__is_guest_node(node)) { resource_t *rsc = node->details->remote_rsc->container; if (is_set(rsc->flags, pe_rsc_failed) == FALSE) { if (!is_set(rsc->flags, pe_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", node->details->uname, reason, rsc->id); } else { crm_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", node->details->uname, rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. */ node->details->remote_requires_reset = TRUE; set_bit(rsc->flags, pe_rsc_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", node->details->uname, reason); set_bit(node->details->remote_rsc->flags, pe_rsc_failed); } else if (pe__is_remote_node(node)) { resource_t *rsc = node->details->remote_rsc; if (rsc && (!is_set(rsc->flags, pe_rsc_managed))) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", node->details->uname, reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; crm_warn("Remote node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, data_set); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", node->details->uname, pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean", reason); } else { crm_warn("Cluster node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? 
"will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, data_set); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ "[(@" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_PROVIDES "'" \ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(unsigned long long flag, const char *xpath, pe_working_set_t *data_set) { xmlXPathObjectPtr result = NULL; if (is_not_set(data_set->flags, flag)) { result = xpath_search(data_set->input, xpath); if (result && (numXpathResults(result) > 0)) { set_bit(data_set->flags, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set) { const char *value = NULL; GHashTable *config_hash = crm_str_table_new(); data_set->config_hash = config_hash; pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); if(is_not_set(data_set->flags, pe_flag_startup_probes)) { crm_info("Startup probes: disabled (dangerous)"); } value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_notice("Watchdog will be used via SBD if fencing is required " "and stonith-watchdog-timeout is nonzero"); set_bit(data_set->flags, pe_flag_have_stonith_resource); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. */ set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = (int) crm_parse_interval_spec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); if (!strcmp(data_set->stonith_action, "poweroff")) { pe_warn_once(pe_wo_poweroff, "Support for stonith-action of 'poweroff' is deprecated " "and will be removed in a future release (use 'off' instead)"); data_set->stonith_action = "off"; } crm_trace("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing); crm_debug("Concurrent fencing is %s", is_set(data_set->flags, pe_flag_concurrent_fencing) ? "enabled" : "disabled"); set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything); crm_debug("Stop all active resources: %s", is_set(data_set->flags, pe_flag_stop_everything) ? 
"true" : "false"); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if (is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "no-quorum-policy"); if (safe_str_eq(value, "ignore")) { data_set->no_quorum_policy = no_quorum_ignore; } else if (safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; } else if (safe_str_eq(value, "suicide")) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { int do_panic = 0; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if (do_panic || is_set(data_set->flags, pe_flag_have_quorum)) { data_set->no_quorum_policy = no_quorum_suicide; } else { crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { crm_config_err("Resetting no-quorum-policy to 'stop': stonith is not configured"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case no_quorum_suicide: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_trace("Orphan resources are %s", is_set(data_set->flags, pe_flag_stop_rsc_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_trace("Orphan resource actions are %s", is_set(data_set->flags, pe_flag_stop_action_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_trace("Stopped resources are removed from the status section: %s", is_set(data_set->flags, pe_flag_remove_after_stop) ? "true" : "false"); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_trace("Maintenance mode: %s", is_set(data_set->flags, pe_flag_maintenance_mode) ? "true" : "false"); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_trace("Start failures are %s", is_set(data_set->flags, pe_flag_start_failure_fatal) ? 
"always fatal" : "handled by failcount"); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing); } if (is_set(data_set->flags, pe_flag_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes"); } node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_trace("Placement strategy: %s", data_set->placement_strategy); return TRUE; } static void destroy_digest_cache(gpointer ptr) { op_digest_cache_t *data = ptr; free_xml(data->params_all); free_xml(data->params_secure); free_xml(data->params_restart); free(data->digest_all_calc); free(data->digest_restart_calc); free(data->digest_secure_calc); free(data); } node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set) { node_t *new_node = NULL; if (pe_find_node(data_set->nodes, uname) != NULL) { crm_config_warn("Detected multiple node entries with uname=%s" " - this is rarely intended", uname); } new_node = calloc(1, sizeof(node_t)); if (new_node == NULL) { return NULL; } new_node->weight = char2score(score); new_node->fixed = FALSE; new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->type = node_ping; if (safe_str_eq(type, "remote")) { new_node->details->type = node_remote; set_bit(data_set->flags, pe_flag_have_remote_nodes); } else if ((type == NULL) || safe_str_eq(type, "member")) { new_node->details->type = node_member; } new_node->details->attrs = crm_str_table_new(); if (pe__is_guest_or_remote_node(new_node)) { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("remote")); } else { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("cluster")); } new_node->details->utilization = crm_str_table_new(); new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_digest_cache); data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname); return new_node; } bool remote_id_conflict(const char *remote_name, pe_working_set_t *data) { bool match = FALSE; #if 1 pe_find_resource(data->resources, remote_name); #else if (data->name_check == NULL) { data->name_check = g_hash_table_new(crm_str_hash, g_str_equal); for (xml_rsc = __xml_first_child_element(parent); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { const char *id = ID(xml_rsc); /* avoiding heap allocation here because we know the duration of this hashtable allows us to */ g_hash_table_insert(data->name_check, (char *) id, (char *) id); } } if (g_hash_table_lookup(data->name_check, remote_name)) { match 
= TRUE; } #endif if (match) { crm_err("Invalid remote-node name, a resource called '%s' already exists.", remote_name); return NULL; } return match; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = ID(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) { if (safe_str_neq((const char *)attr_set->name, XML_TAG_META_SETS)) { continue; } for (attr = __xml_first_child_element(attr_set); attr != NULL; attr = __xml_next_element(attr)) { const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); if (safe_str_eq(name, XML_RSC_ATTR_REMOTE_NODE)) { remote_name = value; } else if (safe_str_eq(name, "remote-addr")) { remote_server = value; } else if (safe_str_eq(name, "remote-port")) { remote_port = value; } else if (safe_str_eq(name, "remote-connect-timeout")) { connect_timeout = value; } else if (safe_str_eq(name, "remote-allow-migrate")) { remote_allow_migrate=value; } else if (safe_str_eq(name, XML_RSC_ATTR_MANAGED)) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (remote_id_conflict(remote_name, data)) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pe_working_set_t *data_set, node_t *new_node) { if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (is_set(data_set->flags, pe_flag_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. 
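 *
 * A concrete reading of the two flags set here (assuming cluster-wide
 * defaults): with startup-fencing=true, a node that never shows up in the
 * status section remains unclean and is eventually fenced; with
 * startup-fencing=false it is optimistically treated as cleanly offline.
 * In both cases unseen=TRUE records that no status entry has been
 * processed for it yet.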
*/ new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { crm_config_err("Must specify id tag in <node>"); continue; } new_node = pe_create_node(id, uname, type, score, data_set); if (new_node == NULL) { return FALSE; } /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ handle_startup_fencing(data_set, new_node); add_node_attrs(xml_obj, new_node, FALSE, data_set); pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set); crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) { crm_info("Creating a fake local node"); pe_create_node(data_set->localhost, data_set->localhost, NULL, 0, data_set); } return TRUE; } static void setup_container(resource_t * rsc, pe_working_set_t * data_set) { const char *container_id = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; setup_container(child_rsc, data_set); } return; } container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER); if (container_id && safe_str_neq(container_id, rsc->id)) { resource_t *container = pe_find_resource(data_set->resources, container_id); if (container) { rsc->container = container; set_bit(container->flags, pe_rsc_is_container); container->fillers = g_list_append(container->fillers, rsc); pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. */ if (xml_contains_remote_node(xml_obj)) { new_node_id = ID(xml_obj); /* The "pe_find_node" check is here to make sure we don't iterate over * an expanded node that has already been added to the node list. */ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found remote node %s defined by resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen).
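 *
 * A minimal sketch of such a primitive (hypothetical resource; "remote-node"
 * is the meta-attribute handled by expand_remote_rsc_meta()):
 *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *     <meta_attributes id="vm1-meta">
 *       <nvpair id="vm1-remote-node" name="remote-node" value="guest1"/>
 *     </meta_attributes>
 *   </primitive>
 * This expands to an implicit ocf:pacemaker:remote connection resource and a
 * guest node, both named "guest1".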
*/ if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, TRUE)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest node %s in resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, TRUE)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = __xml_first_child_element(xml_obj); xml_obj2 != NULL; xml_obj2 = __xml_next_element(xml_obj2)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, ID(xml_obj2), ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pe_working_set_t *data_set, resource_t *new_rsc) { node_t *remote_node = NULL; if (new_rsc->is_remote_node == FALSE) { return; } if (is_set(data_set->flags, pe_flag_quick_location)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pe_find_node(data_set->nodes, new_rsc->id); CRM_CHECK(remote_node != NULL, return;); pe_rsc_trace(new_rsc, "Linking remote connection resource %s to node %s", new_rsc->id, remote_node->details->uname); remote_node->details->remote_rsc = new_rsc; if (new_rsc->container == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(data_set, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); } } static void destroy_tag(gpointer data) { tag_t *tag = data; if (tag) { free(tag->id); g_list_free_full(tag->refs, free); free(tag); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] data_set Where to put resource information * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when common_unpack() calls resource_location() */ gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; GListPtr gIter = NULL; data_set->template_rsc_sets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { resource_t *new_rsc = NULL; if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) { const char *template_id = ID(xml_obj); if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets, template_id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL); } continue; } crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj)); if (common_unpack(xml_obj, &new_rsc, NULL, data_set)) { data_set->resources = g_list_append(data_set->resources, new_rsc); pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { crm_config_err("Failed unpacking %s %s", crm_element_name(xml_obj), crm_element_value(xml_obj, XML_ATTR_ID)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } } } for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; setup_container(rsc, data_set); link_rsc2remotenode(data_set, rsc); } data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority); if (is_set(data_set->flags, pe_flag_quick_location)) { /* Ignore */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { crm_config_err("Resource start-up disabled since no STONITH resources have been defined"); crm_config_err("Either configure some or disable STONITH with the stonith-enabled option"); crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set) { xmlNode *xml_tag = NULL; data_set->tags = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_tag = __xml_first_child_element(xml_tags); xml_tag != NULL; xml_tag = __xml_next_element(xml_tag)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = ID(xml_tag); if (crm_str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, TRUE) == FALSE) { continue; } if (tag_id == NULL) { crm_config_err("Failed unpacking %s: %s should be specified", crm_element_name(xml_tag), XML_ATTR_ID); continue; } for (xml_obj_ref = __xml_first_child_element(xml_tag); xml_obj_ref != NULL; xml_obj_ref = __xml_next_element(xml_obj_ref)) { const char *obj_ref = ID(xml_obj_ref); if (crm_str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, TRUE) == FALSE) { continue; } if (obj_ref == NULL) { crm_config_err("Failed unpacking %s for tag %s: %s should be specified", crm_element_name(xml_obj_ref), tag_id, XML_ATTR_ID); continue; } if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) { return FALSE; } } } return TRUE; } /* The ticket state section: * "/cib/status/tickets/ticket_state" */ static gboolean 
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set) { const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; ticket_t *ticket = NULL; ticket_id = ID(xml_ticket); if (ticket_id == NULL || strlen(ticket_id) == 0) { return FALSE; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, data_set); if (ticket == NULL) { return FALSE; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_ticket, prop_name); if (crm_str_eq(prop_name, XML_ATTR_ID, TRUE)) { continue; } g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value)); } granted = g_hash_table_lookup(ticket->state, "granted"); if (granted && crm_is_true(granted)) { ticket->granted = TRUE; crm_info("We have ticket '%s'", ticket->id); } else { ticket->granted = FALSE; crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, "last-granted"); if (last_granted) { ticket->last_granted = crm_parse_int(last_granted, 0); } standby = g_hash_table_lookup(ticket->state, "standby"); if (standby && crm_is_true(standby)) { ticket->standby = TRUE; if (ticket->granted) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { ticket->standby = FALSE; } crm_trace("Done with ticket state for %s", ticket_id); return TRUE; } static gboolean unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; for (xml_obj = __xml_first_child_element(xml_tickets); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE) == FALSE) { continue; } unpack_ticket_state(xml_obj, data_set); } return TRUE; } static void unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t * data_set) { const char *resource_discovery_enabled = NULL; xmlNode *attrs = NULL; resource_t *rsc = NULL; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { return; } if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) { return; } crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname); this_node->details->remote_maintenance = crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0"); rsc = this_node->details->remote_rsc; if (this_node->details->remote_requires_reset == FALSE) { this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; } attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); if (pe__shutdown_requested(this_node)) { crm_info("Node %s is shutting down", this_node->details->uname); this_node->details->shutdown = TRUE; if (rsc) { rsc->next_role = RSC_ROLE_STOPPED; } } if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) || (rsc && !is_set(rsc->flags, pe_rsc_managed))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if 
(resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { if (pe__is_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_warn("Ignoring %s attribute on remote node %s because stonith is disabled", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("Node %s has resource discovery disabled", this_node->details->uname); this_node->details->rsc_discovery_enabled = FALSE; } } } static bool unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) { bool changed = false; xmlNode *lrm_rsc = NULL; for (xmlNode *state = __xml_first_child_element(status); state != NULL; state = __xml_next_element(state)) { const char *id = NULL; const char *uname = NULL; node_t *this_node = NULL; bool process = FALSE; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { continue; } id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (this_node == NULL) { crm_info("Node %s is unknown", id); continue; } else if (this_node->details->unpacked) { crm_info("Node %s is already processed", id); continue; } else if (!pe__is_guest_or_remote_node(this_node) && is_set(data_set->flags, pe_flag_stonith_enabled)) { // A redundant test, but preserves the order for regression tests process = TRUE; } else if (pe__is_guest_or_remote_node(this_node)) { bool check = FALSE; resource_t *rsc = this_node->details->remote_rsc; if(fence) { check = TRUE; } else if(rsc == NULL) { /* Not ready yet */ } else if (pe__is_guest_node(this_node) && rsc->role == RSC_ROLE_STARTED && rsc->container->role == RSC_ROLE_STARTED) { /* Both the connection and its containing resource need to be * known to be up before we process resources running in it. */ check = TRUE; crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED); } else if (!pe__is_guest_node(this_node) && rsc->role == RSC_ROLE_STARTED) { check = TRUE; crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED); } if (check) { determine_remote_online_status(data_set, this_node); unpack_handle_remote_attrs(this_node, state, data_set); process = TRUE; } } else if (this_node->details->online) { process = TRUE; } else if (fence) { process = TRUE; } if(process) { crm_trace("Processing lrm resource entries on %shealthy%s node: %s", fence?"un":"", (pe__is_guest_or_remote_node(this_node)? " remote" : ""), this_node->details->uname); changed = TRUE; this_node->details->unpacked = TRUE; lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); unpack_lrm_resources(this_node, lrm_rsc, data_set); } } return changed; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? 
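 *
 * Order of operations in unpack_status() below: ticket state is unpacked
 * first, then each cluster node's transient attributes and online status;
 * unpack_node_loop() is then repeated until no further resource histories
 * can be processed, and finally rerun with fencing permitted to catch any
 * nodes not seen so far.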
*/ gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set) { const char *id = NULL; const char *uname = NULL; xmlNode *state = NULL; node_t *this_node = NULL; crm_trace("Beginning unpack"); if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket); } for (state = __xml_first_child_element(status); state != NULL; state = __xml_next_element(state)) { if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) { unpack_tickets_state((xmlNode *) state, data_set); } else if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) { xmlNode *attrs = NULL; const char *resource_discovery_enabled = NULL; id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (uname == NULL) { /* error */ continue; } else if (this_node == NULL) { crm_config_warn("Node %s in status section no longer exists", uname); continue; } else if (pe__is_guest_or_remote_node(this_node)) { /* online state for remote nodes is determined by the * rsc state after all the unpacking is done. we do however * need to mark whether or not the node has been fenced as this plays * a role during unpacking cluster node resource state */ this_node->details->remote_was_fenced = crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0"); continue; } crm_trace("Processing node id=%s, uname=%s", id, uname); /* Mark the node as provisionally clean * - at least we have seen it in the current cluster's lifetime */ this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } crm_trace("determining node state"); determine_online_status(state, this_node, data_set); if (is_not_set(data_set->flags, pe_flag_have_quorum) && this_node->details->online && (data_set->no_quorum_policy == no_quorum_suicide)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(data_set, this_node, "cluster does not have quorum"); } } } while(unpack_node_loop(status, FALSE, data_set)) { crm_trace("Start another loop"); } // Now catch any nodes we didn't see unpack_node_loop(status, is_set(data_set->flags, pe_flag_stonith_enabled), data_set); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (data_set->stop_needed != NULL) { for (GList *item = data_set->stop_needed; item; item = item->next) { pe_resource_t *container = item->data; pe_node_t *node = pe__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(data_set->stop_needed); data_set->stop_needed = NULL; } for (GListPtr gIter = 
data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *this_node = gIter->data; if (this_node == NULL) { continue; } else if (!pe__is_guest_or_remote_node(this_node)) { continue; } else if(this_node->details->unpacked) { continue; } determine_remote_online_status(data_set, this_node); } return TRUE; } static gboolean determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state, node_t * this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); if (!crm_is_true(in_cluster)) { crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster)); } else if (safe_str_eq(is_peer, ONLINESTATUS)) { if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { online = TRUE; } else { crm_debug("Node is not ready to run resources: %s", join); } } else if (this_node->details->expected_up == FALSE) { crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster)); crm_trace("\tis_peer=%s, join=%s, expected=%s", crm_str(is_peer), crm_str(join), crm_str(exp_state)); } else { /* mark it unclean */ pe_fence_node(data_set, this_node, "peer is unexpectedly down"); crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s", crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state)); } return online; } static gboolean determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state, node_t * this_node) { gboolean online = FALSE; gboolean do_terminate = FALSE; bool crmd_online = FALSE; const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); const char *terminate = pe_node_attribute_raw(this_node, "terminate"); /* - XML_NODE_IN_CLUSTER ::= true|false - XML_NODE_IS_PEER ::= online|offline - XML_NODE_JOIN_STATE ::= member|down|pending|banned - XML_NODE_EXPECTED ::= member|down */ if (crm_is_true(terminate)) { do_terminate = TRUE; } else if (terminate != NULL && strlen(terminate) > 0) { /* could be a time() value */ char t = terminate[0]; if (t != '0' && isdigit(t)) { do_terminate = TRUE; } } crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate); online = crm_is_true(in_cluster); crmd_online = safe_str_eq(is_peer, ONLINESTATUS); if (exp_state == NULL) { exp_state = CRMD_JOINSTATE_DOWN; } if (this_node->details->shutdown) { crm_debug("%s is shutting down", this_node->details->uname); /* Slightly different criteria since we can't shut down a dead peer */ online = crmd_online; } else if (in_cluster == NULL) { pe_fence_node(data_set, this_node, "peer has not been seen by the cluster"); } else if (safe_str_eq(join, CRMD_JOINSTATE_NACK)) { pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria"); } else if (do_terminate == FALSE && safe_str_eq(exp_state, CRMD_JOINSTATE_DOWN)) { if (crm_is_true(in_cluster) || crmd_online) { crm_info("- Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { crm_trace("%s is down or still 
coming up", this_node->details->uname); } } else if (do_terminate && safe_str_eq(join, CRMD_JOINSTATE_DOWN) && crm_is_true(in_cluster) == FALSE && !crmd_online) { crm_info("Node %s was just shot", this_node->details->uname); online = FALSE; } else if (crm_is_true(in_cluster) == FALSE) { pe_fence_node(data_set, this_node, "peer is no longer part of the cluster"); } else if (!crmd_online) { pe_fence_node(data_set, this_node, "peer process is no longer available"); /* Everything is running at this point, now check join state */ } else if (do_terminate) { pe_fence_node(data_set, this_node, "termination was requested"); } else if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { crm_info("Node %s is active", this_node->details->uname); } else if (safe_str_eq(join, CRMD_JOINSTATE_PENDING) || safe_str_eq(join, CRMD_JOINSTATE_DOWN)) { crm_info("Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { pe_fence_node(data_set, this_node, "peer was in an unknown state"); crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown); } return online; } static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node) { resource_t *rsc = this_node->details->remote_rsc; resource_t *container = NULL; pe_node_t *host = NULL; /* If there is a node state entry for a (former) Pacemaker Remote node * but no resource creating that node, the node's connection resource will * be NULL. Consider it an offline remote node in that case. */ if (rsc == NULL) { this_node->details->online = FALSE; goto remote_online_done; } container = rsc->container; - if (container && (g_list_length(rsc->running_on) == 1)) { + if (container && pcmk__list_of_1(rsc->running_on)) { host = rsc->running_on->data; } /* If the resource is currently started, mark it online. */ if (rsc->role == RSC_ROLE_STARTED) { crm_trace("%s node %s presumed ONLINE because connection resource is started", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) { crm_trace("%s node %s shutting down because connection resource is stopping", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. */ if(container && is_set(container->flags, pe_rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; } else if (rsc->role == RSC_ROLE_STOPPED || (container && container->role == RSC_ROLE_STOPPED)) { crm_trace("%s node %s OFFLINE because its resource is stopped", (container? 
"Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = FALSE; } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } remote_online_done: crm_trace("Remote node %s online=%s", this_node->details->id, this_node->details->online ? "TRUE" : "FALSE"); return this_node->details->online; } gboolean determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set_t * data_set) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); if (this_node == NULL) { crm_config_err("No node to check"); return online; } this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) { this_node->details->expected_up = TRUE; } if (this_node->details->type == node_ping) { this_node->details->unclean = FALSE; online = FALSE; /* As far as resource management is concerned, * the node is safely offline. * Anyone caught abusing this logic will be shot */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { online = determine_online_status_no_fencing(data_set, node_state, this_node); } else { online = determine_online_status_fencing(data_set, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (this_node->details->type == node_ping) { crm_info("Node %s is not a pacemaker node", this_node->details->uname); } else if (this_node->details->unclean) { pe_proc_warn("Node %s is unclean", this_node->details->uname); } else if (this_node->details->online) { crm_info("Node %s is %s", this_node->details->uname, this_node->details->shutdown ? "shutting down" : this_node->details->pending ? "pending" : this_node->details->standby ? "standby" : this_node->details->maintenance ? "maintenance" : "online"); } else { crm_trace("Node %s is offline", this_node->details->uname); } return online; } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!crm_strlen_zero(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! * \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. 
*/ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; CRM_ASSERT(end); basename = strndup(last_rsc_id, end - last_rsc_id + 1); CRM_ASSERT(basename); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; CRM_ASSERT(end); zero = calloc(base_name_len + 3, sizeof(char)); CRM_ASSERT(zero); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static resource_t * create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set) { resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pe_find_node(data_set->nodes, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set); } link_rsc2remotenode(data_set, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) { /* This orphaned rsc needs to be mapped to a container. */ crm_trace("Detected orphaned container filler %s", rsc_id); set_bit(rsc->flags, pe_rsc_orphan_container_filler); } set_bit(rsc->flags, pe_rsc_orphan); data_set->resources = g_list_append(data_set->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history */ static pe_resource_t * create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id, pe_node_t *node, pe_working_set_t *data_set) { pe_resource_t *top = pe__create_clone_child(parent, data_set); // find_rsc() because we might be a cloned group pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone); pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, node->details->uname); return orphan; } /*! * \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of clone-max instances); * (3) a newly created orphan (i.e. clone-max instances are already active). 
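 *
 * As a rough illustration with hypothetical names: when unpacking history
 * for "stateful" on node1, an instance such as stateful:1 already known
 * active on node1 is preferred; failing that, an inactive instance such
 * as stateful:2 is reused; only when every clone-max instance is already
 * accounted for does create_anonymous_orphan() mint a new orphan.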
* * \param[in] data_set Cluster information * \param[in] node Node on which to check for instance * \param[in] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (without instance) */ static resource_t * find_anonymous_clone(pe_working_set_t * data_set, node_t * node, resource_t * parent, const char *rsc_id) { GListPtr rIter = NULL; pe_resource_t *rsc = NULL; pe_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; CRM_ASSERT(parent != NULL); CRM_ASSERT(pe_rsc_is_clone(parent)); CRM_ASSERT(is_not_set(parent->flags, pe_rsc_unique)); // Check for active (or partially active, for cloned groups) instance pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id); for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) { GListPtr locations = NULL; resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if globally-unique * was flipped from true to false); and * (3) when we re-run calculations on the same data set as part of a * simulation. */ child->fns->location(child, &locations, 2); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (((pe_node_t *)locations->data)->details == node->details) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if globally-unique is switched from true to * false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. */ if (rsc->running_on) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, node->details->uname); skip_inactive = TRUE; rsc = NULL; } else { pe_rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pe_rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && is_not_set(child->flags, pe_rsc_block)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); /* ... 
but don't use it if it was already associated with a * pending action on another node */ if (inactive_instance && inactive_instance->pending_node && (inactive_instance->pending_node->details != node->details)) { inactive_instance = NULL; } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has "requires" set to "quorum" or "nothing", and we don't * have a clone instance for every node, we don't want to consume a valid * instance number for unclean nodes. Such instances may appear to be active * according to the history, but should be considered inactive, so we can * start an instance elsewhere. Treat such instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && is_not_set(rsc->flags, pe_rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pe__is_guest_node(node) && !pe__is_universal_clone(parent, data_set)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, data_set); pe_rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static resource_t * unpack_find_resource(pe_working_set_t * data_set, node_t * node, const char *rsc_id, xmlNode * rsc_entry) { resource_t *rsc = NULL; resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when clone-max=0, we create * a single :0 orphan to match against here. */ char *clone0_id = clone_zero(rsc_id); resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id); if (clone0 && is_not_set(clone0->flags, pe_rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->variant > pe_native) { crm_trace("Resource history for %s is orphaned because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pe_rsc_is_anon_clone(parent)) { if (pe_rsc_is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(data_set, node, parent, base); free(base); CRM_ASSERT(rsc != NULL); } } if (rsc && safe_str_neq(rsc_id, rsc->id) && safe_str_neq(rsc_id, rsc->clone_name)) { free(rsc->clone_name); rsc->clone_name = strdup(rsc_id); pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, node->details->uname, rsc->id, (is_set(rsc->flags, pe_rsc_orphan)? 
" (ORPHAN)" : "")); } return rsc; } static resource_t * process_orphan_resource(xmlNode * rsc_entry, node_t * node, pe_working_set_t * data_set) { resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname); rsc = create_fake_resource(rsc_id, rsc_entry, data_set); if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pe_rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set); } return rsc; } static void process_rsc_state(resource_t * rsc, node_t * node, enum action_fail_response on_fail, xmlNode * migrate_op, pe_working_set_t * data_set) { node_t *tmpnode = NULL; char *reason = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail)); /* process current state */ if (rsc->role != RSC_ROLE_UNKNOWN) { resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { node_t *n = node_copy(node); pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name, n->details->uname); g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n); } if (is_set(iter->flags, pe_rsc_unique)) { break; } iter = iter->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if (rsc->role > RSC_ROLE_STOPPED && node->details->online == FALSE && node->details->maintenance == FALSE && is_set(rsc->flags, pe_rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (pe__is_guest_node(node)) { set_bit(rsc->flags, pe_rsc_failed); should_fence = TRUE; } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { if (pe__is_remote_node(node) && node->details->remote_rsc && is_not_set(node->details->remote_rsc->flags, pe_rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. 
*/ node->details->unseen = TRUE; reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(data_set, node, reason); } free(reason); } if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = action_fail_ignore; } switch (on_fail) { case action_fail_ignore: /* nothing to do */ break; case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(data_set, node, reason); free(reason); break; case action_fail_standby: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case action_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); break; case action_fail_migrate: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set); break; case action_fail_stop: rsc->next_role = RSC_ROLE_STOPPED; break; case action_fail_recover: if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); stop_action(rsc, node, FALSE); } break; case action_fail_restart_container: set_bit(rsc->flags, pe_rsc_failed); if (rsc->container && pe_rsc_is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. */ data_set->stop_needed = g_list_prepend(data_set->stop_needed, rsc->container); } else if (rsc->container) { stop_action(rsc->container, node, FALSE); } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { stop_action(rsc, node, FALSE); } break; case action_fail_reset_remote: set_bit(rsc->flags, pe_rsc_failed); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); } if (tmpnode && pe__is_remote_node(tmpnode) && tmpnode->details->remote_was_fenced == 0) { /* The remote connection resource failed in a way that * should result in fencing the remote node. */ pe_fence_node(data_set, tmpnode, "remote connection is unrecoverable"); } } /* require the stop action regardless if fencing is occurring or not. */ if (rsc->role > RSC_ROLE_STOPPED) { stop_action(rsc, node, FALSE); } /* if reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->remote_reconnect_ms) { rsc->next_role = RSC_ROLE_STOPPED; } break; } /* ensure a remote-node connection failure forces an unclean remote-node * to be fenced. By setting unseen = FALSE, the remote-node failure will * result in a fencing operation regardless if we're going to attempt to * reconnect to the remote-node in this transition or not. 
*/ if (is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); if (tmpnode && tmpnode->details->unclean) { tmpnode->details->unseen = FALSE; } } if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { if (is_set(rsc->flags, pe_rsc_orphan)) { if (is_set(rsc->flags, pe_rsc_managed)) { crm_config_warn("Detected active orphan %s running on %s", rsc->id, node->details->uname); } else { crm_config_warn("Cluster configured not to stop active orphans." " %s must be stopped manually on %s", rsc->id, node->details->uname); } } native_add_running(rsc, node, data_set); if (on_fail != action_fail_ignore) { set_bit(rsc->flags, pe_rsc_failed); } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { /* Only do this for older status sections that included instance numbers * Otherwise stopped instances will appear as orphans */ pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); free(rsc->clone_name); rsc->clone_name = NULL; } else { GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP, FALSE); GListPtr gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { action_t *stop = (action_t *) gIter->data; stop->flags |= pe_action_optional; } g_list_free(possible_matches); } } /* create active recurring operations as optional */ static void process_recurring(node_t * node, resource_t * rsc, int start_index, int stop_index, GListPtr sorted_op_list, pe_working_set_t * data_set) { int counter = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = ID(rsc_op); const char *interval_ms_s = NULL; counter++; if (node->details->online == FALSE) { pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname); continue; } else if (counter < start_index) { pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter); continue; } interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if (interval_ms == 0) { pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (safe_str_eq(status, "-1")) { pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = generate_op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); } } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; *stop_index = -1; *start_index = -1; for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, 
XML_LRM_ATTR_OPSTATUS); if (safe_str_eq(task, CRMD_ACTION_STOP) && safe_str_eq(status, "0")) { *stop_index = counter; } else if (safe_str_eq(task, CRMD_ACTION_START) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && safe_str_eq(task, CRMD_ACTION_STATUS)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if (safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) { implied_monitor_start = counter; } } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } static resource_t * unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set) { GListPtr gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; const char *task = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; xmlNode *migrate_op = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_trace("[%s] Processing %s on %s", crm_element_name(rsc_entry), rsc_id, node->details->uname); /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } /* find the resource */ rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); if (rsc == NULL) { rsc = process_orphan_resource(rsc_entry, node, data_set); } CRM_ASSERT(rsc != NULL); /* process operations */ saved_role = rsc->role; on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { migrate_op = rsc_op; } unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail, migrate_op, data_set); if (get_target_role(rsc, &req_role)) { if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); rsc->next_role = req_role; } else if (req_role > rsc->next_role) { pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; for (rsc_entry = 
__xml_first_child_element(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { resource_t *rsc; resource_t *container; const char *rsc_id; const char *container_id; if (safe_str_neq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE)) { continue; } container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER); rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(data_set->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL || is_set(rsc->flags, pe_rsc_orphan_container_filler) == FALSE || rsc->container != NULL) { continue; } pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } gboolean unpack_lrm_resources(node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; gboolean found_orphaned_container_filler = FALSE; CRM_CHECK(node != NULL, return FALSE); crm_trace("Unpacking resources on %s", node->details->uname); for (rsc_entry = __xml_first_child_element(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); if (!rsc) { continue; } if (is_set(rsc->flags, pe_rsc_orphan_container_filler)) { found_orphaned_container_filler = TRUE; } } } /* now that all the resource state has been unpacked for this node * we have to go back and map any orphaned container fillers to their * container resource */ if (found_orphaned_container_filler) { handle_orphaned_container_fillers(lrm_rsc_list, data_set); } return TRUE; } static void set_active(resource_t * rsc) { resource_t *top = uber_parent(rsc); if (top && is_set(top->flags, pe_rsc_promotable)) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { node_t *node = value; int *score = user_data; node->weight = *score; } #define STATUS_PATH_MAX 1024 static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, bool success_only, pe_working_set_t *data_set) { int offset = 0; char xpath[STATUS_PATH_MAX]; xmlNode *xml = NULL; offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node); offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']", resource); /* Need to check against transition_magic too? 
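 *
 * For reference, the query assembled above looks roughly like this for a
 * stop result (node and resource names are hypothetical):
 *
 *   //node_state[@uname='node1']//lrm_resource[@id='rsc1']
 *       /lrm_rsc_op[@operation='stop']
 *
 * with a @migrate_target or @migrate_source predicate appended below for
 * migration operations.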
*/ if (source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op, source); } else if (source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op, source); } else { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op); } CRM_LOG_ASSERT(offset > 0); xml = get_xpath_object(xpath, data_set->input, LOG_DEBUG); if (xml && success_only) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_LRM_OP_ERROR; crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status); if ((rc != PCMK_OCF_OK) || (status != PCMK_LRM_OP_DONE)) { return NULL; } } return xml; } static int pe__call_id(xmlNode *op_xml) { int id = 0; if (op_xml) { crm_element_value_int(op_xml, XML_LRM_ATTR_CALLID, &id); } return id; } /*! * \brief Check whether a stop happened on the same node after some event * * \param[in] rsc Resource being checked * \param[in] node Node being checked * \param[in] xml_op Event that stop is being compared to * \param[in] data_set Cluster working set * * \return TRUE if stop happened after event, FALSE otherwise * * \note This is really unnecessary, but kept as a safety mechanism. We * currently don't save more than one successful event in history, so this * only matters when processing really old CIB files that we don't * technically support anymore, or as preparation for logging an extended * history in the future. */ static bool stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->uname, NULL, TRUE, data_set); return (stop_op && (pe__call_id(stop_op) > pe__call_id(xml_op))); } static void unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { /* A successful migration sequence is: * migrate_to on source node * migrate_from on target node * stop on source node * * If a migrate_to is followed by a stop, the entire migration (successful * or failed) is complete, and we don't care what happened on the target. * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from failed, make sure the resource gets * stopped on both source and target (if up). * * If the migrate_to and migrate_from both succeeded (which also implies the * resource is no longer running on the source), but there is no stop, the * migration is considered to be "dangling". Schedule a stop on the source * in this case. 
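 *
 * A worked example with made-up call IDs, matching the cases above:
 *
 *   complete: migrate_to (call 5, source), migrate_from (6, target),
 *             stop (7, source)  -> nothing more to unpack here
 *   dangling: migrate_to (5) and migrate_from (6), but no later stop
 *             -> schedule a stop on the source
 *   partial:  migrate_to (5) only -> record partial_migration_source and
 *             partial_migration_target so the migration may be continued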
*/ int from_rc = 0; int from_status = 0; pe_node_t *target_node = NULL; pe_node_t *source_node = NULL; xmlNode *migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); if (stop_happened_after(rsc, node, xml_op, data_set)) { return; } // Clones are not allowed to migrate, so role can't be master rsc->role = RSC_ROLE_STARTED; target_node = pe_find_node(data_set->nodes, target); source_node = pe_find_node(data_set->nodes, source); // Check whether there was a migrate_from action on the target migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, FALSE, data_set); if (migrate_from) { crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d", ID(migrate_from), target, from_status, from_rc); } if (migrate_from && from_rc == PCMK_OCF_OK && from_status == PCMK_LRM_OP_DONE) { /* The migrate_to and migrate_from both succeeded, so mark the migration * as "dangling". This will be used to schedule a stop action on the * source without affecting the target. */ pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op), source); rsc->role = RSC_ROLE_STOPPED; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); } } else { // Pending, or complete but erased if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); if (source_node && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ rsc->partial_migration_target = target_node; rsc->partial_migration_source = source_node; } } else { /* Consider it failed here - forces a restart, prevents migration */ set_bit(rsc->flags, pe_rsc_failed); clear_bit(rsc->flags, pe_rsc_allow_migrate); } } } static void unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { int target_stop_id = 0; int target_migrate_from_id = 0; xmlNode *target_stop = NULL; xmlNode *target_migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. 
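 *
 * Example with made-up call IDs: after a failed migrate_to (call 5) on
 * the source, a target history of migrate_from (6) followed by stop (7)
 * means the target is clean; a stop (4) older than the migrate_from, or
 * no stop at all, means the resource must be assumed active on the
 * target if the target is online.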
*/ rsc->role = RSC_ROLE_STARTED; // Check for stop on the target target_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, target, NULL, TRUE, data_set); target_stop_id = pe__call_id(target_stop); // Check for migrate_from on the target target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, TRUE, data_set); target_migrate_from_id = pe__call_id(target_migrate_from); if ((target_stop == NULL) || (target_stop_id < target_migrate_from_id)) { /* There was no stop on the source, or a stop that happened before a * migrate_from, so assume the resource is still active on the target * (if it is up). */ node_t *target_node = pe_find_node(data_set->nodes, target); pe_rsc_trace(rsc, "stop (%d) + migrate_from (%d)", target_stop_id, target_migrate_from_id); if (target_node && target_node->details->online) { native_add_running(rsc, target_node, data_set); } } else if (target_migrate_from == NULL) { /* We know there was a stop on the target, but there may not have been a * migrate_from (the stop could have happened before migrate_from was * scheduled or attempted). * * That means this could be a "dangling" migration. But first, check * whether there is a newer migrate_from or start on the source node -- * it's possible the failed migration was followed by a successful * full restart or migration in the reverse direction, in which case we * don't want to force it to stop. */ xmlNode *source_migrate_from = NULL; xmlNode *source_start = NULL; int source_migrate_to_id = pe__call_id(xml_op); source_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, source, NULL, TRUE, data_set); if (pe__call_id(source_migrate_from) > source_migrate_to_id) { return; } source_start = find_lrm_op(rsc->id, CRMD_ACTION_START, source, NULL, TRUE, data_set); if (pe__call_id(source_start) > source_migrate_to_id) { return; } // Mark node as having dangling migration so we can force a stop later rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } } static void unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *source_stop = NULL; xmlNode *source_migrate_to = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(target, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. */ rsc->role = RSC_ROLE_STARTED; // Check for a stop on the source source_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, source, NULL, TRUE, data_set); // Check for a migrate_to on the source source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, source, target, TRUE, data_set); if ((source_stop == NULL) || (pe__call_id(source_stop) < pe__call_id(source_migrate_to))) { /* There was no stop on the source, or a stop that happened before * migrate_to, so assume the resource is still active on the source (if * it is up). 
*/ pe_node_t *source_node = pe_find_node(data_set->nodes, source); if (source_node && source_node->details->online) { native_add_running(rsc, source_node, data_set); } } } static void record_failed_op(xmlNode *op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { xmlNode *xIter = NULL; const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY); if (node->details->online == FALSE) { return; } for (xIter = data_set->failed->children; xIter; xIter = xIter->next) { const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY); const char *uname = crm_element_value(xIter, XML_ATTR_UNAME); if(safe_str_eq(op_key, key) && safe_str_eq(uname, node->details->uname)) { crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname); return; } } crm_trace("Adding entry %s on %s", op_key, node->details->uname); crm_xml_add(op, XML_ATTR_UNAME, node->details->uname); crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id); add_node_copy(data_set->failed, op); } static const char *get_op_key(xmlNode *xml_op) { const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if(key == NULL) { key = ID(xml_op); } return key; } static const char * last_change_str(xmlNode *xml_op) { time_t when; const char *when_s = NULL; if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &when) == pcmk_ok) { when_s = crm_now_string(&when); if (when_s) { // Skip day of week to make message shorter when_s = strchr(when_s, ' '); if (when_s) { ++when_s; } } } return ((when_s && *when_s)? when_s : "unknown time"); } static void unpack_rsc_op_failure(resource_t * rsc, node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; action_t *action = NULL; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return); *last_failure = xml_op; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; } if (exit_reason == NULL) { exit_reason = ""; } if (is_not_set(data_set->flags, pe_flag_symmetric_cluster) && (rc == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); } else { crm_warn("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); if (is_probe && (rc != PCMK_OCF_OK) && (rc != PCMK_OCF_NOT_RUNNING) && (rc != PCMK_OCF_RUNNING_MASTER)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. 
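 *
 * For instance, probing a database agent on a node that lacks the
 * database binaries fails with "not installed"; constraining where the
 * resource is probed (not just where it may run) avoids such noise.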
*/ crm_notice("If it is not possible for %s to run on %s, see " "the resource-discovery option for location constraints", rsc->id, node->details->uname); } record_failed_op(xml_op, node, rsc, data_set); } action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), fail2text(action->on_fail), action->uuid, key); *on_fail = action->on_fail; } if (!strcmp(task, CRMD_ACTION_STOP)) { resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATE)) { unpack_migrate_to_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATED)) { unpack_migrate_from_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) { if (action->on_fail == action_fail_block) { rsc->role = RSC_ROLE_MASTER; rsc->next_role = RSC_ROLE_STOPPED; } else if(rc == PCMK_OCF_NOT_RUNNING) { rsc->role = RSC_ROLE_STOPPED; } else { /* Staying in master role would put the scheduler and controller * into a loop. Setting slave role is not dangerous because the * resource will be stopped as part of recovery, and any master * promotion will be ordered after that stop. */ rsc->role = RSC_ROLE_SLAVE; } } if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) { /* leave stopped */ pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id); rsc->role = RSC_ROLE_STOPPED; } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "Setting %s active", rsc->id); set_active(rsc); } pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), node->details->unclean ? "true" : "false", fail2text(action->on_fail), role2text(action->fail_role)); if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { rsc->next_role = action->fail_role; } if (action->fail_role == RSC_ROLE_STOPPED) { int score = -INFINITY; resource_t *fail_rsc = rsc; if (fail_rsc->parent) { resource_t *parent = uber_parent(fail_rsc); if (pe_rsc_is_clone(parent) && is_not_set(parent->flags, pe_rsc_unique)) { /* For clone resources, if a child fails on an operation * with on-fail = stop, all the resources fail. Do this by preventing * the parent from coming up again. */ fail_rsc = parent; } } crm_notice("%s will not be started under current conditions", fail_rsc->id); /* make sure it doesn't come up again */ if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } pe_free_action(action); } /*! * \internal * \brief Remap operation status based on action result * * Given an action result, determine an appropriate operation status for the * purposes of responding to the action (the status provided by the executor is * not directly usable since the executor does not know what was expected). 
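 *
 * Two illustrative remappings: a monitor that expected success
 * (target_rc 0) but returned 7 (not running) is remapped below to
 * PCMK_LRM_OP_ERROR, while a probe that expected 7 and instead found the
 * resource running (rc 0) is kept as PCMK_LRM_OP_DONE and the resource
 * is recorded as active.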
* * \param[in,out] rsc Resource that operation history entry is for * \param[in] rc Actual return code of operation * \param[in] target_rc Expected return code of operation * \param[in] node Node where operation was executed * \param[in] xml_op Operation history entry XML from CIB status * \param[in,out] on_fail What should be done about the result * \param[in] data_set Current cluster working set * * \return Operation status based on return code and action info * \note This may update the resource's current and next role. */ static int determine_op_status( resource_t *rsc, int rc, int target_rc, node_t * node, xmlNode * xml_op, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; int result = PCMK_LRM_OP_DONE; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return PCMK_LRM_OP_ERROR); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; task = "probe"; } if (target_rc < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * target_rc in the transition key, which (along with the similar case * of a corrupted transition key in the CIB) will be reported to this * function as -1. Pacemaker 2.0+ does not support rolling upgrades from * those versions or processing of saved CIB files from those versions, * so we do not need to care much about this case. */ result = PCMK_LRM_OP_ERROR; crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)", key, node->details->uname); } else if (target_rc != rc) { result = PCMK_LRM_OP_ERROR; pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", key, node->details->uname, target_rc, services_ocf_exitcode_str(target_rc), rc, services_ocf_exitcode_str(rc), (*exit_reason? 
": " : ""), exit_reason); } switch (rc) { case PCMK_OCF_OK: if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Probe found %s active on %s at %s", rsc->id, node->details->uname, last_change_str(xml_op)); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || target_rc == rc || is_not_set(rsc->flags, pe_rsc_managed)) { result = PCMK_LRM_OP_DONE; rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } break; case PCMK_OCF_RUNNING_MASTER: if (is_probe && (rc != target_rc)) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Probe found %s active and promoted on %s at %s", rsc->id, node->details->uname, last_change_str(xml_op)); } rsc->role = RSC_ROLE_MASTER; break; case PCMK_OCF_DEGRADED_MASTER: case PCMK_OCF_FAILED_MASTER: rsc->role = RSC_ROLE_MASTER; result = PCMK_LRM_OP_ERROR; break; case PCMK_OCF_NOT_CONFIGURED: result = PCMK_LRM_OP_ERROR_FATAL; break; case PCMK_OCF_UNIMPLEMENT_FEATURE: if (interval_ms > 0) { result = PCMK_LRM_OP_NOTSUPPORTED; break; } // fall through case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!pe_can_fence(data_set, node) && !strcmp(task, CRMD_ACTION_STOP)) { /* If a stop fails and we can't fence, there's nothing else we can do */ pe_proc_err("No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " CRM_XS " rc=%d id=%s", rsc->id, task, node->details->uname, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, last_change_str(xml_op), rc, ID(xml_op)); clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); } result = PCMK_LRM_OP_ERROR_HARD; break; default: if (result == PCMK_LRM_OP_DONE) { crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", rc, task, rsc->id, node->details->uname, last_change_str(xml_op)); result = PCMK_LRM_OP_ERROR; } break; } return result; } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(xmlNode *xml_op, const char *task, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { if (!strcmp(task, "start") || !strcmp(task, "monitor")) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
*/ pe__add_param_check(xml_op, rsc, node, pe_check_last_failure, data_set); } else { op_digest_cache_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, get_op_key(xml_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn, pe_working_set_t *data_set) { pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id); if (remote_node) { pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, data_set); order_actions(fence, action, pe_order_implies_then); } } static bool should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op, const char *task, guint interval_ms, bool is_last_failure, pe_working_set_t *data_set) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if (rsc->remote_reconnect_ms && is_set(data_set->flags, pe_flag_stonith_enabled) && (interval_ms != 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); if (remote_node && !remote_node->details->remote_was_fenced) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
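 *
 * A worked example with made-up times: given failure-timeout=60s and a
 * failed monitor recorded at t=100, the entry is considered expired from
 * t=160 onward, provided the resource's fail count is not newer; if an
 * unexpired fail count remains, the cluster recheck time is pulled in to
 * just past last-failure + 60s so expiry is re-evaluated then.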
* * \param[in] rsc Resource that operation happened to * \param[in] node Node that operation happened on * \param[in] rc Actual result of operation * \param[in] xml_op Operation history entry XML * \param[in] data_set Current working set * * \return TRUE if operation history entry is expired, FALSE otherwise */ static bool check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc, xmlNode *xml_op, pe_working_set_t *data_set) { bool expired = FALSE; bool is_last_failure = crm_ends_with(ID(xml_op), "_last_failure_0"); time_t last_run = 0; guint interval_ms = 0; int unexpired_fail_count = 0; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *clear_reason = NULL; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((rsc->failure_timeout > 0) && (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_run) == 0)) { // Resource has a failure-timeout, and history entry has a timestamp time_t now = get_effective_time(data_set); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + rsc->failure_timeout)) && !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms, is_last_failure, data_set)) { expired = TRUE; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, xml_op, data_set); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds" " last-failure@%lld", ID(xml_op), (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, rsc->failure_timeout, (long long) last_failure); last_failure += rsc->failure_timeout + 1; if (unexpired_fail_count && (now < last_failure)) { pe__update_recheck_time(last_failure, data_set); } } if (expired) { if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ expired = FALSE; } } else if (is_last_failure && rsc->remote_reconnect_ms) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(xml_op, task, rsc, node, data_set)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { // Schedule clearing of the fail count pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason, data_set); if (is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_ms) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. * * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until * after unpack_node_loop() is done). 
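 *
 * Concretely (hypothetical name): the clear-failcount action for remote
 * node remote1's connection is ordered after remote1's fencing via
 * order_after_remote_fencing() below, so the failure record is not wiped
 * while fencing is still pending.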
*/ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", task, rsc->id); order_after_remote_fencing(clear_op, rsc, data_set); } } if (expired && (interval_ms == 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { switch(rc) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Don't expire probes that return these values expired = FALSE; break; } } return expired; } int pe__target_rc_from_xml(xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } static enum action_fail_response get_action_on_fail(resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) { int result = action_fail_recover; action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); result = action->on_fail; pe_free_action(action); return result; } static void update_resource_state(resource_t * rsc, node_t * node, xmlNode * xml_op, const char * task, int rc, xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { gboolean clear_past_failure = FALSE; CRM_ASSERT(rsc); CRM_ASSERT(xml_op); if (rc == PCMK_OCF_NOT_RUNNING) { clear_past_failure = TRUE; } else if (rc == PCMK_OCF_NOT_INSTALLED) { rsc->role = RSC_ROLE_STOPPED; } else if (safe_str_eq(task, CRMD_ACTION_STATUS)) { if (last_failure) { const char *op_key = get_op_key(xml_op); const char *last_failure_key = get_op_key(last_failure); if (safe_str_eq(op_key, last_failure_key)) { clear_past_failure = TRUE; } } if (rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } } else if (safe_str_eq(task, CRMD_ACTION_START)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_STOP)) { rsc->role = RSC_ROLE_STOPPED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { /* Demote from Master does not clear an error */ rsc->role = RSC_ROLE_SLAVE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) { unpack_migrate_to_success(rsc, node, xml_op, data_set); } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname); set_active(rsc); } /* clear any previous failure actions */ if (clear_past_failure) { switch (*on_fail) { case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_block: case action_fail_ignore: case action_fail_recover: case action_fail_restart_container: *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; break; case action_fail_reset_remote: if (rsc->remote_reconnect_ms == 0) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } break; } } } /*! 
* \internal * \brief Remap informational monitor results to usual values * * Certain OCF result codes are for providing extended information to the * user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by pacemaker. * * \param[in] rc Actual result of a monitor action * \param[in] xml_op Operation history XML * \param[in] node Node that operation happened on * \param[in] rsc Resource that operation happened to * \param[in] data_set Cluster working set * * \return Result code that pacemaker should use * * \note If the result is remapped, and the node is not shutting down or failed, * the operation will be recorded in the data set's list of failed * operations, to highlight it for the user. */ static int remap_monitor_rc(int rc, xmlNode *xml_op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { int remapped_rc = rc; switch (rc) { case PCMK_OCF_DEGRADED: remapped_rc = PCMK_OCF_OK; break; case PCMK_OCF_DEGRADED_MASTER: remapped_rc = PCMK_OCF_RUNNING_MASTER; break; default: break; } if (rc != remapped_rc) { crm_trace("Remapping monitor result %d to %d", rc, remapped_rc); if (!node->details->shutdown || node->details->online) { record_failed_op(xml_op, node, rsc, data_set); } } return remapped_rc; } static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *on_fail, pe_working_set_t *data_set) { int rc = 0; int task_id = 0; int target_rc = 0; int status = PCMK_LRM_OP_UNKNOWN; guint interval_ms = 0; const char *task = NULL; const char *task_key = NULL; const char *exit_reason = NULL; bool expired = FALSE; resource_t *parent = rsc; enum action_fail_response failure_strategy = action_fail_recover; CRM_CHECK(rsc && node && xml_op, return); target_rc = pe__target_rc_from_xml(xml_op); task_key = get_op_key(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); CRM_CHECK(task != NULL, return); CRM_CHECK(status <= PCMK_LRM_OP_INVALID, return); CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return); if (!strcmp(task, CRMD_ACTION_NOTIFY) || !strcmp(task, CRMD_ACTION_METADATA)) { /* safe to ignore these */ return; } if (is_not_set(rsc->flags, pe_rsc_unique)) { parent = uber_parent(rsc); } pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)", task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role)); if (node->details->unclean) { pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean." " Further action depends on the value of the stop's on-fail attribute", node->details->uname, rsc->id); } /* It should be possible to call remap_monitor_rc() first then call * check_operation_expiry() only if rc != target_rc, because there should * never be a fail count without at least one unexpected result in the * resource history. That would be more efficient by avoiding having to call * check_operation_expiry() for expected results. * * However, we do have such configurations in the scheduler regression * tests, even if it shouldn't be possible with the current code. 
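     * (That is, test inputs where a fail count is recorded even though every
     * result in the resource history matches its target.)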
     * It's probably a good idea anyway, but that would require updating the
     * test inputs to something currently possible.
     */

    if ((status != PCMK_LRM_OP_NOT_INSTALLED)
        && check_operation_expiry(rsc, node, rc, xml_op, data_set)) {
        expired = TRUE;
    }

    if (!strcmp(task, CRMD_ACTION_STATUS)) {
        rc = remap_monitor_rc(rc, xml_op, node, rsc, data_set);
    }

    if (expired && (rc != target_rc)) {
        const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);

        if (interval_ms == 0) {
            crm_notice("Ignoring expired %s failure on %s "
                       CRM_XS " actual=%d expected=%d magic=%s",
                       task_key, node->details->uname, rc, target_rc, magic);
            goto done;

        } else if (node->details->online && node->details->unclean == FALSE) {
            /* Reschedule the recurring monitor. CancelXmlOp() won't work at
             * this stage, so as a hacky workaround, forcibly change the restart
             * digest so check_action_definition() does what we want later.
             *
             * @TODO We should skip this if there is a newer successful monitor.
             *       Also, this causes rescheduling only if the history entry
             *       has an op-digest (which the expire-non-blocked-failure
             *       scheduler regression test doesn't, but that may not be a
             *       realistic scenario in production).
             */
            crm_notice("Rescheduling %s after failure expired on %s "
                       CRM_XS " actual=%d expected=%d magic=%s",
                       task_key, node->details->uname, rc, target_rc, magic);
            crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST,
                        "calculated-failure-timeout");
            goto done;
        }
    }

    /* If the executor reported an operation status of anything but done or
     * error, consider that final. But for done or error, we know better whether
     * it should be treated as a failure or not, because we know the expected
     * result.
     */
    if (status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) {
        status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail,
                                     data_set);
        pe_rsc_trace(rsc, "Remapped %s status to %d", task_key, status);
    }

    switch (status) {
        case PCMK_LRM_OP_CANCELLED:
            // Should never happen
            pe_err("Resource history contains cancellation '%s' "
                   "(%s of %s on %s at %s)",
                   ID(xml_op), task, rsc->id, node->details->uname,
                   last_change_str(xml_op));
            break;

        case PCMK_LRM_OP_PENDING:
            if (!strcmp(task, CRMD_ACTION_START)) {
                set_bit(rsc->flags, pe_rsc_start_pending);
                set_active(rsc);

            } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
                rsc->role = RSC_ROLE_MASTER;

            } else if (!strcmp(task, CRMD_ACTION_MIGRATE)
                       && node->details->unclean) {
                /* If a pending migrate_to action is out on an unclean node,
                 * we have to force the stop action on the target.
                 */
                const char *migrate_target = crm_element_value(xml_op,
                                                               XML_LRM_ATTR_MIGRATE_TARGET);
                node_t *target = pe_find_node(data_set->nodes, migrate_target);

                if (target) {
                    stop_action(rsc, target, FALSE);
                }
            }

            if (rsc->pending_task == NULL) {
                if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) {
                    rsc->pending_task = strdup(task);
                    rsc->pending_node = node;
                } else {
                    /* Pending probes are not printed, even if pending
                     * operations are requested. If someone ever requests that
                     * behavior, enable the below and the corresponding part of
                     * native.c:native_pending_task().
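                     *
                     * (If enabled, the probe would be tracked via
                     * rsc->pending_task = "probe" as in the disabled block
                     * below; how status output would then display it is an
                     * assumption, not something this change verifies.)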
*/ #if 0 rsc->pending_task = strdup("probe"); rsc->pending_node = node; #endif } } break; case PCMK_LRM_OP_DONE: pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s", task, rsc->id, node->details->uname, last_change_str(xml_op), ID(xml_op)); update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_INSTALLED: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if (failure_strategy == action_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " CRM_XS " status=%d rc=%d id=%s", task, rsc->id, node->details->uname, status, rc, ID(xml_op)); /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */ *on_fail = action_fail_migrate; } resource_location(parent, node, -INFINITY, "hard-error", data_set); unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_CONNECTED: if (pe__is_guest_or_remote_node(node) && is_set(node->details->remote_rsc->flags, pe_rsc_managed)) { /* We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ set_bit(node->details->remote_rsc->flags, pe_rsc_failed); } // fall through case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_ERROR_HARD: case PCMK_LRM_OP_ERROR_FATAL: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_INVALID: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if ((failure_strategy == action_fail_ignore) || (failure_strategy == action_fail_restart_container && !strcmp(task, CRMD_ACTION_STOP))) { crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s " "succeeded " CRM_XS " rc=%d id=%s", task, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); set_bit(rsc->flags, pe_rsc_failure_ignored); record_failed_op(xml_op, node, rsc, data_set); if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); if(status == PCMK_LRM_OP_ERROR_HARD) { do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s", parent->id, node->details->uname, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, node, -INFINITY, "hard-error", data_set); } else if(status == PCMK_LRM_OP_ERROR_FATAL) { crm_err("Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s", parent->id, services_ocf_exitcode_str(rc), (*exit_reason? 
": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, NULL, -INFINITY, "fatal-error", data_set); } } break; } done: pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s", rsc->id, task, role2text(rsc->role), role2text(rsc->next_role)); } static void add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, pe_working_set_t *data_set) { const char *cluster_name = NULL; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID), strdup(node->details->id)); if (safe_str_eq(node->details->id, data_set->dc_uuid)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE)); } cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name"); if (cluster_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME), strdup(cluster_name)); } pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set); if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { const char *site_name = pe_node_attribute_raw(node, "site-name"); if (site_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(site_name)); } else if (cluster_name) { /* Default to cluster-name if unset */ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(cluster_name)); } } } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GListPtr gIter = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", ID(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); node_t *this_node = NULL; xmlNode *node_state = NULL; for (node_state = __xml_first_child_element(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { const char *uname = 
crm_element_value(node_state, XML_ATTR_UNAME); if (node != NULL && safe_str_neq(uname, node)) { continue; } this_node = pe_find_node(data_set->nodes, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pe__is_guest_or_remote_node(this_node)) { determine_remote_online_status(data_set, this_node); } else { determine_online_status(node_state, this_node, data_set); } if (this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE); for (lrm_rsc = __xml_first_child_element(tmp); lrm_rsc != NULL; lrm_rsc = __xml_next_element(lrm_rsc)) { if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) { const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID); if (rsc != NULL && safe_str_neq(rsc_id, rsc)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } } } return output; } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index 6f3f48443d..586d92c5b6 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2597 +1,2597 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled); #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t * pe_rsc_action_details(pe_action_t *action) { pe_rsc_action_details_t *details; CRM_CHECK(action != NULL, return NULL); if (action->action_details == NULL) { action->action_details = calloc(1, sizeof(pe_rsc_action_details_t)); CRM_CHECK(action->action_details != NULL, return NULL); } details = (pe_rsc_action_details_t *) action->action_details; if (details->versioned_parameters == NULL) { details->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS); } if (details->versioned_meta == NULL) { details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); } return details; } static void pe_free_rsc_action_details(pe_action_t *action) { pe_rsc_action_details_t *details; if ((action == NULL) || (action->action_details == NULL)) { return; } details = (pe_rsc_action_details_t *) action->action_details; if (details->versioned_parameters) { free_xml(details->versioned_parameters); } if (details->versioned_meta) { free_xml(details->versioned_meta); } action->action_details = NULL; } #endif /*! 
* \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return true if node can be fenced, false otherwise */ bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node) { if (pe__is_guest_node(node)) { /* Guest nodes are fenced by stopping their container resource. We can * do that if the container's host is either online or fenceable. */ pe_resource_t *rsc = node->details->remote_rsc->container; for (GList *n = rsc->running_on; n != NULL; n = n->next) { pe_node_t *container_node = n->data; if (!container_node->details->online && !pe_can_fence(data_set, container_node)) { return false; } } return true; } else if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return false; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return false; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return true; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return true; } else if(node == NULL) { return false; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return true; } crm_trace("Cannot fence %s", node->details->uname); return false; } node_t * node_copy(const node_t *this_node) { node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { node_t *new_node = NULL; node_t *this_node = (node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { const char *name_a = 
((const node_t *) a)->details->uname; const char *name_b = ((const node_t *) b)->details->uname; while (*name_a && *name_b) { if (isdigit(*name_a) && isdigit(*name_b)) { // If node names contain a number, sort numerically char *end_a = NULL; char *end_b = NULL; long num_a = strtol(name_a, &end_a, 10); long num_b = strtol(name_b, &end_b, 10); // allow ordering e.g. 007 > 7 size_t len_a = end_a - name_a; size_t len_b = end_b - name_b; if (num_a < num_b) { return -1; } else if (num_a > num_b) { return 1; } else if (len_a < len_b) { return -1; } else if (len_a > len_b) { return 1; } name_a = end_a; name_b = end_b; } else { // Compare non-digits case-insensitively int lower_a = tolower(*name_a); int lower_b = tolower(*name_b); if (lower_a < lower_b) { return -1; } else if (lower_a > lower_b) { return 1; } ++name_a; ++name_b; } } if (!*name_a && *name_b) { return -1; } else if (*name_a && !*name_b) { return 1; } return 0; } void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * nodes) { GHashTable *hash = nodes; GHashTableIter iter; node_t *node = NULL; if (level == LOG_NEVER) { return; } if (rsc) { hash = rsc->allowed_nodes; } if (rsc && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't show the allocation scores for orphans */ return; } if (level == LOG_STDOUT) { char score[128]; int len = sizeof(score); /* For now we want this in sorted order to keep the regression tests happy */ GListPtr gIter = NULL; GListPtr list = g_hash_table_get_values(hash); list = g_list_sort(list, sort_node_uname); gIter = list; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } else if (hash) { char score[128]; int len = sizeof(score); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s allocation score on %s: %s", comment, rsc->id, node->details->uname, score); } else { do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment, node->details->uname, score); } } } if (rsc && rsc->children) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; dump_node_scores_worker(level, file, function, line, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = crm_strdup_printf("%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, node_t * node) { char *dump_text = crm_strdup_printf("%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == LOG_STDOUT) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node) { char *dump_text = crm_strdup_printf("%s: %s utilization on 
%s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); switch (level) { case LOG_STDOUT: fprintf(stdout, "%s\n", dump_text); break; case LOG_NEVER: break; default: crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } action_t * custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { - if (g_list_length(possible_matches) > 1) { + if (pcmk__list_of_multiple(possible_matches)) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s", action->id, action->uuid, (rsc? rsc->id : "no resource"), task, (on_node? on_node->details->uname : "no node")); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s", (optional? "optional" : " mandatory"), data_set->action_id, key, (rsc? rsc->id : "no resource"), task, (on_node? 
on_node->details->uname : "no node")); } action = calloc(1, sizeof(action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); } action->extra = crm_str_table_new(); action->meta = crm_str_table_new(); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } if (save_action) { pe_rsc_trace(rsc, "Action %d created", action->id); } } if (!optional && is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Unset optional on action %d", action->id); pe_clear_action_bit(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { pe_set_action_bit(action, pe_action_have_node_attrs); pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, action->node->details->attrs, action->extra, NULL, FALSE, data_set); } if (is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid); pe_clear_action_bit(action, pe_action_runnable); } else if (is_not_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS) == NULL) { crm_debug("Action %s (unmanaged)", action->uuid); pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); /* action->runnable = FALSE; */ } else if (action->node->details->online == FALSE && (!pe__is_guest_node(action->node) || action->node->details->remote_requires_reset)) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)", action->uuid, action->node->details->uname); if (is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable"); } } else if (action->node->details->pending) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid); pe_action_set_reason(action, NULL, TRUE); if (pe__is_guest_node(action->node) && !pe_can_fence(data_set, action->node)) { /* An action that requires nothing usually does not require any * fencing in order to be runnable. However, there is an * exception: an action cannot be completed if it is on a guest * node whose host is unclean and cannot be fenced. 
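             * For example, an action scheduled on a guest node whose
             * container host has failed and cannot be fenced must stay
             * unrunnable: nothing can confirm or recover the guest's state
             * until the host is dealt with.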
*/ pe_clear_action_bit(action, pe_action_runnable); crm_debug("%s\t%s (cancelled : host cannot be fenced)", action->node->details->uname, action->uuid); } else { pe_set_action_bit(action, pe_action_runnable); } #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_stop) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)", action->node->details->uname, action->uuid); } } else if(is_not_set(action->flags, pe_action_runnable)) { pe_rsc_trace(rsc, "Action %s is runnable", action->uuid); //pe_action_set_reason(action, NULL, TRUE); pe_set_action_bit(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: set_bit(rsc->flags, pe_rsc_stopping); break; case start_rsc: clear_bit(rsc->flags, pe_rsc_starting); if (is_set(action->flags, pe_action_runnable)) { set_bit(rsc->flags, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static const char * unpack_operation_on_fail(action_t * action) { const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id); return NULL; } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval_spec = NULL; const char *enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = __xml_first_child_element(action->rsc->ops_xml); operation && !value; operation = __xml_next_element(operation)) { if (!crm_str_eq((const char *)operation->name, "op", TRUE)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) { continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; } value = on_fail; } } return value; } static xmlNode * find_min_interval_mon(resource_t * rsc, gboolean include_disabled) { guint interval_ms = 0; guint min_interval_ms = G_MAXUINT; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = 
__xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (safe_str_neq(name, RSC_STATUS)) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms && (interval_ms < min_interval_ms)) { min_interval_ms = interval_ms; op = operation; } } } return op; } static int unpack_start_delay(const char *value, GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } // true if value contains valid, non-NULL interval origin for recurring op static bool unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms, crm_time_t *now, long long *start_delay) { long long result = 0; guint interval_sec = interval_ms / 1000; crm_time_t *origin = NULL; // Ignore unspecified values and non-recurring operations if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { return false; } // Parse interval origin from text origin = crm_time_new(value); if (origin == NULL) { crm_config_err("Operation '%s' contains invalid " XML_OP_ATTR_ORIGIN " '%s'", (ID(xml_obj)? ID(xml_obj) : "(unspecified)"), value); return false; } // Get seconds since origin (negative if origin is in the future) result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); crm_time_free(origin); // Calculate seconds from closest interval to now result = result % interval_sec; // Calculate seconds remaining until next interval result = ((result <= 0)? 0 : interval_sec) - result; crm_info("Calculated a start delay of %llds for operation '%s'", result, (ID(xml_obj)? 
ID(xml_obj) : "(unspecified)")); if (start_delay != NULL) { *start_delay = result * 1000; // milliseconds } return true; } static int unpack_timeout(const char *value) { int timeout = crm_get_msec(value); if (timeout < 0) { timeout = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout; } int pe_get_configured_timeout(resource_t *rsc, const char *action, pe_working_set_t *data_set) { xmlNode *child = NULL; const char *timeout = NULL; int timeout_ms = 0; for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { timeout = crm_element_value(child, XML_ATTR_TIMEOUT); break; } } if (timeout == NULL && data_set->op_defaults) { GHashTable *action_meta = crm_str_table_new(); pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, action_meta, NULL, FALSE, data_set); timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); } // @TODO check meta-attributes (including versioned meta-attributes) // @TODO maybe use min-interval monitor timeout as default for monitors timeout_ms = crm_get_msec(timeout); if (timeout_ms < 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout_ms; } #if ENABLE_VERSIONED_ATTRS static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child_element(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { long long start_delay = 0; if (unpack_interval_origin(value, xml_obj, interval_ms, now, &start_delay)) { crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout); } } } } #endif /*! * \brief Unpack operation XML into an action structure * * Unpack an operation's meta-attributes (normalizing the interval, timeout, * and start delay values as integer milliseconds), requirements, and * failure policy. 
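 *
 * For example (illustrative values, not from this change): a configured
 * interval="30s" is stored in action->meta as 30000 (milliseconds), and a
 * missing or unparsable timeout falls back to CRM_DEFAULT_OP_TIMEOUT_S.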
* * \param[in,out] action Action to unpack into * \param[in] xml_obj Operation XML (or NULL if all defaults) * \param[in] container Resource that contains affected resource, if any * \param[in] data_set Cluster state */ void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set) { guint interval_ms = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; char *default_timeout = NULL; #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif CRM_CHECK(action && action->rsc, return); // Cluster-wide pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set); // Probe timeouts default differently, so handle timeout default later default_timeout = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); if (default_timeout) { default_timeout = strdup(default_timeout); g_hash_table_remove(action->meta, XML_ATTR_TIMEOUT); } if (xml_obj) { xmlAttrPtr xIter = NULL; // take precedence over defaults pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, TRUE, data_set); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, rsc_details->versioned_parameters, data_set->now, NULL); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, rsc_details->versioned_meta, data_set->now, NULL); #endif /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the tag. */ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval_ms = crm_parse_interval_spec(value); } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { /* An orphaned recurring monitor will not have any XML. However, we * want the interval to be set, so the action can be properly detected * as a recurring monitor. Parse it from the key in this case. 
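             *
             * Illustrative sketch only ("myrsc" is a made-up resource ID),
             * showing the operation key round-trip this relies on:
             */
#if 0
            {
                guint ms = 0;
                char *key = generate_op_key("myrsc", "monitor", 10000);

                /* key is now "myrsc_monitor_10000" */
                parse_op_key(key, NULL, NULL, &ms);     /* ms == 10000 */
                free(key);
            }
#endif
            /*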
*/ parse_op_key(action->uuid, NULL, NULL, &interval_ms); } if (interval_ms > 0) { value_ms = crm_strdup_printf("%u", interval_ms); g_hash_table_replace(action->meta, strdup(field), value_ms); } else if (value) { g_hash_table_remove(action->meta, field); } // Handle timeout default, now that we know the interval if (g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT)) { free(default_timeout); } else { // Probe timeouts default to minimum-interval monitor's if (safe_str_eq(action->task, RSC_STATUS) && (interval_ms == 0)) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); if (value) { crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'", action->uuid, value); free(default_timeout); default_timeout = strdup(value); } } } if (default_timeout) { g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT), default_timeout); } } if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); value = "block"; // The above could destroy the original string } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense"); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* For remote nodes, ensure that any failure that results in dropping an * active connection to the node results in fencing of the node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. 
start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (pe__resource_is_remote_conn(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && (interval_ms == 0)) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence remote node (default)"; } else { value = "recover remote node connection (default)"; } if (action->rsc->remote_reconnect_ms) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); if (value) { pe_warn_once(pe_wo_role_after, "Support for role_after_failure is deprecated and will be removed in a future release"); } } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { long long start_delay = 0; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, &start_delay)) { g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), crm_strdup_printf("%lld", start_delay)); } } value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); timeout = unpack_timeout(value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); #if ENABLE_VERSIONED_ATTRS unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, data_set->now); #endif } static xmlNode * find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled) { guint interval_ms = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } interval_ms = 
crm_parse_interval_spec(interval_spec); match_key = generate_op_key(rsc->id, name, interval_ms); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = generate_op_key(rsc->clone_name, name, interval_ms); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = generate_op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = generate_op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { int log_level = LOG_TRACE; char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending, &log_level); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? 
"" : ": ", (char *)key, (char *)value); } void pe_free_action(action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } #if ENABLE_VERSIONED_ATTRS if (action->rsc) { pe_free_rsc_action_details(action); } #endif free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, action->node->details->uname, on_node->details->uname); } } return result; } GList * find_actions_exact(GList *input, const char *key, const 
pe_node_t *on_node)
{
    GList *result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    if (on_node == NULL) {
        crm_trace("Not searching for action %s because node not specified",
                  key);
        return NULL;
    }

    for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (action->node == NULL) {
            crm_trace("Skipping comparison of %s vs action %s without node",
                      key, action->uuid);

        } else if (safe_str_neq(key, action->uuid)) {
            crm_trace("Desired action %s doesn't match %s", key, action->uuid);

        } else if (safe_str_neq(on_node->details->id,
                                action->node->details->id)) {
            crm_trace("Action %s desired node ID %s doesn't match %s",
                      key, on_node->details->id, action->node->details->id);

        } else {
            crm_trace("Action %s matches", key);
            result = g_list_prepend(result, action);
        }
    }
    return result;
}

/*!
 * \brief Find all actions of given type for a resource
 *
 * \param[in] rsc           Resource to search
 * \param[in] node          Find only actions scheduled on this node
 * \param[in] task          Action name to search for
 * \param[in] require_node  If TRUE, NULL node or action node will not match
 *
 * \return List of actions found (or NULL if none)
 * \note If node is not NULL and require_node is FALSE, matching actions
 *       without a node will be assigned to node.
 */
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                     const char *task, bool require_node)
{
    GList *result = NULL;
    char *key = generate_op_key(rsc->id, task, 0);

    if (require_node) {
        result = find_actions_exact(rsc->actions, key, node);
    } else {
        result = find_actions(rsc->actions, key, node);
    }
    free(key);
    return result;
}

static void
resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag)
{
    node_t *match = NULL;

    if ((rsc->exclusive_discover
         || (node->rsc_discover_mode == pe_discover_never))
        && safe_str_eq(tag, "symmetric_default")) {
        /* This string comparison may be fragile, but exclusive resources and
         * exclusive nodes should not have the symmetric_default constraint
         * applied to them.
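         * For example, a resource with exclusive discovery enabled must keep
         * its allowed_nodes table limited to the nodes explicitly scored for
         * it; letting the symmetric-cluster default score every node would
         * defeat that.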
*/ return; } else if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = merge_weights(match->weight, score); } void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node_iter = (node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); if (safe_str_eq(a_xml_id, b_xml_id)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unlikely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
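     * (For context: this comparator is used via
     * g_list_sort(op_list, sort_op_by_callid), e.g. from
     * extract_operations() in unpack.c, so returning 0 here simply keeps
     * duplicates adjacent instead of failing.)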
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ time_t last_a = -1; time_t last_b = -1; crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %lld vs %lld", (long long) last_a, (long long) last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic a"); } if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (e.g. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { crm_config_err("%s is not part of a promotable clone resource, a %s of '%s' makes no sense", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */
    CRM_ASSERT(lh_action != rh_action);

    /* Filter dups, otherwise update_action_states() has too much work to do */
    gIter = lh_action->actions_after;
    for (; gIter != NULL; gIter = gIter->next) {
        action_wrapper_t *after = (action_wrapper_t *) gIter->data;

        if (after->action == rh_action && (after->type & order)) {
            return FALSE;
        }
    }

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = rh_action;
    wrapper->type = order;
    list = lh_action->actions_after;
    list = g_list_prepend(list, wrapper);
    lh_action->actions_after = list;

    wrapper = NULL;

/*  order |= pe_order_implies_then; */
/*  order ^= pe_order_implies_then; */

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = lh_action;
    wrapper->type = order;
    list = rh_action->actions_before;
    list = g_list_prepend(list, wrapper);
    rh_action->actions_before = list;
    return TRUE;
}

action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
    action_t *op = NULL;

    if (data_set->singletons) {
        op = g_hash_table_lookup(data_set->singletons, name);
    }
    if (op == NULL) {
        op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
        set_bit(op->flags, pe_action_pseudo);
        set_bit(op->flags, pe_action_runnable);
    }
    return op;
}

void
destroy_ticket(gpointer data)
{
    ticket_t *ticket = data;

    if (ticket->state) {
        g_hash_table_destroy(ticket->state);
    }
    free(ticket->id);
    free(ticket);
}

ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
    ticket_t *ticket = NULL;

    if (ticket_id == NULL || strlen(ticket_id) == 0) {
        return NULL;
    }

    if (data_set->tickets == NULL) {
        data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                  free, destroy_ticket);
    }

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {

        ticket = calloc(1, sizeof(ticket_t));
        if (ticket == NULL) {
            crm_err("Cannot allocate ticket '%s'", ticket_id);
            return NULL;
        }

        crm_trace("Creating ticket entry for %s", ticket_id);

        ticket->id = strdup(ticket_id);
        ticket->granted = FALSE;
        ticket->last_granted = -1;
        ticket->standby = FALSE;
        ticket->state = crm_str_table_new();

        g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
    }

    return ticket;
}

static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
    if (param_set && param_string) {
        xmlAttrPtr xIter = param_set->properties;

        while (xIter) {
            const char *prop_name = (const char *)xIter->name;
            char *name = crm_strdup_printf(" %s ", prop_name);
            char *match = strstr(param_string, name);

            free(name);

            // Do now, because current entry might get removed below
            xIter = xIter->next;

            if (need_present && match == NULL) {
                crm_trace("%s not found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);

            } else if (need_present == FALSE && match) {
                crm_trace("%s found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);
            }
        }
    }
}

#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version,
                        xmlNode *params)
{
    GHashTable *hash = pe_unpack_versioned_parameters(versioned_params,
                                                      ra_version);
    char *key = NULL;
    char *value = NULL;
    GHashTableIter iter;

    g_hash_table_iter_init(&iter, hash);
    while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
        crm_xml_add(params, key, value);
    }
    g_hash_table_destroy(hash);
}
#endif
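/* A minimal standalone sketch of the space-padded matching that
 * filter_parameters() relies on: both the parameter list and the probe are
 * wrapped in spaces so that strstr() can only match whole parameter names.
 * The guard macro and function names below are illustrative only, not part
 * of the library.
 */
#ifdef PCMK_EXAMPLE_SKETCH
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
example_param_listed(const char *param_string, const char *prop_name)
{
    char name[256];

    /* Mirror filter_parameters(): pad the candidate name with spaces */
    snprintf(name, sizeof(name), " %s ", prop_name);

    /* param_string is space-delimited and padded, e.g. " passwd password " */
    return strstr(param_string, name) != NULL;
}

int
main(void)
{
    const char *secure_list = " passwd password ";

    printf("%d\n", example_param_listed(secure_list, "passwd"));   /* 1 */
    printf("%d\n", example_param_listed(secure_list, "pass"));     /* 0 */
    return 0;
}
#endif

/*!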
* \internal * \brief Calculate action digests and store in node's digest cache * * \param[in] rsc Resource that action was for * \param[in] task Name of action performed * \param[in] key Action's task key * \param[in] node Node action was performed on * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] calc_secure Whether to calculate secure digest * \param[in] data_set Cluster working set * * \return Pointer to node's digest cache entry */ static op_digest_cache_t * rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, pe_node_t *node, xmlNode *xml_op, bool calc_secure, pe_working_set_t *data_set) { op_digest_cache_t *data = NULL; data = g_hash_table_lookup(node->details->digest_cache, key); if (data == NULL) { GHashTable *local_rsc_params = crm_str_table_new(); action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); #if ENABLE_VERSIONED_ATTRS xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); const char *ra_version = NULL; #endif const char *op_version; const char *restart_list = NULL; const char *secure_list = " passwd password "; data = calloc(1, sizeof(op_digest_cache_t)); CRM_ASSERT(data != NULL); get_rsc_attributes(local_rsc_params, rsc, node, data_set); #if ENABLE_VERSIONED_ATTRS pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); #endif data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside if (pe__add_bundle_remote_name(rsc, data->params_all, XML_RSC_ATTR_REMOTE_RA_ADDR)) { crm_trace("Set address for bundle connection %s (on %s)", rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); g_hash_table_foreach(action->extra, hash2field, data->params_all); g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); g_hash_table_foreach(action->meta, hash2metafield, data->params_all); if(xml_op) { secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); #if ENABLE_VERSIONED_ATTRS ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); #endif } else { op_version = CRM_FEATURE_SET; } #if ENABLE_VERSIONED_ATTRS append_versioned_params(local_versioned_params, ra_version, data->params_all); append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); { pe_rsc_action_details_t *details = pe_rsc_action_details(action); append_versioned_params(details->versioned_parameters, ra_version, data->params_all); } #endif filter_action_parameters(data->params_all, op_version); g_hash_table_destroy(local_rsc_params); pe_free_action(action); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); if (calc_secure) { data->params_secure = copy_xml(data->params_all); if(secure_list) { filter_parameters(data->params_secure, secure_list, FALSE); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { data->params_restart = copy_xml(data->params_all); if (restart_list) { filter_parameters(data->params_restart, restart_list, TRUE); } data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); } g_hash_table_insert(node->details->digest_cache, strdup(key), data); } return data; } op_digest_cache_t * 
rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; char *key = NULL; guint interval_ms = 0; const char *op_version; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); const char *digest_all; const char *digest_restart; CRM_ASSERT(node != NULL); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); interval_ms = crm_parse_ms(interval_ms_s); key = generate_op_key(rsc->id, task, interval_ms); data = rsc_action_digest(rsc, task, key, node, xml_op, is_set(data_set->flags, pe_flag_sanitized), data_set); data->rc = RSC_DIGEST_MATCH; if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s", key, node->details->uname, crm_str(digest_restart), data->digest_restart_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); data->rc = RSC_DIGEST_RESTART; } else if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s", key, node->details->uname, crm_str(digest_all), data->digest_all_calc, (interval_ms > 0)? "reschedule" : "reload", op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); data->rc = RSC_DIGEST_ALL; } free(key); return data; } /*! * \internal * \brief Create an unfencing summary for use in special node attribute * * Create a string combining a fence device's resource ID, agent type, and * parameter digest (whether for all parameters or just non-private parameters). * This can be stored in a special node attribute, allowing us to detect changes * in either the agent type or parameters, to know whether unfencing must be * redone or can be safely skipped when the device's history is cleaned. * * \param[in] rsc_id Fence device resource ID * \param[in] agent_type Fence device agent * \param[in] param_digest Fence device parameter digest * * \return Newly allocated string with unfencing digest * \note The caller is responsible for freeing the result. */ static inline char * create_unfencing_summary(const char *rsc_id, const char *agent_type, const char *param_digest) { return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest); } /*! * \internal * \brief Check whether a node can skip unfencing * * Check whether a fence device's current definition matches a node's * stored summary of when it was last unfenced by the device. 
* * \param[in] rsc_id Fence device's resource ID * \param[in] agent Fence device's agent type * \param[in] digest_calc Fence device's current parameter digest * \param[in] node_summary Value of node's special unfencing node attribute * (a comma-separated list of unfencing summaries for * all devices that have unfenced this node) * * \return TRUE if digest matches, FALSE otherwise */ static bool unfencing_digest_matches(const char *rsc_id, const char *agent, const char *digest_calc, const char *node_summary) { bool matches = FALSE; if (rsc_id && agent && digest_calc && node_summary) { char *search_secure = create_unfencing_summary(rsc_id, agent, digest_calc); /* The digest was calculated including the device ID and agent, * so there is no risk of collision using strstr(). */ matches = (strstr(node_summary, search_secure) != NULL); crm_trace("Calculated unfencing digest '%s' %sfound in '%s'", search_secure, matches? "" : "not ", node_summary); free(search_secure); } return matches; } /* Magic string to use as action name for digest cache entries used for * unfencing checks. This is not a real action name (i.e. "on"), so * check_action_definition() won't confuse these entries with real actions. */ #define STONITH_DIGEST_TASK "stonith-on" /*! * \internal * \brief Calculate fence device digests and digest comparison result * * \param[in] rsc Fence device resource * \param[in] agent Fence device's agent type * \param[in] node Node with digest cache to use * \param[in] data_set Cluster working set * * \return Node's digest cache entry */ static op_digest_cache_t * fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent, pe_node_t *node, pe_working_set_t *data_set) { const char *node_summary = NULL; // Calculate device's current parameter digests char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0); op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, TRUE, data_set); free(key); // Check whether node has special unfencing summary node attribute node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL); if (node_summary == NULL) { data->rc = RSC_DIGEST_UNKNOWN; return data; } // Check whether full parameter digest matches if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc, node_summary)) { data->rc = RSC_DIGEST_MATCH; return data; } // Check whether secure parameter digest matches node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE); if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc, node_summary)) { data->rc = RSC_DIGEST_MATCH; if (is_set(data_set->flags, pe_flag_stdout)) { printf("Only 'private' parameters to %s for unfencing %s changed\n", rsc->id, node->details->uname); } return data; } // Parameters don't match data->rc = RSC_DIGEST_ALL; if (is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout)) && data->digest_secure_calc) { char *digest = create_unfencing_summary(rsc->id, agent, data->digest_secure_calc); printf("Parameters to %s for unfencing %s changed, try '%s'\n", rsc->id, node->details->uname, digest); free(digest); } return data; } const char *rsc_printable_id(resource_t *rsc) { if (is_not_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void clear_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; clear_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; clear_bit_recursive(child_rsc, flag); } } void set_bit_recursive(resource_t * rsc, 
unsigned long long flag) { GListPtr gIter = rsc->children; set_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_bit_recursive(child_rsc, flag); } } static GListPtr find_unfencing_devices(GListPtr candidates, GListPtr matches) { for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) { resource_t *candidate = gIter->data; const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES); const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES); if(candidate->children) { matches = find_unfencing_devices(candidate->children, matches); } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) { continue; } else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) { matches = g_list_prepend(matches, candidate); } } return matches; } action_t * pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set) { char *op_key = NULL; action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, op_key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if (pe__is_guest_or_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the check_action_definition() based stuff works fine. 
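 *
 * The loop below calculates current digests for each device capable of
 * unfencing this node and appends "<rsc-id>:<agent>:<digest>," entries to
 * two buffers, which are then stored on the fencing action as the
 * XML_OP_ATTR_DIGESTS_ALL and XML_OP_ATTR_DIGESTS_SECURE meta-attributes.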
*/ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = calloc(max, sizeof(char)); char *digests_secure = calloc(max, sizeof(char)); GListPtr matches = find_unfencing_devices(data_set->resources, NULL); for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) { resource_t *match = gIter->data; const char *agent = g_hash_table_lookup(match->meta, XML_ATTR_TYPE); op_digest_cache_t *data = NULL; data = fencing_action_digest_cmp(match, agent, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (is_set(data_set->flags, pe_flag_stdout)) { fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, agent, data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, agent, data->digest_secure_calc); } g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_ALL), digests_all); g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_SECURE), digests_secure); } } else { free(op_key); } if(optional == FALSE && pe_can_fence(data_set, node)) { pe_action_required(stonith_op, NULL, reason); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void trigger_unfencing( resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set) { if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { action_t *unfence = pe_fence_op(node, "on", FALSE, reason, data_set); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { tag_t *tag = NULL; GListPtr gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag = calloc(1, sizeof(tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (crm_str_eq(existing_ref, obj_ref, TRUE)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite) { bool unset = FALSE; bool update = FALSE; const char *change = NULL; if(is_set(flags, pe_action_runnable)) { unset = 
TRUE;
        change = "unrunnable";

    } else if(is_set(flags, pe_action_optional)) {
        unset = TRUE;
        change = "required";

    } else if(is_set(flags, pe_action_migrate_runnable)) {
        unset = TRUE;
        overwrite = TRUE;
        change = "unrunnable";

    } else if(is_set(flags, pe_action_dangle)) {
        change = "dangling";

    } else if(is_set(flags, pe_action_requires_any)) {
        change = "required";

    } else {
        crm_err("Unknown flag change to %x by %s: %s",
                flags, action->uuid, (reason? reason->uuid : "0"));
    }

    if(unset) {
        if(is_set(action->flags, flags)) {
            action->flags = crm_clear_bit(function, line, action->uuid,
                                          action->flags, flags);
            update = TRUE;
        }

    } else {
        if(is_not_set(action->flags, flags)) {
            action->flags = crm_set_bit(function, line, action->uuid,
                                        action->flags, flags);
            update = TRUE;
        }
    }

    if((change && update) || text) {
        char *reason_text = NULL;

        if(reason == NULL) {
            pe_action_set_reason(action, text, overwrite);

        } else if(reason->rsc == NULL) {
            reason_text = crm_strdup_printf("%s %s%c %s", change,
                                            reason->task, text?':':0,
                                            text?text:"");
        } else {
            reason_text = crm_strdup_printf("%s %s %s%c %s", change,
                                            reason->rsc->id, reason->task,
                                            text?':':0, text?text:"NA");
        }

        if(reason_text && action->rsc != reason->rsc) {
            pe_action_set_reason(action, reason_text, overwrite);
        }
        free(reason_text);
    }
}

void
pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
    if(action->reason && overwrite) {
        pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
                     action->uuid, action->reason, reason);
        free(action->reason);
        action->reason = NULL;
    }
    if(action->reason == NULL) {
        if(reason) {
            pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
                         action->uuid, reason);
            action->reason = strdup(reason);
        } else {
            action->reason = NULL;
        }
    }
}

/*!
 * \internal
 * \brief Check whether shutdown has been requested for a node
 *
 * \param[in] node  Node to check
 *
 * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
 * \note This differs from simply using node->details->shutdown in that it can
 *       be used before that has been determined (and in fact to determine it),
 *       and it can also be used to distinguish requested shutdown from implicit
 *       shutdown of remote nodes by virtue of their connection stopping.
 */
bool
pe__shutdown_requested(pe_node_t *node)
{
    const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);

    return shutdown && strcmp(shutdown, "0");
}

/*!
 * \internal
 * \brief Update a data set's "recheck by" time
 *
 * \param[in]     recheck   Epoch time when recheck should happen
 * \param[in,out] data_set  Current working set
 */
void
pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
{
    if ((recheck > get_effective_time(data_set))
        && ((data_set->recheck_by == 0)
            || (data_set->recheck_by > recheck))) {
        data_set->recheck_by = recheck;
    }
}
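/* An illustrative, self-contained sketch (guard macro and helper name
 * invented, not part of the library) of the "keep the earliest future time"
 * rule implemented by pe__update_recheck_time() above, where 0 means no
 * recheck is currently scheduled.
 */
#ifdef PCMK_EXAMPLE_SKETCH
#include <time.h>

static time_t
example_update_recheck_by(time_t now, time_t recheck_by, time_t candidate)
{
    /* Consider only future times, and keep the earliest one seen so far */
    if ((candidate > now) && ((recheck_by == 0) || (recheck_by > candidate))) {
        return candidate;
    }
    return recheck_by;
}
#endif

/*!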
* \internal * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set */ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set) { crm_time_t *next_change = crm_time_new_undefined(); pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash, always_first, overwrite, data_set->now, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, data_set); } crm_time_free(next_change); } bool pe__resource_is_disabled(pe_resource_t *rsc) { const char *target_role = NULL; CRM_CHECK(rsc != NULL, return false); target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); if ((target_role_e == RSC_ROLE_STOPPED) || ((target_role_e == RSC_ROLE_SLAVE) && is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) { return true; } } return false; } diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c index 24a65be90e..d6f4a958cd 100644 --- a/tools/crm_mon_print.c +++ b/tools/crm_mon_print.c @@ -1,1287 +1,1286 @@ /* - * Copyright 2019 the Pacemaker project contributors + * Copyright 2019-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "crm_mon.h" static void print_resources_heading(pcmk__output_t *out, unsigned int mon_ops); static void print_resources_closing(pcmk__output_t *out, unsigned int mon_ops); static gboolean print_resources(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int print_opts, unsigned int mon_ops, gboolean brief_output, gboolean print_summary); static void print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, node_t *node, xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list); static void print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, xmlNode *node_state, gboolean operations, unsigned int mon_ops); static gboolean add_extra_info(pcmk__output_t *out, node_t * node, GListPtr rsc_list, const char *attrname, int *expected_score); static void print_node_attribute(gpointer name, gpointer user_data); static gboolean print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set, gboolean operations, unsigned int mon_ops); static gboolean print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set); static gboolean print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, const char *prefix); static gboolean print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops); static void print_cluster_times(pcmk__output_t *out, pe_working_set_t *data_set); static void print_cluster_dc(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops); static void print_cluster_summary(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, unsigned int show, mon_output_format_t fmt); static gboolean print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set); static gboolean print_failed_stonith_actions(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops); 
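/* The mon_ops parameters in the declarations above and below are bitmasks of
 * mon_op_options, tested throughout this file with is_set(). A minimal sketch
 * of that convention follows; the guard macro and flag names are invented for
 * illustration (the real flags are defined in the tool's crm_mon.h header).
 */
#ifdef PCMK_EXAMPLE_SKETCH
enum example_mon_opts {
    example_op_group_by_node      = (1 << 0),
    example_op_inactive_resources = (1 << 1),
};

static int
example_is_set(unsigned int word, unsigned int flag)
{
    /* Same spirit as the is_set() checks used by the printing functions */
    return (word & flag) == flag;
}
#endif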
static gboolean print_stonith_pending(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops); static gboolean print_stonith_history(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops); static gboolean print_stonith_history_full(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops); /*! * \internal * \brief Print resources section heading appropriate to options * * \param[in] out The output functions structure. * \param[in] mon_ops Bitmask of mon_op_options. */ static void print_resources_heading(pcmk__output_t *out, unsigned int mon_ops) { const char *heading; if (is_set(mon_ops, mon_op_group_by_node)) { /* Active resources have already been printed by node */ heading = is_set(mon_ops, mon_op_inactive_resources) ? "Inactive Resources" : NULL; } else if (is_set(mon_ops, mon_op_inactive_resources)) { heading = "Full List of Resources"; } else { heading = "Active Resources"; } /* Print section heading */ out->begin_list(out, NULL, NULL, "%s", heading); } /*! * \internal * \brief Print whatever resource section closing is appropriate * * \param[in] out The output functions structure. * \param[in] mon_ops Bitmask of mon_op_options. */ static void print_resources_closing(pcmk__output_t *out, unsigned int mon_ops) { const char *heading; /* What type of resources we did or did not display */ if (is_set(mon_ops, mon_op_group_by_node)) { heading = "inactive "; } else if (is_set(mon_ops, mon_op_inactive_resources)) { heading = ""; } else { heading = "active "; } out->list_item(out, NULL, "No %sresources", heading); } /*! * \internal * \brief Print whatever resource section(s) are appropriate * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] print_opts Bitmask of pe_print_options. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] brief_output Whether to display full or brief output. * \param[in] print_summary Whether to display a failure summary. 
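 *
 * \return TRUE if the resources section was printed, FALSE if it was skipped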
*/ static gboolean print_resources(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int print_opts, unsigned int mon_ops, gboolean brief_output, gboolean print_summary) { GListPtr rsc_iter; gboolean printed_resource = FALSE; /* If we already showed active resources by node, and * we're not showing inactive resources, we have nothing to do */ if (is_set(mon_ops, mon_op_group_by_node) && is_not_set(mon_ops, mon_op_inactive_resources)) { return FALSE; } print_resources_heading(out, mon_ops); /* If we haven't already printed resources grouped by node, * and brief output was requested, print resource summary */ if (brief_output && is_not_set(mon_ops, mon_op_group_by_node)) { pe__rscs_brief_output(out, data_set->resources, print_opts, is_set(mon_ops, mon_op_inactive_resources)); } /* For each resource, display it if appropriate */ for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data; /* Complex resources may have some sub-resources active and some inactive */ gboolean is_active = rsc->fns->active(rsc, TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); /* Skip inactive orphans (deleted but still in CIB) */ if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) { continue; /* Skip active resources if we already displayed them by node */ } else if (is_set(mon_ops, mon_op_group_by_node)) { if (is_active) { continue; } /* Skip primitives already counted in a brief summary */ } else if (brief_output && (rsc->variant == pe_native)) { continue; /* Skip resources that aren't at least partially active, * unless we're displaying inactive resources */ } else if (!partially_active && is_not_set(mon_ops, mon_op_inactive_resources)) { continue; } /* Print this resource */ if (printed_resource == FALSE) { printed_resource = TRUE; } out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc); } if (print_summary && !printed_resource) { print_resources_closing(out, mon_ops); } out->end_list(out); return TRUE; } static int failure_count(pe_working_set_t *data_set, node_t *node, resource_t *rsc, time_t *last_failure) { return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default, NULL, data_set) : 0; } static GListPtr get_operation_list(xmlNode *rsc_entry) { GListPtr op_list = NULL; xmlNode *rsc_op = NULL; for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *) rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_append(op_list, rsc_op); } } op_list = g_list_sort(op_list, sort_op_by_callid); return op_list; } /*! * \internal * \brief Print resource operation/failure history * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] node Node that ran this resource. * \param[in] rsc_entry Root of XML tree describing resource status. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] op_list A list of operations to print. 
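 *
 * \note This function frees \p op_list when done with it.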
*/ static void print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, node_t *node, xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list) { GListPtr gIter = NULL; gboolean printed = FALSE; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); /* Print each operation */ for (gIter = op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode *) gIter->data; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); int rc = crm_parse_int(op_rc, "0"); /* Display 0-interval monitors as "probe" */ if (safe_str_eq(task, CRMD_ACTION_STATUS) && ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) { task = "probe"; } /* Ignore notifies and some probes */ if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || (safe_str_eq(task, "probe") && (rc == 7))) { continue; } /* If this is the first printed operation, print heading for resource */ if (printed == FALSE) { time_t last_failure = 0; int failcount = failure_count(data_set, node, rsc, &last_failure); out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure); printed = TRUE; } /* Print the operation */ out->message(out, "op-history", xml_op, task, interval_ms_s, rc, is_set(mon_ops, mon_op_print_timing)); } /* Free the list we created (no need to free the individual items) */ g_list_free(op_list); /* If we printed anything, close the resource */ if (printed) { out->end_list(out); } } /*! * \internal * \brief Print node operation/failure history * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] node_state Root of XML tree describing node status. * \param[in] operations Whether to print operations or just failcounts. * \param[in] mon_ops Bitmask of mon_op_options. 
*/ static void print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, xmlNode *node_state, gboolean operations, unsigned int mon_ops) { node_t *node = pe_find_node_id(data_set->nodes, ID(node_state)); xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; gboolean printed_header = FALSE; if (node && node->details && node->details->online) { lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); /* Print history of each of the node's resources */ for (rsc_entry = __xml_first_child_element(lrm_rsc); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if (operations == FALSE) { const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); time_t last_failure = 0; int failcount = failure_count(data_set, node, rsc, &last_failure); if (failcount > 0) { if (printed_header == FALSE) { printed_header = TRUE; out->message(out, "node", node, get_resource_display_options(mon_ops), FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); } out->message(out, "resource-history", rsc, rsc_id, FALSE, failcount, last_failure); out->end_list(out); } } else { GListPtr op_list = get_operation_list(rsc_entry); if (printed_header == FALSE) { printed_header = TRUE; out->message(out, "node", node, get_resource_display_options(mon_ops), FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); } - if (g_list_length(op_list) > 0) { + if (op_list != NULL) { print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list); } } } } if (printed_header) { out->end_list(out); } } } /*! * \internal * \brief Determine whether extended information about an attribute should be added. * * \param[in] out The output functions structure. * \param[in] node Node that ran this resource. * \param[in] rsc_list The list of resources for this node. * \param[in] attrname The attribute to find. * \param[out] expected_score The expected value for this attribute. * * \return TRUE if extended information should be printed, FALSE otherwise * \note Currently, extended information is only supported for ping/pingd * resources, for which a message will be printed if connectivity is lost * or degraded. */ static gboolean add_extra_info(pcmk__output_t *out, node_t *node, GListPtr rsc_list, const char *attrname, int *expected_score) { GListPtr gIter = NULL; for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; const char *type = g_hash_table_lookup(rsc->meta, "type"); const char *name = NULL; if (rsc->children != NULL) { if (add_extra_info(out, node, rsc->children, attrname, expected_score)) { return TRUE; } } if (safe_str_neq(type, "ping") && safe_str_neq(type, "pingd")) { return FALSE; } name = g_hash_table_lookup(rsc->parameters, "name"); if (name == NULL) { name = "pingd"; } /* To identify the resource with the attribute name. 
*/ if (safe_str_eq(name, attrname)) { int host_list_num = 0; /* int value = crm_parse_int(attrvalue, "0"); */ const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list"); const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier"); if (hosts) { char **host_list = g_strsplit(hosts, " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); } /* pingd multiplier is the same as the default value. */ *expected_score = host_list_num * crm_parse_int(multiplier, "1"); return TRUE; } } return FALSE; } /* structure for passing multiple user data to g_list_foreach() */ struct mon_attr_data { pcmk__output_t *out; node_t *node; }; static void print_node_attribute(gpointer name, gpointer user_data) { const char *value = NULL; int expected_score = 0; gboolean add_extra = FALSE; struct mon_attr_data *data = (struct mon_attr_data *) user_data; value = pe_node_attribute_raw(data->node, name); add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc, name, &expected_score); /* Print attribute name and value */ data->out->message(data->out, "node-attribute", name, value, add_extra, expected_score); } /*! * \internal * \brief Print history for all nodes. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] operations Whether to print operations or just failcounts. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set, gboolean operations, unsigned int mon_ops) { xmlNode *node_state = NULL; xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); if (xmlChildElementCount(cib_status) == 0) { return FALSE; } /* Print heading */ if (operations) { out->begin_list(out, NULL, NULL, "Operations"); } else { out->begin_list(out, NULL, NULL, "Migration Summary"); } /* Print each node in the CIB status */ for (node_state = __xml_first_child_element(cib_status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { print_node_history(out, data_set, node_state, operations, mon_ops); } } /* Close section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Print all tickets. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. */ static gboolean print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set) { GHashTableIter iter; gpointer key, value; if (g_hash_table_size(data_set->tickets) == 0) { return FALSE; } /* Print section heading */ out->begin_list(out, NULL, NULL, "Tickets"); /* Print each ticket */ g_hash_table_iter_init(&iter, data_set->tickets); while (g_hash_table_iter_next(&iter, &key, &value)) { ticket_t *ticket = (ticket_t *) value; out->message(out, "ticket", ticket); } /* Close section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Print section for negative location constraints * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] prefix ID prefix to filter results by. 
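 *
 * \return TRUE if any negative location constraints were printed,
 *         FALSE otherwise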
*/ static gboolean print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, const char *prefix) { GListPtr gIter, gIter2; gboolean printed_header = FALSE; /* Print each ban */ for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { pe__location_t *location = gIter->data; if (!g_str_has_prefix(location->id, prefix)) continue; for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) { pe_node_t *node = (pe_node_t *) gIter2->data; if (node->weight < 0) { if (printed_header == FALSE) { printed_header = TRUE; out->begin_list(out, NULL, NULL, "Negative Location Constraints"); } out->message(out, "ban", node, location, is_set(mon_ops, mon_op_print_clone_detail)); } } } if (printed_header) { out->end_list(out); return TRUE; } else { return FALSE; } } /*! * \internal * \brief Print node attributes section * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops) { GListPtr gIter = NULL; gboolean printed_header = FALSE; /* Unpack all resource parameters (it would be more efficient to do this * only when needed for the first time in add_extra_info()) */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { crm_mon_get_parameters(gIter->data, data_set); } /* Display each node's attributes */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { struct mon_attr_data data; data.out = out; data.node = (node_t *) gIter->data; if (data.node && data.node->details && data.node->details->online) { GList *attr_list = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, data.node->details->attrs); while (g_hash_table_iter_next (&iter, &key, &value)) { attr_list = append_attr_list(attr_list, key); } - if (g_list_length(attr_list) == 0) { - g_list_free(attr_list); + if (attr_list == NULL) { continue; } if (printed_header == FALSE) { printed_header = TRUE; out->begin_list(out, NULL, NULL, "Node Attributes"); } out->message(out, "node", data.node, get_resource_display_options(mon_ops), FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); g_list_foreach(attr_list, print_node_attribute, &data); g_list_free(attr_list); out->end_list(out); } } /* Print section footer */ if (printed_header) { out->end_list(out); return TRUE; } else { return FALSE; } } /*! * \internal * \brief Print times the display was last updated and CIB last changed * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. */ static void print_cluster_times(pcmk__output_t *out, pe_working_set_t *data_set) { const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN); const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER); const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT); const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG); out->message(out, "cluster-times", last_written, user, client, origin); } /*! * \internal * \brief Print current DC and its version * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_options. 
*/ static void print_cluster_dc(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops) { node_t *dc = data_set->dc_node; xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']", data_set->input, LOG_DEBUG); const char *dc_version_s = dc_version? crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE) : NULL; const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM); char *dc_name = dc? pe__node_display_name(dc, is_set(mon_ops, mon_op_print_clone_detail)) : NULL; out->message(out, "cluster-dc", dc, quorum, dc_version_s, dc_name); free(dc_name); } /*! * \internal * \brief Print a summary of cluster-wide information * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] show Bitmask of mon_show_options. */ static void print_cluster_summary(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, unsigned int show, mon_output_format_t fmt) { const char *stack_s = get_cluster_stack(data_set); gboolean header_printed = FALSE; if (show & mon_show_stack) { if (header_printed == FALSE) { out->begin_list(out, NULL, NULL, "Cluster Summary"); header_printed = TRUE; } out->message(out, "cluster-stack", stack_s); } /* Always print DC if none, even if not requested */ if ((data_set->dc_node == NULL) || (show & mon_show_dc)) { if (header_printed == FALSE) { out->begin_list(out, NULL, NULL, "Cluster Summary"); header_printed = TRUE; } print_cluster_dc(out, data_set, mon_ops); } if (show & mon_show_times) { if (header_printed == FALSE) { out->begin_list(out, NULL, NULL, "Cluster Summary"); header_printed = TRUE; } print_cluster_times(out, data_set); } if (is_set(data_set->flags, pe_flag_maintenance_mode) || data_set->disabled_resources || data_set->blocked_resources || is_set(show, mon_show_count)) { if (header_printed == FALSE) { out->begin_list(out, NULL, NULL, "Cluster Summary"); header_printed = TRUE; } out->message(out, "cluster-counts", g_list_length(data_set->nodes), data_set->ninstances, data_set->disabled_resources, data_set->blocked_resources); } /* There is not a separate option for showing cluster options, so show with * stack for now; a separate option could be added if there is demand */ if (show & mon_show_stack) { if (fmt == mon_output_html || fmt == mon_output_cgi) { /* Kind of a hack - close the list we may have opened earlier in this * function so we can put all the options into their own list. We * only want to do this on HTML output, though. */ if (header_printed == TRUE) { out->end_list(out); } out->begin_list(out, NULL, NULL, "Config Options"); } out->message(out, "cluster-options", data_set); } if (header_printed) { out->end_list(out); } } /*! * \internal * \brief Print a section for failed actions * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. */ static gboolean print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set) { xmlNode *xml_op = NULL; if (xmlChildElementCount(data_set->failed) == 0) { return FALSE; } /* Print section heading */ out->begin_list(out, NULL, NULL, "Failed Resource Actions"); /* Print each failed action */ for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { out->message(out, "failed-action", xml_op); } /* End section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Print a section for failed stonith actions * * \note This function should not be called for XML output. 
* * \param[in] out The output functions structure. * \param[in] history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_failed_stonith_actions(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops) { stonith_history_t *hp; for (hp = history; hp; hp = hp->next) { if (hp->state == st_failed) { break; } } if (!hp) { return FALSE; } /* Print section heading */ out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); /* Print each failed stonith action */ for (hp = history; hp; hp = hp->next) { if (hp->state == st_failed) { out->message(out, "stonith-event", hp, is_set(mon_ops, mon_op_fence_full_history), stonith__later_succeeded(hp, history)); out->increment_list(out); } } /* End section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Print pending stonith actions * * \note This function should not be called for XML output. * * \param[in] out The output functions structure. * \param[in] history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_stonith_pending(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops) { /* xml-output always shows the full history * so we'll never have to show pending-actions * separately */ if (history && (history->state != st_failed) && (history->state != st_done)) { stonith_history_t *hp; /* Print section heading */ out->begin_list(out, NULL, NULL, "Pending Fencing Actions"); for (hp = history; hp; hp = hp->next) { if ((hp->state == st_failed) || (hp->state == st_done)) { break; } out->message(out, "stonith-event", hp, is_set(mon_ops, mon_op_fence_full_history), stonith__later_succeeded(hp, history)); out->increment_list(out); } /* End section */ out->end_list(out); return TRUE; } return FALSE; } /*! * \internal * \brief Print fencing history, skipping all failed actions. * * \note This function should not be called for XML output. * * \param[in] out The output functions structure. * \param[in] history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_stonith_history(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops) { stonith_history_t *hp; if (history == NULL) { return FALSE; } /* Print section heading */ out->begin_list(out, NULL, NULL, "Fencing History"); for (hp = history; hp; hp = hp->next) { if (hp->state != st_failed) { out->message(out, "stonith-event", hp, is_set(mon_ops, mon_op_fence_full_history), stonith__later_succeeded(hp, history)); out->increment_list(out); } } /* End section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Print fencing history, including failed actions. * * \note This function should be called for XML output. It may also be * interesting for other output formats. * * \param[in] out The output functions structure. * \param[in] history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. */ static gboolean print_stonith_history_full(pcmk__output_t *out, stonith_history_t *history, unsigned int mon_ops) { stonith_history_t *hp; if (history == NULL) { return FALSE; } /* Print section heading */ out->begin_list(out, NULL, NULL, "Fencing History"); for (hp = history; hp; hp = hp->next) { out->message(out, "stonith-event", hp, is_set(mon_ops, mon_op_fence_full_history), stonith__later_succeeded(hp, history)); out->increment_list(out); } /* End section */ out->end_list(out); return TRUE; } /*! * \internal * \brief Top-level printing function for text/curses output. 
* * \param[in] out The output functions structure. * \param[in] output_format Is this text or curses output? * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] show Bitmask of mon_show_options. * \param[in] prefix ID prefix to filter results by. */ void print_status(pcmk__output_t *out, mon_output_format_t output_format, pe_working_set_t *data_set, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, const char *prefix) { GListPtr gIter = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); gboolean printed = FALSE; /* space-separated lists of node names */ char *online_nodes = NULL; char *online_remote_nodes = NULL; char *online_guest_nodes = NULL; char *offline_nodes = NULL; char *offline_remote_nodes = NULL; print_cluster_summary(out, data_set, mon_ops, show, output_format); if (is_set(show, mon_show_headers)) { out->info(out, "%s", ""); } /* Gather node information (and print if in bad state or grouping by node) */ out->begin_list(out, NULL, NULL, "Node List"); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; const char *node_mode = NULL; char *node_name = pe__node_display_name(node, is_set(mon_ops, mon_op_print_clone_detail)); /* Get node mode */ if (node->details->unclean) { if (node->details->online) { node_mode = "UNCLEAN (online)"; } else if (node->details->pending) { node_mode = "UNCLEAN (pending)"; } else { node_mode = "UNCLEAN (offline)"; } } else if (node->details->pending) { node_mode = "pending"; } else if (node->details->standby_onfail && node->details->online) { node_mode = "standby (on-fail)"; } else if (node->details->standby) { if (node->details->online) { if (node->details->running_rsc) { node_mode = "standby (with active resources)"; } else { node_mode = "standby"; } } else { node_mode = "OFFLINE (standby)"; } } else if (node->details->maintenance) { if (node->details->online) { node_mode = "maintenance"; } else { node_mode = "OFFLINE (maintenance)"; } } else if (node->details->online) { node_mode = "online"; if (is_not_set(mon_ops, mon_op_group_by_node)) { if (pe__is_guest_node(node)) { online_guest_nodes = add_list_element(online_guest_nodes, node_name); } else if (pe__is_remote_node(node)) { online_remote_nodes = add_list_element(online_remote_nodes, node_name); } else { online_nodes = add_list_element(online_nodes, node_name); } free(node_name); continue; } } else { node_mode = "OFFLINE"; if (is_not_set(mon_ops, mon_op_group_by_node)) { if (pe__is_remote_node(node)) { offline_remote_nodes = add_list_element(offline_remote_nodes, node_name); } else if (pe__is_guest_node(node)) { /* ignore offline guest nodes */ } else { offline_nodes = add_list_element(offline_nodes, node_name); } free(node_name); continue; } } /* If we get here, node is in bad state, or we're grouping by node */ out->message(out, "node", node, get_resource_display_options(mon_ops), TRUE, node_mode, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); free(node_name); } /* If we're not grouping by node, summarize nodes by status */ if (online_nodes) { out->list_item(out, "Online", "[%s ]", online_nodes); free(online_nodes); } if (offline_nodes) { out->list_item(out, "OFFLINE", "[%s ]", offline_nodes); free(offline_nodes); } if (online_remote_nodes) { out->list_item(out, "RemoteOnline", "[%s ]", online_remote_nodes); 
free(online_remote_nodes); } if (offline_remote_nodes) { out->list_item(out, "RemoteOFFLINE", "[%s ]", offline_remote_nodes); free(offline_remote_nodes); } if (online_guest_nodes) { out->list_item(out, "GuestOnline", "[%s ]", online_guest_nodes); free(online_guest_nodes); } out->end_list(out); out->info(out, "%s", ""); /* Print resources section, if needed */ printed = print_resources(out, data_set, print_opts, mon_ops, is_set(mon_ops, mon_op_print_brief), TRUE); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { if (printed) { out->info(out, "%s", ""); } printed = print_node_attributes(out, data_set, mon_ops); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { if (printed) { out->info(out, "%s", ""); } printed = print_node_summary(out, data_set, ((show & mon_show_operations)? TRUE : FALSE), mon_ops); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { if (printed) { out->info(out, "%s", ""); } printed = print_failed_actions(out, data_set); } /* Print failed stonith actions */ if (is_set(mon_ops, mon_op_fence_history)) { if (printed) { out->info(out, "%s", ""); } printed = print_failed_stonith_actions(out, stonith_history, mon_ops); } /* Print tickets if requested */ if (show & mon_show_tickets) { if (printed) { out->info(out, "%s", ""); } printed = print_cluster_tickets(out, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { if (printed) { out->info(out, "%s", ""); } printed = print_neg_locations(out, data_set, mon_ops, prefix); } /* Print stonith history */ if (is_set(mon_ops, mon_op_fence_history)) { if (printed) { out->info(out, "%s", ""); } if (show & mon_show_fence_history) { print_stonith_history(out, stonith_history, mon_ops); } else { print_stonith_pending(out, stonith_history, mon_ops); } } } /*! * \internal * \brief Top-level printing function for XML output. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] show Bitmask of mon_show_options. * \param[in] prefix ID prefix to filter results by. */ void print_xml_status(pcmk__output_t *out, mon_output_format_t output_format, pe_working_set_t *data_set, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, const char *prefix) { GListPtr gIter = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); print_cluster_summary(out, data_set, mon_ops, show, output_format); /*** NODES ***/ out->begin_list(out, NULL, NULL, "nodes"); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; out->message(out, "node", node, get_resource_display_options(mon_ops), TRUE, NULL, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); } out->end_list(out); /* Print resources section, if needed */ print_resources(out, data_set, print_opts, mon_ops, FALSE, FALSE); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { print_node_attributes(out, data_set, mon_ops); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { print_node_summary(out, data_set, ((show & mon_show_operations)? 
TRUE : FALSE), mon_ops); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { print_failed_actions(out, data_set); } /* Print stonith history */ if (is_set(mon_ops, mon_op_fence_history)) { print_stonith_history_full(out, stonith_history, mon_ops); } /* Print tickets if requested */ if (show & mon_show_tickets) { print_cluster_tickets(out, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { print_neg_locations(out, data_set, mon_ops, prefix); } } /*! * \internal * \brief Top-level printing function for HTML output. * * \param[in] out The output functions structure. * \param[in] output_format Is this HTML or CGI output? * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_options. * \param[in] show Bitmask of mon_show_options. * \param[in] prefix ID prefix to filter results by. */ int print_html_status(pcmk__output_t *out, mon_output_format_t output_format, pe_working_set_t *data_set, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, const char *prefix) { GListPtr gIter = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); print_cluster_summary(out, data_set, mon_ops, show, output_format); /*** NODE LIST ***/ out->begin_list(out, NULL, NULL, "Node List"); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; out->message(out, "node", node, get_resource_display_options(mon_ops), TRUE, NULL, is_set(mon_ops, mon_op_print_clone_detail), is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node)); } out->end_list(out); /* Print resources section, if needed */ print_resources(out, data_set, print_opts, mon_ops, is_set(mon_ops, mon_op_print_brief), TRUE); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { print_node_attributes(out, data_set, mon_ops); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { print_node_summary(out, data_set, ((show & mon_show_operations)? TRUE : FALSE), mon_ops); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { print_failed_actions(out, data_set); } /* Print failed stonith actions */ if (is_set(mon_ops, mon_op_fence_history)) { print_failed_stonith_actions(out, stonith_history, mon_ops); } /* Print stonith history */ if (is_set(mon_ops, mon_op_fence_history)) { if (show & mon_show_fence_history) { print_stonith_history(out, stonith_history, mon_ops); } else { print_stonith_pending(out, stonith_history, mon_ops); } } /* Print tickets if requested */ if (show & mon_show_tickets) { print_cluster_tickets(out, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { print_neg_locations(out, data_set, mon_ops, prefix); } return 0; } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index 61eb82a56e..9ae24b6fb0 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,2104 +1,2104 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include int resource_verbose = 0; bool do_force = FALSE; int crmd_replies_needed = 1; /* The welcome message */ const char *attr_set_type = XML_TAG_ATTR_SETS; static int do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set) { int found = 0; GListPtr lpc = NULL; for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { node_t *node = (node_t *) lpc->data; if (BE_QUIET) { fprintf(stdout, "%s\n", node->details->uname); } else { const char *state = ""; if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) { state = "Master"; } fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state); } found++; } if (BE_QUIET == FALSE && found == 0) { fprintf(stderr, "resource %s is NOT running\n", rsc); } return found; } int cli_resource_search(resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { int found = 0; resource_t *parent = uber_parent(rsc); if (pe_rsc_is_clone(rsc)) { for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) && is_not_set(rsc->flags, pe_rsc_unique) && rsc->clone_name && safe_str_eq(requested_name, rsc->clone_name) && safe_str_neq(requested_name, rsc->id)) { for (GListPtr iter = parent->children; iter; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } } else { found += do_find_resource(requested_name, rsc, data_set); } return found; } #define XPATH_MAX 1024 static int find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; int rc = pcmk_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return -ENOTCONN; } xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc); if (set_type) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type); if (set_name) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair["); if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id); } if (attr_name) { if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and "); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]"); CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); if (rc != pcmk_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { xmlNode *child = NULL; rc = -EINVAL; printf("Multiple attributes match name=%s\n", attr_name); for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { printf(" Value: %s \t(id=%s)\n", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else if(value) { const char *tmp = crm_element_value(xml_search, attr); if (tmp) { *value = strdup(tmp); } } bail: free(xpath_string); free_xml(xml_search); return rc; } /* PRIVATE. 
Use the find_matching_attr_resources instead. */ static void find_matching_attr_resources_recursive(GList/* */ ** result, resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, int depth) { int rc = pcmk_ok; char *lookup_id = clone_strip(rsc->id); char *local_attr_id = NULL; /* visit the children */ for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { find_matching_attr_resources_recursive(result, (resource_t*)gIter->data, rsc_id, attr_set, attr_id, attr_name, cib, cmd, depth+1); /* do it only once for clones */ if(pe_clone == rsc->variant) { break; } } rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(local_attr_id); free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd) { int rc = pcmk_ok; char *lookup_id = NULL; char *local_attr_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. */ if(do_force == TRUE) { return g_list_append(result, rsc); } if(rsc->parent && pe_clone == rsc->parent->variant) { int rc = pcmk_ok; char *local_attr_id = NULL; rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); free(local_attr_id); if(rc != pcmk_ok) { rsc = rsc->parent; if (BE_QUIET == FALSE) { printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); } } return g_list_append(result, rsc); } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) { resource_t *child = rsc->children->data; if(child->variant == pe_native) { lookup_id = clone_strip(child->id); /* Could be a cloned group! */ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == pcmk_ok) { rsc = child; if (BE_QUIET == FALSE) { printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); } } free(local_attr_id); free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. 
*/ find_matching_attr_resources_recursive(&result, rsc, rsc_id, attr_set, attr_id, attr_name, cib, cmd, 0); return result; } int cli_resource_update_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, bool recursive, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; static bool need_init = TRUE; char *local_attr_id = NULL; char *local_attr_set = NULL; GList/**/ *resources = NULL; const char *common_attr_id = attr_id; if(attr_id == NULL && do_force == FALSE && find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) { printf("\n"); } if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { if (do_force == FALSE) { rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", uber_parent(rsc)->id, attr_name, local_attr_id); printf(" Delete '%s' first or use the force option to override\n", local_attr_id); } free(local_attr_id); if (rc == pcmk_ok) { return -ENOTUNIQ; } } resources = g_list_append(resources, rsc); } else { resources = find_matching_attr_resources(rsc, requested_name, attr_set, attr_id, attr_name, cib, "update"); } /* If either attr_set or attr_id is specified, * one clearly intends to modify a single resource. * It is the last item on the resource list.*/ for(GList *gIter = (attr_set||attr_id) ? g_list_last(resources) : resources ; gIter; gIter = gIter->next) { char *lookup_id = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; local_attr_id = NULL; local_attr_set = NULL; rsc = (resource_t*)gIter->data; attr_id = common_attr_id; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if (rc != -ENXIO) { free(lookup_id); free(local_attr_id); g_list_free(resources); return rc; } else { const char *tag = crm_element_name(rsc->xml); if (attr_set == NULL) { local_attr_set = crm_concat(lookup_id, attr_set_type, '-'); attr_set = local_attr_set; } if (attr_id == NULL) { local_attr_id = crm_concat(attr_set, attr_name, '-'); attr_id = local_attr_id; } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); } xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
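/* Aside (illustration only, not part of the original patch): for a call such
 * as find_resource_attr(cib, XML_ATTR_ID, "rsc1", "meta_attributes", NULL,
 * NULL, "target-role", &id) -- "rsc1" and "target-role" being made-up
 * example values -- the snprintf chain in find_resource_attr() builds an
 * XPath query along the lines of:
 *
 *   <resources section path>//*[@id="rsc1"]/meta_attributes//nvpair[@name="target-role"]
 *
 * with the leading section path supplied by get_object_path("resources").
 */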
attr_name : "", attr_value); } free_xml(xml_top); free(lookup_id); free(local_attr_id); free(local_attr_set); if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { GListPtr lpc = NULL; if(need_init) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); need_init = FALSE; unpack_constraints(cib_constraints, data_set); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } } crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); set_bit(rsc->flags, pe_rsc_allocating); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; resource_t *peer = cons->rsc_lh; crm_debug("Checking %s %d", cons->id, cons->score); if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) { /* Don't get into colocation loops */ crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id); cli_resource_update_attribute(peer, peer->id, NULL, NULL, attr_name, attr_value, recursive, cib, data_set); } } } } g_list_free(resources); return rc; } int cli_resource_delete_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; GList/**/ *resources = NULL; if(attr_id == NULL && do_force == FALSE && find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) { printf("\n"); } if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { resources = find_matching_attr_resources(rsc, requested_name, attr_set, attr_id, attr_name, cib, "delete"); } else { resources = g_list_append(resources, rsc); } for(GList *gIter = resources; gIter; gIter = gIter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; char *local_attr_id = NULL; rsc = (resource_t*)gIter->data; lookup_id = clone_strip(rsc->id); rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == -ENXIO) { free(lookup_id); rc = pcmk_ok; continue; } else if (rc != pcmk_ok) { free(lookup_id); g_list_free(resources); return rc; } if (attr_id == NULL) { attr_id = local_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
attr_name : ""); } free(lookup_id); free_xml(xml_obj); free(local_attr_id); } g_list_free(resources); return rc; } static int send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, const char *host_uname, const char *rsc_id, bool only_failed, pe_working_set_t * data_set) { char *our_pid = NULL; char *key = NULL; int rc = -ECOMM; xmlNode *cmd = NULL; xmlNode *xml_rsc = NULL; const char *value = NULL; const char *router_node = host_uname; xmlNode *params = NULL; xmlNode *msg_data = NULL; resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { CMD_ERR("Resource %s not found", rsc_id); return -ENXIO; } else if (rsc->variant != pe_native) { CMD_ERR("We can only process primitive resources, not %s", rsc_id); return -EINVAL; } else if (host_uname == NULL) { CMD_ERR("Please specify a node name"); return -EINVAL; } else { node_t *node = pe_find_node(data_set->nodes, host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", host_uname); return -ENXIO; } router_node = node->details->uname; } } key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); free(key); crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname); if (safe_str_neq(router_node, host_uname)) { crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); } xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); if (rsc->clone_name) { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id); } else { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE); if (value == NULL) { CMD_ERR("%s has no type! Aborting...", rsc_id); return -ENXIO; } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS); if (value == NULL) { CMD_ERR("%s has no class! Aborting...", rsc_id); return -ENXIO; } crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER); params = create_xml_node(msg_data, XML_TAG_ATTRS); crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); crm_xml_add(params, key, "60000"); /* 1 minute */ free(key); our_pid = crm_getpid_s(); cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ free_xml(msg_data); if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { rc = 0; } else { crm_debug("Could not send %s op to the controller", op); rc = -ENOTCONN; } free_xml(cmd); return rc; } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name); } static int clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { int rc = pcmk_ok; /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. 
If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. */ rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id, TRUE, data_set); if (rc != pcmk_ok) { return rc; } crmd_replies_needed++; crm_trace("Processing %d mainloop inputs", crmd_replies_needed); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", crmd_replies_needed); } if (crmd_replies_needed < 0) { crmd_replies_needed = 0; } return rc; } static int clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_ok; const char *failed_value = NULL; const char *failed_id = NULL; const char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { interval_ms_s = crm_strdup_printf("%u", crm_parse_interval_spec(interval_spec)); } for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources, failed_id, pe_find_renamed|pe_find_anon); if (!fail_rsc || safe_str_neq(rsc_id, fail_rsc->id)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, XML_ATTR_UNAME); if (safe_str_neq(node_name, failed_value)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK); if (safe_str_neq(operation, failed_value)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); if (safe_str_neq(interval_ms_s, failed_value)) { continue; } } /* not available until glib 2.32 g_hash_table_add(rscs, (gpointer) failed_id); */ g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id); } g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set); if (rc != pcmk_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } static int clear_rsc_fail_attrs(resource_t *rsc, const char *operation, const char *interval_spec, node_t *node) { int rc = pcmk_ok; int attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__node_attr_request_clear(NULL, node->details->uname, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return rc; } int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set) { int rc = pcmk_ok; node_t *node = NULL; if (rsc == NULL) { 
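/* Aside (illustration only, not part of the original patch):
 * clear_rsc_failures() above uses a GHashTable as a set because
 * g_hash_table_add() is only available from GLib 2.32 onward.  The idiom in
 * isolation:
 *
 *   GHashTable *set = g_hash_table_new(g_str_hash, g_str_equal);
 *   g_hash_table_insert(set, (gpointer) "rsc1", (gpointer) "rsc1");
 *   // duplicates collapse onto one entry, so each key is processed once
 *   g_hash_table_destroy(set);
 */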
return -ENXIO; } else if (rsc->children) { GListPtr lpc = NULL; for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t *) lpc->data; rc = cli_resource_delete(crmd_channel, host_uname, child, operation, interval_spec, just_failures, data_set); if (rc != pcmk_ok) { return rc; } } return pcmk_ok; } else if (host_uname == NULL) { GListPtr lpc = NULL; GListPtr nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && do_force) { nodes = node_list_dup(data_set->nodes, FALSE, FALSE); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(crmd_channel, node->details->uname, rsc, operation, interval_spec, just_failures, data_set); } if (rc != pcmk_ok) { g_list_free(nodes); return rc; } } g_list_free(nodes); return pcmk_ok; } node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { printf("Unable to clean up %s because node %s not found\n", rsc->id, host_uname); return -ENODEV; } if (!node->details->rsc_discovery_enabled) { printf("Unable to clean up %s because resource discovery disabled on %s\n", rsc->id, host_uname); return -EOPNOTSUPP; } if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n", rsc->id, host_uname); return pcmk_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { printf("Unable to clean up %s failures on %s: %s\n", rsc->id, host_uname, pcmk_rc_str(rc)); return pcmk_rc2legacy(rc); } if (just_failures) { rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation, interval_spec, data_set); } else { rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set); } if (rc != pcmk_ok) { printf("Cleaned %s failures on %s, but unable to clean history: %s\n", rsc->id, host_uname, pcmk_strerror(rc)); } else { printf("Cleaned up %s on %s\n", rsc->id, host_uname); } return rc; } int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_ok; int attr_options = pcmk__node_attr_none; const char *display_name = node_name? 
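/* Aside (illustration only, not part of the original patch): a minimal
 * cleanup call against cli_resource_delete() above, with "node1" as a
 * made-up node name:
 *
 *   // operation/interval NULL = match any; just_failures FALSE = erase the
 *   // resource's whole operation history, not only the failed entries
 *   rc = cli_resource_delete(crmd_channel, "node1", rsc, NULL, NULL,
 *                            FALSE, data_set);
 */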
node_name : "all nodes"; if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s due to CIB_file\n", display_name); return pcmk_ok; } crmd_replies_needed = 0; if (node_name) { node_t *node = pe_find_node(data_set->nodes, node_name); if (node == NULL) { CMD_ERR("Unknown node: %s", node_name); return -ENXIO; } if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } } rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { printf("Unable to clean up all failures on %s: %s\n", display_name, pcmk_rc_str(rc)); return pcmk_rc2legacy(rc); } if (node_name) { rc = clear_rsc_failures(crmd_channel, node_name, NULL, operation, interval_spec, data_set); if (rc != pcmk_ok) { printf("Cleaned all resource failures on %s, but unable to clean history: %s\n", node_name, pcmk_strerror(rc)); return rc; } } else { for (GList *iter = data_set->nodes; iter; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL, operation, interval_spec, data_set); if (rc != pcmk_ok) { printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n", pcmk_strerror(rc)); return rc; } } } printf("Cleaned up all resources on %s\n", display_name); return pcmk_ok; } void cli_resource_check(cib_t * cib_conn, resource_t *rsc) { int need_nl = 0; char *role_s = NULL; char *managed = NULL; resource_t *parent = uber_parent(rsc); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); if(role_s) { enum rsc_role_e role = text2role(role_s); free(role_s); if(role == RSC_ROLE_UNKNOWN) { // Treated as if unset } else if(role == RSC_ROLE_STOPPED) { printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id); need_nl++; } else if (is_set(parent->flags, pe_rsc_promotable) && (role == RSC_ROLE_SLAVE)) { printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id); need_nl++; } } if(managed && crm_is_true(managed) == FALSE) { printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id); need_nl++; } free(managed); if(need_nl) { printf("\n"); } } int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set) { crm_warn("Failing: %s", rsc_id); return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set); } static GHashTable * generate_resource_params(resource_t * rsc, pe_working_set_t * data_set) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; if (!rsc) { crm_err("Resource does not exist in config"); return NULL; } params = crm_str_table_new(); meta = crm_str_table_new(); combined = crm_str_table_new(); get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set); get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set); if (params) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { g_hash_table_insert(combined, strdup(key), strdup(value)); } g_hash_table_destroy(params); } if (meta) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, 
meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } static bool resource_is_running_on(resource_t *rsc, const char *host) { bool found = TRUE; GListPtr hIter = NULL; GListPtr hosts = NULL; if(rsc == NULL) { return FALSE; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pe_node_t *node = (pe_node_t *) hIter->data; if(strcmp(host, node->details->uname) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } else if(strcmp(host, node->details->id) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if(host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = FALSE; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = FALSE; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { resource_t *rsc = (resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (rsc->variant == pe_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } GList* subtract_lists(GList *from, GList *items, GCompareFunc cmp) { GList *item = NULL; GList *result = g_list_copy(from); for (item = items; item != NULL; item = item->next) { GList *candidate = NULL; for (candidate = from; candidate != NULL; candidate = candidate->next) { crm_info("Comparing %s with %s", (const char *) candidate->data, (const char *) item->data); if(cmp(candidate->data, item->data) == 0) { result = g_list_remove(result, candidate->data); break; } } } return result; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { fprintf(stdout, "%s%s\n", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as working set input * * This also updates the working set timestamp to the current time. * * \param[in] data_set Working set instance to update * \param[in] xml XML to use as input * * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML * \note On success, caller is responsible for freeing memory allocated for * data_set->now. * \todo This follows the example of other callers of cli_config_update() * and returns -ENOKEY ("Required key not available") if that fails, * but perhaps -pcmk_err_schema_validation would be better in that case. 
*/ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return -ENOKEY; } data_set->input = *xml; data_set->now = crm_time_new(NULL); return pcmk_ok; } /*! * \internal * \brief Update a working set's XML input based on a CIB query * * \param[in] data_set Data set instance to initialize * \param[in] cib Connection to the CIB manager * * \return pcmk_ok on success, -errno on failure * \note On success, caller is responsible for freeing memory allocated for * data_set->input and data_set->now. */ static int update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc); return rc; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_ok) { fprintf(stderr, "Could not upgrade the current CIB XML\n"); free_xml(cib_xml_copy); return rc; } return pcmk_ok; } static int update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc; pe_reset_working_set(data_set); rc = update_working_set_from_cib(data_set, cib); if (rc != pcmk_ok) { return rc; } if(simulate) { pid = crm_getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { fprintf(stderr, "Could not create shadow cib: '%s'\n", pid); rc = -ENXIO; goto cleanup; } rc = write_xml_file(data_set->input, shadow_file, FALSE); if (rc < 0) { fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); if(rc != pcmk_ok) { fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } pcmk__schedule_actions(data_set, data_set->input, NULL); run_simulation(data_set, shadow_cib, NULL, TRUE); rc = update_dataset(shadow_cib, data_set, FALSE); } else { cluster_status(data_set); } cleanup: /* Do not free data_set->input here, we need rsc->xml to be valid later on */ cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } static int max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) { int delay = 0; int max_delay = 0; if(rsc && rsc->children) { GList *iter = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; delay = max_delay_for_resource(data_set, child); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id); max_delay = delay; } } } else if(rsc) { char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP); action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set); const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT); max_delay = crm_int_helper(value, NULL); pe_free_action(stop); } return max_delay; } static int max_delay_in(pe_working_set_t * data_set, GList *resources) { int max_delay = 0; GList *item = NULL; for (item = resources; item != NULL; item = item->next) { int delay = 0; resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data); delay = max_delay_for_resource(data_set, rsc); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, 
                      rsc->id);
            max_delay = delay;
        }
    }

    return 5 + (max_delay / 1000);
}

-#define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \
+#define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (resource_is_running_on((r), (h)) == FALSE))

/*!
 * \internal
 * \brief Restart a resource (on a particular host if requested).
 *
 * \param[in] rsc        The resource to restart
 * \param[in] host       The host to restart the resource on (or NULL for all)
 * \param[in] timeout_ms Consider failed if actions do not complete in this time
 *                       (specified in milliseconds, but a two-second
 *                       granularity is actually used; if 0, a timeout will be
 *                       calculated based on the resource timeout)
 * \param[in] cib        Connection to the CIB manager
 *
 * \return pcmk_ok on success, -errno on failure (exits on certain failures)
 */
int
cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms, cib_t *cib)
{
    int rc = 0;
    int lpc = 0;
    int before = 0;
    int step_timeout_s = 0;
    int sleep_interval = 2;
    int timeout = timeout_ms / 1000;

    bool stop_via_ban = FALSE;
    char *rsc_id = NULL;
    char *orig_target_role = NULL;

    GList *list_delta = NULL;
    GList *target_active = NULL;
    GList *current_active = NULL;
    GList *restart_target_active = NULL;

    pe_working_set_t *data_set = NULL;

    if(resource_is_running_on(rsc, host) == FALSE) {
        const char *id = rsc->clone_name?rsc->clone_name:rsc->id;

        if(host) {
            printf("%s is not running on %s and so cannot be restarted\n", id, host);
        } else {
            printf("%s is not running anywhere and so cannot be restarted\n", id);
        }
        return -ENXIO;
    }

    /* We might set the target-role meta-attribute */
    attr_set_type = XML_TAG_META_SETS;

    rsc_id = strdup(rsc->id);
    if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
        stop_via_ban = TRUE;
    }

    /*
      grab full cib
      determine originally active resources
      disable or ban
      poll cib and watch for affected resources to get stopped
      without --timeout, calculate the stop timeout for each step and wait for that
      if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
      if everything stopped, re-enable or un-ban
      poll cib and watch for affected resources to get started
      without --timeout, calculate the start timeout for each step and wait for that
      if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
      report success

      Optimizations:
      - use constraints to determine ordered list of affected resources
      - Allow a --no-deps option (aka. --force-restart)
    */

    data_set = pe_new_working_set();
    if (data_set == NULL) {
        crm_perror(LOG_ERR, "Could not allocate working set");
        rc = -ENOMEM;
        goto done;
    }
    set_bit(data_set->flags, pe_flag_no_counts);
    rc = update_dataset(cib, data_set, FALSE);
    if(rc != pcmk_ok) {
        fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc);
        goto done;
    }

    restart_target_active = get_active_resources(host, data_set->resources);
    current_active = get_active_resources(host, data_set->resources);
    dump_list(current_active, "Origin");

    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        BE_QUIET = TRUE;
        rc = cli_resource_ban(rsc_id, host, NULL, cib);

    } else {
        /* Stop the resource by setting target-role to Stopped.
         * Remember any existing target-role so we can restore it later
         * (though it only makes any difference if it's Slave).
         */
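        /* Aside (illustration only, not part of the original patch): this
         * commit's substantive change, in the wait loops below and in the
         * waiting_for_starts() macro above, is replacing
         * "g_list_length(l) > 0" emptiness tests with "l != NULL".  A GList
         * is a plain linked list whose empty value is NULL, so
         * g_list_length() walks every element (O(n)) while the NULL test is
         * O(1):
         *
         *   GList *l = NULL;             // the empty GList
         *   l = g_list_prepend(l, "a");  // non-NULL once populated
         *   if (l != NULL) {
         *       // constant-time "not empty" check
         *   }
         *   g_list_free(l);
         */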
        char *lookup_id = clone_strip(rsc->id);

        find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                           NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
        free(lookup_id);
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           RSC_STOPPED, FALSE, cib, data_set);
    }
    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
        if (current_active) {
            g_list_free_full(current_active, free);
        }
        if (restart_target_active) {
            g_list_free_full(restart_target_active, free);
        }
        goto done;
    }

    rc = update_dataset(cib, data_set, TRUE);
    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not determine which resources would be stopped\n");
        goto failure;
    }

    target_active = get_active_resources(host, data_set->resources);
    dump_list(target_active, "Target");

    list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
-    while(g_list_length(list_delta) > 0) {
+    while (list_delta != NULL) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
-        for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
+        for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, data_set, FALSE);
            if(rc != pcmk_ok) {
                fprintf(stderr, "Could not determine which resources were stopped\n");
                goto failure;
            }

            if (current_active) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(host, data_set->resources);
            g_list_free(list_delta);
            list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
        if(before == g_list_length(list_delta)) {
            /* aborted during stop phase, print the contents of list_delta */
            fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
            display_list(list_delta, " * ");
            rc = -ETIME;
            goto failure;
        }
    }

    if (stop_via_ban) {
        rc = cli_resource_clear(rsc_id, host, NULL, cib, TRUE);

    } else if (orig_target_role) {
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           orig_target_role, FALSE, cib,
                                           data_set);
        free(orig_target_role);
        orig_target_role = NULL;
    } else {
        rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE, cib,
                                           data_set);
    }

    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
        goto done;
    }

    if (target_active) {
        g_list_free_full(target_active, free);
    }
    target_active = restart_target_active;
    if (list_delta) {
        g_list_free(list_delta);
    }
    list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step
timeout */ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, data_set, FALSE); if(rc != pcmk_ok) { fprintf(stderr, "Could not determine which resources were started\n"); goto failure; } if (current_active) { g_list_free_full(current_active, free); } /* It's OK if dependent resources moved to a different node, * so we check active resources on all nodes. */ current_active = get_active_resources(NULL, data_set->resources); g_list_free(list_delta); list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } if(before == g_list_length(list_delta)) { /* aborted during start phase, print the contents of list_delta */ fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); display_list(list_delta, " * "); rc = -ETIME; goto failure; } } rc = pcmk_ok; goto done; failure: if (stop_via_ban) { cli_resource_clear(rsc_id, host, NULL, cib, TRUE); } else if (orig_target_role) { cli_resource_update_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, data_set); free(orig_target_role); } else { cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, data_set); } done: if (list_delta) { g_list_free(list_delta); } if (current_active) { g_list_free_full(current_active, free); } if (target_active && (target_active != restart_target_active)) { g_list_free_full(target_active, free); } if (restart_target_active) { g_list_free_full(restart_target_active, free); } free(rsc_id); pe_free_working_set(data_set); return rc; } static inline int action_is_pending(action_t *action) { if(is_set(action->flags, pe_action_optional)) { return FALSE; } else if(is_set(action->flags, pe_action_runnable) == FALSE) { return FALSE; } else if(is_set(action->flags, pe_action_pseudo)) { return FALSE; } else if(safe_str_eq("notify", action->task)) { return FALSE; } return TRUE; } /*! * \internal * \brief Return TRUE if any actions in a list are pending * * \param[in] actions List of actions to check * * \return TRUE if any actions in the list are pending, FALSE otherwise */ static bool actions_are_pending(GListPtr actions) { GListPtr action; for (action = actions; action != NULL; action = action->next) { action_t *a = (action_t *)action->data; if (action_is_pending(a)) { crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags); return TRUE; } } return FALSE; } /*! * \internal * \brief Print pending actions to stderr * * \param[in] actions List of actions to check * * \return void */ static void print_pending_actions(GListPtr actions) { GListPtr action; fprintf(stderr, "Pending actions:\n"); for (action = actions; action != NULL; action = action->next) { action_t *a = (action_t *) action->data; if (action_is_pending(a)) { fprintf(stderr, "\tAction %d: %s", a->id, a->uuid); if (a->node) { fprintf(stderr, "\ton %s", a->node->details->uname); } fprintf(stderr, "\n"); } } } /* For --wait, timeout (in seconds) to use if caller doesn't specify one */ #define WAIT_DEFAULT_TIMEOUT_S (60 * 60) /* For --wait, how long to sleep between cluster state checks */ #define WAIT_SLEEP_S (2) /*! * \internal * \brief Wait until all pending cluster actions are complete * * This waits until either the CIB's transition graph is idle or a timeout is * reached. 
* * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but one-second granularity * is actually used; if 0, a default will be used) * \param[in] cib Connection to the CIB manager * * \return pcmk_ok on success, -errno on failure */ int wait_till_stable(int timeout_ms, cib_t * cib) { pe_working_set_t *data_set = NULL; int rc = -1; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet data_set = pe_new_working_set(); if (data_set == NULL) { return -ENOMEM; } set_bit(data_set->flags, pe_flag_no_counts); do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff); } else { print_pending_actions(data_set->actions); pe_free_working_set(data_set); return -ETIME; } if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ pe_reset_working_set(data_set); rc = update_working_set_from_cib(data_set, cib); if (rc != pcmk_ok) { pe_free_working_set(data_set); return rc; } pcmk__schedule_actions(data_set, data_set->input, NULL); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two * could come to different conclusions about what actions need to be * done. Warn the user in this case. * * @TODO A possible long-term solution would be to reimplement the * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. */ const char *dc_version = g_hash_table_lookup(data_set->config_hash, "dc-version"); if (safe_str_neq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION)) { printf("warning: wait option may not work properly in " "mixed-version cluster\n"); printed_version_warning = TRUE; } } } while (actions_are_pending(data_set->actions)); pe_free_working_set(data_set); return pcmk_ok; } int cli_resource_execute_from_params(const char *rsc_name, const char *rsc_class, const char *rsc_prov, const char *rsc_type, const char *action, GHashTable *params, GHashTable *override_hash, int timeout_ms) { GHashTable *params_copy = NULL; int rc = pcmk_ok; svc_action_t *op = NULL; if (safe_str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH)) { CMD_ERR("Sorry, the %s option doesn't support %s resources yet", action, rsc_class); crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); } /* If no timeout was provided, grab the default. */ if (timeout_ms == 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } /* add meta_timeout env needed by some resource agents */ g_hash_table_insert(params, strdup("CRM_meta_timeout"), crm_strdup_printf("%d", timeout_ms)); /* add crm_feature_set env needed by some resource agents */ g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); /* resources_action_create frees the params hash table it's passed, but we * may need to reuse it in a second call to resources_action_create. Thus * we'll make a copy here so that gets freed and the original remains for * reuse. 
*/ params_copy = crm_str_table_dup(params); op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, timeout_ms, params_copy, 0); if (op == NULL) { /* Re-run with stderr enabled so we can display a sane error message */ crm_enable_stderr(TRUE); params_copy = crm_str_table_dup(params); op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, timeout_ms, params_copy, 0); /* Callers of cli_resource_execute expect that the params hash table will * be freed. That function uses this one, so for that reason and for * making the two act the same, we should free the hash table here too. */ g_hash_table_destroy(params); /* We know op will be NULL, but this makes static analysis happy */ services_action_free(op); crm_exit(CRM_EX_DATAERR); } setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1); if(resource_verbose > 1) { setenv("OCF_TRACE_RA", "1", 1); } /* A resource agent using the standard ocf-shellfuncs library will not print * messages to stderr if it doesn't have a controlling terminal (e.g. if * crm_resource is called via script or ssh). This forces it to do so. */ setenv("OCF_TRACE_FILE", "/dev/stderr", 0); if (override_hash) { GHashTableIter iter; char *name = NULL; char *value = NULL; g_hash_table_iter_init(&iter, override_hash); while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n", rsc_name, name, value); g_hash_table_replace(op->params, strdup(name), strdup(value)); } } if (services_action_sync(op)) { int more, lpc, last; char *local_copy = NULL; rc = op->rc; if (op->status == PCMK_LRM_OP_DONE) { printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n", action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, services_ocf_exitcode_str(op->rc), op->rc); } else { printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n", action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, services_lrm_status_str(op->status), op->status); } /* hide output for validate-all if not in verbose */ if (resource_verbose == 0 && safe_str_eq(action, "validate-all")) goto done; if (op->stdout_data) { local_copy = strdup(op->stdout_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stdout: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } if (op->stderr_data) { local_copy = strdup(op->stderr_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stderr: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } } else { rc = op->rc == 0 ? pcmk_err_generic : op->rc; } done: services_action_free(op); /* See comment above about why we free params here. 
*/ g_hash_table_destroy(params); return rc; } int cli_resource_execute(resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, int timeout_ms, cib_t * cib, pe_working_set_t *data_set) { int rc = pcmk_ok; const char *rid = NULL; const char *rtype = NULL; const char *rprov = NULL; const char *rclass = NULL; const char *action = NULL; GHashTable *params = NULL; if (safe_str_eq(rsc_action, "validate")) { action = "validate-all"; } else if (safe_str_eq(rsc_action, "force-check")) { action = "monitor"; } else if (safe_str_eq(rsc_action, "force-stop")) { action = rsc_action+6; } else if (safe_str_eq(rsc_action, "force-start") || safe_str_eq(rsc_action, "force-demote") || safe_str_eq(rsc_action, "force-promote")) { action = rsc_action+6; if(pe_rsc_is_clone(rsc)) { rc = cli_resource_search(rsc, requested_name, data_set); if(rc > 0 && do_force == FALSE) { CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", action, rsc->id); CMD_ERR("Try setting target-role=Stopped first or specifying " "the force option"); crm_exit(CRM_EX_UNSAFE); } } } else { action = rsc_action; } if(pe_rsc_is_clone(rsc)) { /* Grab the first child resource in the hope it's not a group */ rsc = rsc->children->data; } if(rsc->variant == pe_group) { CMD_ERR("Sorry, the %s option doesn't support group resources", rsc_action); crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); } rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); params = generate_resource_params(rsc, data_set); if (timeout_ms == 0) { timeout_ms = pe_get_configured_timeout(rsc, action, data_set); } rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id; rc = cli_resource_execute_from_params(rid, rclass, rprov, rtype, action, params, override_hash, timeout_ms); return rc; } int cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; unsigned int count = 0; node_t *current = NULL; node_t *dest = pe_find_node(data_set->nodes, host_name); bool cur_is_dest = FALSE; if (dest == NULL) { return -pcmk_err_node_unknown; } if (scope_master && is_not_set(rsc->flags, pe_rsc_promotable)) { resource_t *p = uber_parent(rsc); if (is_set(p->flags, pe_rsc_promotable)) { CMD_ERR("Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { CMD_ERR("Ignoring master option: %s is not promotable", rsc_id); scope_master = FALSE; } } current = pe__find_active_requires(rsc, &count); if (is_set(rsc->flags, pe_rsc_promotable)) { GListPtr iter = NULL; unsigned int master_count = 0; pe_node_t *master_node = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { rsc = child; master_node = pe__current_node(child); master_count++; } } if (scope_master || master_count) { count = master_count; current = master_node; } } if (count > 1) { if (pe_rsc_is_clone(rsc)) { current = NULL; } else { return -pcmk_err_multiple; } } if (current && (current->details == dest->details)) { cur_is_dest = TRUE; if (do_force) { crm_info("%s is already %s on %s, reinforcing placement with location constraint.", rsc_id, scope_master?"promoted":"active", dest->details->uname); } else { return -pcmk_err_already; } } /* Clear any previous prefer constraints across all nodes. 
*/ cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, FALSE); /* Clear any previous ban constraints on 'dest'. */ cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib, TRUE); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(rsc_id, dest->details->uname, cib); crm_trace("%s%s now prefers node %s%s", rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":""); /* only ban the previous location if current location != destination location. * it is possible to use -M to enforce a location without regard of where the * resource is currently located */ if(do_force && (cur_is_dest == FALSE)) { /* Ban the original location if possible */ if(current) { (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib); } else if(count > 1) { CMD_ERR("Resource '%s' is currently %s in %d locations. " "One may now move to %s", rsc_id, (scope_master? "promoted" : "active"), count, dest->details->uname); CMD_ERR("To prevent '%s' from being %s at a specific location, " "specify a node.", rsc_id, (scope_master? "promoted" : "active")); } else { crm_trace("Not banning %s from its current location: not active", rsc_id); } } return rc; } static void cli_resource_why_without_rsc_and_host(cib_t *cib_conn,GListPtr resources) { GListPtr lpc = NULL; GListPtr hosts = NULL; for (lpc = resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc->fns->location(rsc, &hosts, TRUE); if (hosts == NULL) { printf("Resource %s is not running\n", rsc->id); } else { printf("Resource %s is running\n", rsc->id); } cli_resource_check(cib_conn, rsc); g_list_free(hosts); hosts = NULL; } } static void cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources, resource_t *rsc, const char *host_uname) { if (resource_is_running_on(rsc, host_uname)) { printf("Resource %s is running on host %s\n",rsc->id,host_uname); } else { printf("Resource %s is not running on host %s\n", rsc->id, host_uname); } cli_resource_check(cib_conn, rsc); } static void cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,node_t *node) { const char* host_uname = node->details->uname; GListPtr allResources = node->details->allocated_rsc; GListPtr activeResources = node->details->running_rsc; GListPtr unactiveResources = subtract_lists(allResources,activeResources,(GCompareFunc) strcmp); GListPtr lpc = NULL; for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is running on host %s\n",rsc->id,host_uname); cli_resource_check(cib_conn,rsc); } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is assigned to host %s but not running\n", rsc->id, host_uname); cli_resource_check(cib_conn,rsc); } g_list_free(allResources); g_list_free(activeResources); g_list_free(unactiveResources); } static void cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources, resource_t *rsc) { GListPtr hosts = NULL; rsc->fns->location(rsc, &hosts, TRUE); printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not ")); cli_resource_check(cib_conn, rsc); g_list_free(hosts); } void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc, node_t *node) { const char *host_uname = (node == NULL)? 
void
cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc,
                 node_t *node)
{
    const char *host_uname = (node == NULL)? NULL : node->details->uname;

    if ((rsc == NULL) && (host_uname == NULL)) {
        cli_resource_why_without_rsc_and_host(cib_conn, resources);

    } else if ((rsc != NULL) && (host_uname != NULL)) {
        cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc,
                                           host_uname);

    } else if ((rsc == NULL) && (host_uname != NULL)) {
        cli_resource_why_without_rsc_with_host(cib_conn, resources, node);

    } else if ((rsc != NULL) && (host_uname == NULL)) {
        cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc);
    }
}
diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c
index 11388a2c4e..a2ffb34e3e 100644
--- a/tools/crm_ticket.c
+++ b/tools/crm_ticket.c
@@ -1,905 +1,905 @@
/*
- * Copyright 2012-2019 the Pacemaker project contributors
+ * Copyright 2012-2020 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

gboolean do_force = FALSE;
gboolean BE_QUIET = FALSE;
const char *ticket_id = NULL;
const char *get_attr_name = NULL;
const char *attr_name = NULL;
const char *attr_value = NULL;
const char *attr_id = NULL;
const char *set_name = NULL;
const char *attr_default = NULL;
char ticket_cmd = 'S';
char *xml_file = NULL;
int cib_options = cib_sync_call;

static ticket_t *
find_ticket(const char *ticket_id, pe_working_set_t * data_set)
{
    ticket_t *ticket = NULL;

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    return ticket;
}

static void
print_date(time_t time)
{
    int lpc = 0;
    char date_str[26];

    asctime_r(localtime(&time), date_str);
    for (; lpc < 26; lpc++) {
        if (date_str[lpc] == '\n') {
            date_str[lpc] = 0;
        }
    }
    fprintf(stdout, "'%s'", date_str);
}

static int
print_ticket(ticket_t * ticket, gboolean raw, gboolean details)
{
    if (raw) {
        fprintf(stdout, "%s\n", ticket->id);
        return pcmk_ok;
    }

    fprintf(stdout, "%s\t%s %s",
            ticket->id, ticket->granted ? "granted" : "revoked",
            ticket->standby ? "[standby]" : " ");

    if (details && g_hash_table_size(ticket->state) > 0) {
        GHashTableIter iter;
        const char *name = NULL;
        const char *value = NULL;
        int lpc = 0;

        fprintf(stdout, " (");

        g_hash_table_iter_init(&iter, ticket->state);
        while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) {
            if (lpc > 0) {
                fprintf(stdout, ", ");
            }
            fprintf(stdout, "%s=", name);
            if (crm_str_eq(name, "last-granted", TRUE)
                || crm_str_eq(name, "expires", TRUE)) {
                print_date(crm_parse_int(value, 0));
            } else {
                fprintf(stdout, "%s", value);
            }
            lpc++;
        }

        fprintf(stdout, ")\n");

    } else {
        if (ticket->last_granted > -1) {
            fprintf(stdout, " last-granted=");
            print_date(ticket->last_granted);
        }
        fprintf(stdout, "\n");
    }

    return pcmk_ok;
}

static int
print_ticket_list(pe_working_set_t * data_set, gboolean raw, gboolean details)
{
    GHashTableIter iter;
    ticket_t *ticket = NULL;

    g_hash_table_iter_init(&iter, data_set->tickets);

    while (g_hash_table_iter_next(&iter, NULL, (void **)&ticket)) {
        print_ticket(ticket, raw, details);
    }

    return pcmk_ok;
}

#define XPATH_MAX 1024

static int
find_ticket_state(cib_t * the_cib, const char *ticket_id, xmlNode ** ticket_state_xml)
{
    int offset = 0;
    int rc = pcmk_ok;
    xmlNode *xml_search = NULL;

    char *xpath_string = NULL;

    CRM_ASSERT(ticket_state_xml != NULL);
    *ticket_state_xml = NULL;

    xpath_string = calloc(1, XPATH_MAX);
    offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s",
                       "/cib/status/tickets");

    if (ticket_id) {
        offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s[@id=\"%s\"]",
                           XML_CIB_TAG_TICKET_STATE, ticket_id);
    }
    CRM_LOG_ASSERT(offset > 0);
    rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                              cib_sync_call | cib_scope_local | cib_xpath);

    if (rc != pcmk_ok) {
        goto bail;
    }

    crm_log_xml_debug(xml_search, "Match");
    if (xml_has_children(xml_search)) {
        if (ticket_id) {
            fprintf(stdout, "Multiple ticket_states match ticket_id=%s\n", ticket_id);
        }
        *ticket_state_xml = xml_search;
    } else {
        *ticket_state_xml = xml_search;
    }

  bail:
    free(xpath_string);
    return rc;
}

static int
find_ticket_constraints(cib_t * the_cib, const char *ticket_id, xmlNode ** ticket_cons_xml)
{
    int offset = 0;
    int rc = pcmk_ok;
    xmlNode *xml_search = NULL;
    char *xpath_string = NULL;

    CRM_ASSERT(ticket_cons_xml != NULL);
    *ticket_cons_xml = NULL;

    xpath_string = calloc(1, XPATH_MAX);
    offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s/%s",
                       get_object_path(XML_CIB_TAG_CONSTRAINTS),
                       XML_CONS_TAG_RSC_TICKET);

    if (ticket_id) {
        offset += snprintf(xpath_string + offset, XPATH_MAX - offset,
                           "[@ticket=\"%s\"]", ticket_id);
    }
    CRM_LOG_ASSERT(offset > 0);
    rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                              cib_sync_call | cib_scope_local | cib_xpath);

    if (rc != pcmk_ok) {
        goto bail;
    }

    crm_log_xml_debug(xml_search, "Match");
    *ticket_cons_xml = xml_search;

  bail:
    free(xpath_string);
    return rc;
}

static int
dump_ticket_xml(cib_t * the_cib, const char *ticket_id)
{
    int rc = pcmk_ok;
    xmlNode *state_xml = NULL;

    rc = find_ticket_state(the_cib, ticket_id, &state_xml);

    if (state_xml == NULL) {
        return rc;
    }

    fprintf(stdout, "State XML:\n");
    if (state_xml) {
        char *state_xml_str = NULL;

        state_xml_str = dump_xml_formatted(state_xml);
        fprintf(stdout, "\n%s\n", crm_str(state_xml_str));
        free_xml(state_xml);
        free(state_xml_str);
    }

    return pcmk_ok;
}

static int
dump_constraints(cib_t * the_cib, const char *ticket_id)
{
    int rc = pcmk_ok;
    xmlNode *cons_xml = NULL;
    char *cons_xml_str = NULL;

    rc = find_ticket_constraints(the_cib, ticket_id, &cons_xml);

    if (cons_xml == NULL) {
        return rc;
    }

    cons_xml_str = dump_xml_formatted(cons_xml);
    fprintf(stdout, "Constraints XML:\n\n%s\n", crm_str(cons_xml_str));
    free_xml(cons_xml);
    free(cons_xml_str);

    return pcmk_ok;
}
static int
get_ticket_state_attr(const char *ticket_id, const char *attr_name, const char **attr_value,
                      pe_working_set_t * data_set)
{
    ticket_t *ticket = NULL;

    CRM_ASSERT(attr_value != NULL);
    *attr_value = NULL;

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {
        return -ENXIO;
    }

    *attr_value = g_hash_table_lookup(ticket->state, attr_name);
    if (*attr_value == NULL) {
        return -ENXIO;
    }

    return pcmk_ok;
}

static gboolean
ticket_warning(const char *ticket_id, const char *action)
{
    gboolean rc = FALSE;
    int offset = 0;
    static int text_max = 1024;

    char *warning = NULL;
    const char *word = NULL;

    warning = calloc(1, text_max);
    if (safe_str_eq(action, "grant")) {
        offset += snprintf(warning + offset, text_max - offset,
                           "This command cannot help you verify whether '%s' has already been granted elsewhere.\n",
                           ticket_id);
        word = "to";

    } else {
        offset += snprintf(warning + offset, text_max - offset,
                           "Revoking '%s' can trigger the specified 'loss-policy'(s) relating to '%s'.\n\n",
                           ticket_id, ticket_id);

        offset += snprintf(warning + offset, text_max - offset,
                           "You can check that with:\ncrm_ticket --ticket %s --constraints\n\n",
                           ticket_id);

        offset += snprintf(warning + offset, text_max - offset,
                           "Otherwise before revoking '%s', you may want to make '%s' standby with:\ncrm_ticket --ticket %s --standby\n\n",
                           ticket_id, ticket_id, ticket_id);
        word = "from";
    }

    offset += snprintf(warning + offset, text_max - offset,
                       "If you really want to %s '%s' %s this site now, and you know what you are doing,\n",
                       action, ticket_id, word);

    offset += snprintf(warning + offset, text_max - offset,
                       "please specify --force.");

    CRM_LOG_ASSERT(offset > 0);
    fprintf(stdout, "%s\n", warning);

    free(warning);
    return rc;
}

static gboolean
allow_modification(const char *ticket_id, GListPtr attr_delete, GHashTable *attr_set)
{
    const char *value = NULL;
    GListPtr list_iter = NULL;

    if (do_force) {
        return TRUE;
    }

    if (g_hash_table_lookup_extended(attr_set, "granted", NULL, (gpointer *) & value)) {
        if (crm_is_true(value)) {
            ticket_warning(ticket_id, "grant");
            return FALSE;

        } else {
            ticket_warning(ticket_id, "revoke");
            return FALSE;
        }
    }

    for(list_iter = attr_delete; list_iter; list_iter = list_iter->next) {
        const char *key = (const char *)list_iter->data;

        if (safe_str_eq(key, "granted")) {
            ticket_warning(ticket_id, "revoke");
            return FALSE;
        }
    }

    return TRUE;
}
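
/*!
 * \brief Apply queued attribute deletions and updates to a ticket's state
 *        section in the CIB, creating the section if it does not exist and
 *        recording "last-granted" when a ticket is newly granted
 */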
static int
modify_ticket_state(const char * ticket_id, GListPtr attr_delete, GHashTable * attr_set,
                    cib_t * cib, pe_working_set_t * data_set)
{
    int rc = pcmk_ok;
    xmlNode *xml_top = NULL;
    xmlNode *ticket_state_xml = NULL;
    gboolean found = FALSE;

    GListPtr list_iter = NULL;
    GHashTableIter hash_iter;

    char *key = NULL;
    char *value = NULL;

    ticket_t *ticket = NULL;

    rc = find_ticket_state(cib, ticket_id, &ticket_state_xml);
    if (rc == pcmk_ok) {
        crm_debug("Found a matching state for ticket: id=%s", ticket_id);
        xml_top = ticket_state_xml;
        found = TRUE;

    } else if (rc != -ENXIO) {
        return rc;

    } else if (g_hash_table_size(attr_set) == 0){
        return pcmk_ok;

    } else {
        xmlNode *xml_obj = NULL;

        xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
        xml_obj = create_xml_node(xml_top, XML_CIB_TAG_TICKETS);
        ticket_state_xml = create_xml_node(xml_obj, XML_CIB_TAG_TICKET_STATE);
        crm_xml_add(ticket_state_xml, XML_ATTR_ID, ticket_id);
    }

    for(list_iter = attr_delete; list_iter; list_iter = list_iter->next) {
        const char *key = (const char *)list_iter->data;

        xml_remove_prop(ticket_state_xml, key);
    }

    ticket = find_ticket(ticket_id, data_set);

    g_hash_table_iter_init(&hash_iter, attr_set);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *) & key, (gpointer *) & value)) {
        crm_xml_add(ticket_state_xml, key, value);

        if (safe_str_eq(key, "granted")
            && (ticket == NULL || ticket->granted == FALSE)
            && crm_is_true(value)) {

            char *now = crm_ttoa(time(NULL));

            crm_xml_add(ticket_state_xml, "last-granted", now);
            free(now);
        }
    }

-    if (found && g_list_length(attr_delete)) {
+    if (found && (attr_delete != NULL)) {
        crm_log_xml_debug(xml_top, "Replace");
        rc = cib->cmds->replace(cib, XML_CIB_TAG_STATUS, ticket_state_xml, cib_options);

    } else {
        crm_log_xml_debug(xml_top, "Update");
        rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, xml_top, cib_options);
    }

    free_xml(xml_top);
    return rc;
}
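
/*!
 * \brief Remove a ticket's state section from the CIB status
 *
 * A ticket with no state section (-ENXIO) is treated as already cleaned up.
 */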
static int
delete_ticket_state(const char *ticket_id, cib_t * cib)
{
    xmlNode *ticket_state_xml = NULL;

    int rc = pcmk_ok;

    rc = find_ticket_state(cib, ticket_id, &ticket_state_xml);

    if (rc == -ENXIO) {
        return pcmk_ok;

    } else if (rc != pcmk_ok) {
        return rc;
    }

    crm_log_xml_debug(ticket_state_xml, "Delete");

    rc = cib->cmds->remove(cib, XML_CIB_TAG_STATUS, ticket_state_xml, cib_options);

    if (rc == pcmk_ok) {
        fprintf(stdout, "Cleaned up %s\n", ticket_id);
    }

    free_xml(ticket_state_xml);
    return rc;
}

/* *INDENT-OFF* */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help",    0, 0, '?', "\t\tThis text"},
    {"version", 0, 0, '$', "\t\tVersion information"},
    {"verbose", 0, 0, 'V', "\t\tIncrease debug output"},
    {"quiet",   0, 0, 'Q', "\t\tPrint only the value on stdout\n"},

    {"ticket",  1, 0, 't', "\tTicket ID"},

    {"-spacer-",   1, 0, '-', "\nQueries:"},
    {"info",       0, 0, 'l', "\t\tDisplay the information of ticket(s)"},
    {"details",    0, 0, 'L', "\t\tDisplay the details of ticket(s)"},
    {"raw",        0, 0, 'w', "\t\tDisplay the IDs of ticket(s)"},
    {"query-xml",  0, 0, 'q', "\tQuery the XML of ticket(s)"},
    {"constraints",0, 0, 'c', "\tDisplay the rsc_ticket constraints that apply to ticket(s)"},

    {"-spacer-",   1, 0, '-', "\nCommands:"},
    {"grant",      0, 0, 'g', "\t\tGrant a ticket to this cluster site"},
    {"revoke",     0, 0, 'r', "\t\tRevoke a ticket from this cluster site"},
    {"standby",    0, 0, 's', "\t\tTell this cluster site this ticket is standby"},
    {"activate",   0, 0, 'a', "\tTell this cluster site this ticket is active"},

    {"-spacer-",   1, 0, '-', "\nAdvanced Commands:"},
    {"get-attr",   1, 0, 'G', "\tDisplay the named attribute for a ticket"},
    {"set-attr",   1, 0, 'S', "\tSet the named attribute for a ticket"},
    {"delete-attr",1, 0, 'D', "\tDelete the named attribute for a ticket"},
    {"cleanup",    0, 0, 'C', "\t\tDelete all state of a ticket at this cluster site"},

    {"-spacer-",   1, 0, '-', "\nAdditional Options:"},
    {"attr-value", 1, 0, 'v', "\tAttribute value to use with -S"},
    {"default",    1, 0, 'd', "\t(Advanced) The default attribute value to display if none is found. For use with -G"},
    {"force",      0, 0, 'f', "\t\t(Advanced) Force the action to be performed"},
    {"xml-file",   1, 0, 'x', NULL, 1},

    /* legacy options */
    {"set-name",   1, 0, 'n', "\t(Advanced) ID of the instance_attributes object to change"},
    {"nvpair",     1, 0, 'i', "\t(Advanced) ID of the nvpair object to change/delete"},

    {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "Display the info of tickets:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --info", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the detailed info of tickets:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --details", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the XML of 'ticketA':", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --query-xml", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the rsc_ticket constraints that apply to 'ticketA':", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --constraints", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Grant 'ticketA' to this cluster site:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --grant", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Revoke 'ticketA' from this cluster site:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --revoke", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Make 'ticketA' standby:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "The cluster site will treat a granted 'ticketA' as 'standby'."},
    {"-spacer-", 1, 0, '-', "The dependent resources will be stopped or demoted gracefully without triggering loss-policies", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --standby", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Activate 'ticketA' from being standby:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --activate", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Get the value of the 'granted' attribute for 'ticketA':", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --get-attr granted", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Set the value of the 'standby' attribute for 'ticketA':", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --set-attr standby --attr-value true", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Delete the 'granted' attribute for 'ticketA':", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --delete-attr granted", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Erase the operation history of 'ticketA' at this cluster site:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "The cluster site will 'forget' the existing ticket state.", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_ticket --ticket ticketA --cleanup", pcmk_option_example},

    {0, 0, 0, 0}
};
/* *INDENT-ON* */
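
/* Parse the command line, connect to the CIB (or read it from a file given
 * with -x), build the scheduler working set, and then dispatch on the
 * requested query or command.
 */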
int
main(int argc, char **argv)
{
    pe_working_set_t *data_set = NULL;
    xmlNode *cib_xml_copy = NULL;
    xmlNode *cib_constraints = NULL;

    cib_t *cib_conn = NULL;
    crm_exit_t exit_code = CRM_EX_OK;
    int rc = pcmk_ok;

    int option_index = 0;
    int argerr = 0;
    int flag;
    guint modified = 0;

    GListPtr attr_delete = NULL;
    GHashTable *attr_set = crm_str_table_new();

    crm_log_init(NULL, LOG_CRIT, FALSE, FALSE, argc, argv, FALSE);
    crm_set_options(NULL, "(query|command) [options]", long_options,
                    "Perform tasks related to cluster tickets.\nAllows ticket attributes to be queried, modified and deleted.\n");

    if (argc < 2) {
        crm_help('?', CRM_EX_USAGE);
    }

    while (1) {
        flag = crm_get_option(argc, argv, &option_index);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                crm_bump_log_level(argc, argv);
                break;
            case '$':
            case '?':
                crm_help(flag, CRM_EX_OK);
                break;
            case 'Q':
                BE_QUIET = TRUE;
                break;
            case 't':
                ticket_id = optarg;
                break;
            case 'l':
            case 'L':
            case 'w':
            case 'q':
            case 'c':
                ticket_cmd = flag;
                break;
            case 'g':
                g_hash_table_insert(attr_set, strdup("granted"), strdup("true"));
                modified++;
                break;
            case 'r':
                g_hash_table_insert(attr_set, strdup("granted"), strdup("false"));
                modified++;
                break;
            case 's':
                g_hash_table_insert(attr_set, strdup("standby"), strdup("true"));
                modified++;
                break;
            case 'a':
                g_hash_table_insert(attr_set, strdup("standby"), strdup("false"));
                modified++;
                break;
            case 'G':
                get_attr_name = optarg;
                ticket_cmd = flag;
                break;
            case 'S':
                attr_name = optarg;
                if (attr_name && attr_value) {
                    g_hash_table_insert(attr_set, strdup(attr_name), strdup(attr_value));
                    attr_name = NULL;
                    attr_value = NULL;
                    modified++;
                }
                break;
            case 'D':
                attr_delete = g_list_append(attr_delete, optarg);
                modified++;
                break;
            case 'C':
                ticket_cmd = flag;
                break;
            case 'v':
                attr_value = optarg;
                if (attr_name && attr_value) {
                    g_hash_table_insert(attr_set, strdup(attr_name), strdup(attr_value));
                    attr_name = NULL;
                    attr_value = NULL;
                    modified++;
                }
                break;
            case 'd':
                attr_default = optarg;
                break;
            case 'f':
                do_force = TRUE;
                break;
            case 'x':
                xml_file = strdup(optarg);
                break;
            case 'n':
                set_name = optarg;
                break;
            case 'i':
                attr_id = optarg;
                break;
            default:
                CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag);
                ++argerr;
                break;
        }
    }

    if (optind < argc && argv[optind] != NULL) {
        CMD_ERR("non-option ARGV-elements:");
        while (optind < argc && argv[optind] != NULL) {
            CMD_ERR("%s", argv[optind++]);
            ++argerr;
        }
    }

    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        crm_help('?', CRM_EX_USAGE);
    }

    data_set = pe_new_working_set();
    if (data_set == NULL) {
        crm_perror(LOG_CRIT, "Could not allocate working set");
        exit_code = CRM_EX_OSERR;
        goto bail;
    }
    set_bit(data_set->flags, pe_flag_no_counts);

    cib_conn = cib_new();
    if (cib_conn == NULL) {
        CMD_ERR("Could not connect to the CIB manager");
        exit_code = CRM_EX_DISCONNECT;
        goto bail;
    }

    rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
    if (rc != pcmk_ok) {
        CMD_ERR("Could not connect to the CIB manager: %s", pcmk_strerror(rc));
        exit_code = crm_errno2exit(rc);
        goto bail;
    }

    if (xml_file != NULL) {
        cib_xml_copy = filename2xml(xml_file);

    } else {
        rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy,
                                   cib_scope_local | cib_sync_call);
        if (rc != pcmk_ok) {
            CMD_ERR("Could not get local CIB: %s", pcmk_strerror(rc));
            exit_code = crm_errno2exit(rc);
            goto bail;
        }
    }

    if (cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) {
        CMD_ERR("Could not update local CIB to latest schema version");
        exit_code = CRM_EX_CONFIG;
        goto bail;
    }

    data_set->input = cib_xml_copy;
    data_set->now = crm_time_new(NULL);

    cluster_status(data_set);

    /* For recording the tickets that are referenced in rsc_ticket constraints
     * but have never been granted yet.
     */
    cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
    unpack_constraints(cib_constraints, data_set);
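
    /* Perform the requested query or command */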
    if (ticket_cmd == 'l' || ticket_cmd == 'L' || ticket_cmd == 'w') {
        gboolean raw = FALSE;
        gboolean details = FALSE;

        if (ticket_cmd == 'L') {
            details = TRUE;
        } else if (ticket_cmd == 'w') {
            raw = TRUE;
        }

        if (ticket_id) {
            ticket_t *ticket = find_ticket(ticket_id, data_set);

            if (ticket == NULL) {
                CMD_ERR("No such ticket '%s'", ticket_id);
                exit_code = CRM_EX_NOSUCH;
                goto bail;
            }
            rc = print_ticket(ticket, raw, details);

        } else {
            rc = print_ticket_list(data_set, raw, details);
        }
        if (rc != pcmk_ok) {
            CMD_ERR("Could not print ticket: %s", pcmk_strerror(rc));
        }
        exit_code = crm_errno2exit(rc);

    } else if (ticket_cmd == 'q') {
        rc = dump_ticket_xml(cib_conn, ticket_id);
        if (rc != pcmk_ok) {
            CMD_ERR("Could not query ticket XML: %s", pcmk_strerror(rc));
        }
        exit_code = crm_errno2exit(rc);

    } else if (ticket_cmd == 'c') {
        rc = dump_constraints(cib_conn, ticket_id);
        if (rc != pcmk_ok) {
            CMD_ERR("Could not show ticket constraints: %s", pcmk_strerror(rc));
        }
        exit_code = crm_errno2exit(rc);

    } else if (ticket_cmd == 'G') {
        const char *value = NULL;

        if (ticket_id == NULL) {
            CMD_ERR("Must supply ticket ID with -t");
            exit_code = CRM_EX_NOSUCH;
            goto bail;
        }

        rc = get_ticket_state_attr(ticket_id, get_attr_name, &value, data_set);
        if (rc == pcmk_ok) {
            fprintf(stdout, "%s\n", value);
        } else if (rc == -ENXIO && attr_default) {
            fprintf(stdout, "%s\n", attr_default);
            rc = pcmk_ok;
        }
        exit_code = crm_errno2exit(rc);

    } else if (ticket_cmd == 'C') {
        if (ticket_id == NULL) {
            CMD_ERR("Must supply ticket ID with -t");
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (do_force == FALSE) {
            ticket_t *ticket = NULL;

            ticket = find_ticket(ticket_id, data_set);
            if (ticket == NULL) {
                CMD_ERR("No such ticket '%s'", ticket_id);
                exit_code = CRM_EX_NOSUCH;
                goto bail;
            }

            if (ticket->granted) {
                ticket_warning(ticket_id, "revoke");
                exit_code = CRM_EX_INSUFFICIENT_PRIV;
                goto bail;
            }
        }
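
        /* Either --force was given or the ticket is not currently granted, so
         * it is safe to drop its state section from the CIB status.
         */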
        rc = delete_ticket_state(ticket_id, cib_conn);
        if (rc != pcmk_ok) {
            CMD_ERR("Could not clean up ticket: %s", pcmk_strerror(rc));
        }
        exit_code = crm_errno2exit(rc);

    } else if (modified) {
        if (ticket_id == NULL) {
            CMD_ERR("Must supply ticket ID with -t");
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (attr_value
            && (attr_name == NULL || strlen(attr_name) == 0)) {
            CMD_ERR("Must supply attribute name with -S for -v %s", attr_value);
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (attr_name
            && (attr_value == NULL || strlen(attr_value) == 0)) {
            CMD_ERR("Must supply attribute value with -v for -S %s", attr_name);
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (allow_modification(ticket_id, attr_delete, attr_set) == FALSE) {
            CMD_ERR("Ticket modification not allowed");
            exit_code = CRM_EX_INSUFFICIENT_PRIV;
            goto bail;
        }

        rc = modify_ticket_state(ticket_id, attr_delete, attr_set, cib_conn, data_set);
        if (rc != pcmk_ok) {
            CMD_ERR("Could not modify ticket: %s", pcmk_strerror(rc));
        }
        exit_code = crm_errno2exit(rc);

    } else if (ticket_cmd == 'S') {
        /* Correct usage was handled in the "if (modified)" block above, so
         * this is just for reporting usage errors
         */

        if (attr_name == NULL || strlen(attr_name) == 0) {
            // We only get here if ticket_cmd was left as default
            CMD_ERR("Must supply a command");
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (ticket_id == NULL) {
            CMD_ERR("Must supply ticket ID with -t");
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

        if (attr_value == NULL || strlen(attr_value) == 0) {
            CMD_ERR("Must supply value with -v for -S %s", attr_name);
            exit_code = CRM_EX_USAGE;
            goto bail;
        }

    } else {
        CMD_ERR("Unknown command: %c", ticket_cmd);
        exit_code = CRM_EX_USAGE;
    }

  bail:
    if (attr_set) {
        g_hash_table_destroy(attr_set);
    }
    attr_set = NULL;

    if (attr_delete) {
        g_list_free(attr_delete);
    }
    attr_delete = NULL;

    pe_free_working_set(data_set);
    data_set = NULL;

    if (cib_conn != NULL) {
        cib_conn->cmds->signoff(cib_conn);
        cib_delete(cib_conn);
    }

    if (rc == -pcmk_err_no_quorum) {
        CMD_ERR("Use --force to ignore quorum");
    }

    crm_exit(exit_code);
}