diff --git a/crmd/control.c b/crmd/control.c index 3e0370cec5..1fb92bbff0 100644 --- a/crmd/control.c +++ b/crmd/control.c @@ -1,1146 +1,1145 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include qb_ipcs_service_t *ipcs = NULL; extern gboolean crm_connect_corosync(crm_cluster_t * cluster); extern void crmd_ha_connection_destroy(gpointer user_data); void crm_shutdown(int nsig); gboolean crm_read_options(gpointer user_data); gboolean fsa_has_quorum = FALSE; crm_trigger_t *fsa_source = NULL; crm_trigger_t *config_read = NULL; bool no_quorum_suicide_escalation = FALSE; static gboolean election_timeout_popped(gpointer data) { /* Not everyone voted */ crm_info("Election failed: Declaring ourselves the winner"); register_fsa_input(C_TIMER_POPPED, I_ELECTION_DC, NULL); return FALSE; } /* A_HA_CONNECT */ void do_ha_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean registered = FALSE; static crm_cluster_t *cluster = NULL; if (cluster == NULL) { cluster = calloc(1, sizeof(crm_cluster_t)); } if (action & A_HA_DISCONNECT) { crm_cluster_disconnect(cluster); crm_info("Disconnected from the cluster"); set_bit(fsa_input_register, R_HA_DISCONNECTED); } if (action & A_HA_CONNECT) { crm_set_status_callback(&peer_update_callback); crm_set_autoreap(FALSE); if (is_openais_cluster()) { #if SUPPORT_COROSYNC registered = crm_connect_corosync(cluster); #endif } else if (is_heartbeat_cluster()) { #if SUPPORT_HEARTBEAT cluster->destroy = crmd_ha_connection_destroy; cluster->hb_dispatch = crmd_ha_msg_callback; registered = crm_cluster_connect(cluster); fsa_cluster_conn = cluster->hb_conn; crm_trace("Be informed of Node Status changes"); if (registered && fsa_cluster_conn->llc_ops->set_nstatus_callback(fsa_cluster_conn, crmd_ha_status_callback, fsa_cluster_conn) != HA_OK) { crm_err("Cannot set nstatus callback: %s", fsa_cluster_conn->llc_ops->errmsg(fsa_cluster_conn)); registered = FALSE; } crm_trace("Be informed of CRM Client Status changes"); if (registered && fsa_cluster_conn->llc_ops->set_cstatus_callback(fsa_cluster_conn, crmd_client_status_callback, fsa_cluster_conn) != HA_OK) { crm_err("Cannot set cstatus callback: %s", fsa_cluster_conn->llc_ops->errmsg(fsa_cluster_conn)); registered = FALSE; } if (registered) { crm_trace("Requesting an initial dump of CRMD client_status"); fsa_cluster_conn->llc_ops->client_status(fsa_cluster_conn, NULL, CRM_SYSTEM_CRMD, -1); } #endif } fsa_election = election_init(NULL, cluster->uname, 60000/*60s*/, election_timeout_popped); fsa_our_uname = cluster->uname; fsa_our_uuid = cluster->uuid; if(cluster->uuid == NULL) { crm_err("Could not obtain 
local uuid"); registered = FALSE; } if (registered == FALSE) { set_bit(fsa_input_register, R_HA_DISCONNECTED); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } populate_cib_nodes(node_update_none, __FUNCTION__); clear_bit(fsa_input_register, R_HA_DISCONNECTED); crm_info("Connected to the cluster"); } if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static bool need_spawn_pengine_from_crmd(void) { static int result = -1; if (result != -1) return result; if (!is_heartbeat_cluster()) { result = 0; return result; } /* NULL, or "strange" value: rather spawn from here. */ result = TRUE; crm_str_to_boolean(daemon_option("crmd_spawns_pengine"), &result); return result; } /* A_SHUTDOWN */ void do_shutdown(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* just in case */ set_bit(fsa_input_register, R_SHUTDOWN); if (need_spawn_pengine_from_crmd()) { if (is_set(fsa_input_register, pe_subsystem->flag_connected)) { crm_info("Terminating the %s", pe_subsystem->name); if (stop_subsystem(pe_subsystem, TRUE) == FALSE) { /* its gone... */ crm_err("Faking %s exit", pe_subsystem->name); clear_bit(fsa_input_register, pe_subsystem->flag_connected); } else { crm_info("Waiting for subsystems to exit"); crmd_fsa_stall(FALSE); } } crm_info("All subsystems stopped, continuing"); } if (stonith_api) { /* Prevent it from coming up again */ clear_bit(fsa_input_register, R_ST_REQUIRED); crm_info("Disconnecting STONITH..."); stonith_api->cmds->disconnect(stonith_api); } } /* A_SHUTDOWN_REQ */ void do_shutdown_req(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *msg = NULL; crm_info("Sending shutdown request to %s", crm_str(fsa_our_dc)); msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); /* set_bit(fsa_input_register, R_STAYDOWN); */ if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } free_xml(msg); } extern crm_ipc_t *attrd_ipc; extern char *max_generation_from; extern xmlNode *max_generation_xml; extern GHashTable *resource_history; extern GHashTable *voted; extern GHashTable *metadata_hash; extern char *te_client_id; void log_connected_client(gpointer key, gpointer value, gpointer user_data); void log_connected_client(gpointer key, gpointer value, gpointer user_data) { crm_client_t *client = value; crm_err("%s is still connected at exit", crm_client_name(client)); } int crmd_fast_exit(int rc) { if (is_set(fsa_input_register, R_STAYDOWN)) { crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d", rc, DAEMON_RESPAWN_STOP); rc = DAEMON_RESPAWN_STOP; } if (rc == pcmk_ok && is_set(fsa_input_register, R_IN_RECOVERY)) { crm_err("Could not recover from internal error"); rc = pcmk_err_generic; } return crm_exit(rc); } int crmd_exit(int rc) { GListPtr gIter = NULL; GMainLoop *mloop = crmd_mainloop; static bool in_progress = FALSE; if(in_progress && rc == 0) { crm_debug("Exit is already in progress"); return rc; } else if(in_progress) { crm_notice("Error during shutdown process, terminating now with status %d: %s", rc, pcmk_strerror(rc)); crm_write_blackbox(SIGTRAP, NULL); crmd_fast_exit(rc); } in_progress = TRUE; crm_trace("Preparing to exit: %d", rc); /* Suppress secondary errors resulting from us disconnecting everything */ 
set_bit(fsa_input_register, R_HA_DISCONNECTED); /* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */ if(ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } if (attrd_ipc) { crm_trace("Closing attrd connection"); crm_ipc_close(attrd_ipc); crm_ipc_destroy(attrd_ipc); attrd_ipc = NULL; } if (pe_subsystem && pe_subsystem->client && pe_subsystem->client->ipcs) { crm_trace("Disconnecting Policy Engine"); qb_ipcs_disconnect(pe_subsystem->client->ipcs); } if(stonith_api) { crm_trace("Disconnecting fencing API"); clear_bit(fsa_input_register, R_ST_REQUIRED); stonith_api->cmds->free(stonith_api); stonith_api = NULL; } if (rc == pcmk_ok && crmd_mainloop == NULL) { crm_debug("No mainloop detected"); rc = EPROTO; } /* On an error, just get out. * * Otherwise, make the effort to have mainloop exit gracefully so * that it (mostly) cleans up after itself and valgrind has less * to report on - allowing real errors to stand out */ if(rc != pcmk_ok) { crm_notice("Forcing immediate exit with status %d: %s", rc, pcmk_strerror(rc)); crm_write_blackbox(SIGTRAP, NULL); return crmd_fast_exit(rc); } /* Clean up as much memory as possible for valgrind */ for (gIter = fsa_message_queue; gIter != NULL; gIter = gIter->next) { fsa_data_t *fsa_data = gIter->data; crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]", fsa_input2string(fsa_data->fsa_input), fsa_state2string(fsa_state), fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); delete_fsa_input(fsa_data); } clear_bit(fsa_input_register, R_MEMBERSHIP); g_list_free(fsa_message_queue); fsa_message_queue = NULL; free(pe_subsystem); pe_subsystem = NULL; free(te_subsystem); te_subsystem = NULL; free(cib_subsystem); cib_subsystem = NULL; if (metadata_hash) { crm_trace("Destroying reload cache with %d members", g_hash_table_size(metadata_hash)); g_hash_table_destroy(metadata_hash); metadata_hash = NULL; } election_fini(fsa_election); fsa_election = NULL; cib_delete(fsa_cib_conn); fsa_cib_conn = NULL; verify_stopped(fsa_state, LOG_WARNING); clear_bit(fsa_input_register, R_LRM_CONNECTED); lrm_state_destroy_all(); /* This basically will not work, since mainloop has a reference to it */ mainloop_destroy_trigger(fsa_source); fsa_source = NULL; mainloop_destroy_trigger(config_read); config_read = NULL; mainloop_destroy_trigger(stonith_reconnect); stonith_reconnect = NULL; mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL; crm_client_cleanup(); crm_peer_destroy(); crm_timer_stop(transition_timer); crm_timer_stop(integration_timer); crm_timer_stop(finalization_timer); crm_timer_stop(election_trigger); election_timeout_stop(fsa_election); crm_timer_stop(shutdown_escalation_timer); crm_timer_stop(wait_timer); crm_timer_stop(recheck_timer); free(transition_timer); transition_timer = NULL; free(integration_timer); integration_timer = NULL; free(finalization_timer); finalization_timer = NULL; free(election_trigger); election_trigger = NULL; election_fini(fsa_election); free(shutdown_escalation_timer); shutdown_escalation_timer = NULL; free(wait_timer); wait_timer = NULL; free(recheck_timer); recheck_timer = NULL; free(fsa_our_dc_version); fsa_our_dc_version = NULL; free(fsa_our_uname); fsa_our_uname = NULL; free(fsa_our_uuid); fsa_our_uuid = NULL; free(fsa_our_dc); fsa_our_dc = NULL; free(fsa_cluster_name); fsa_cluster_name = NULL; free(te_uuid); te_uuid = NULL; free(te_client_id); te_client_id = NULL; free(fsa_pe_ref); fsa_pe_ref = NULL; free(failed_stop_offset);
failed_stop_offset = NULL; free(failed_start_offset); failed_start_offset = NULL; free(max_generation_from); max_generation_from = NULL; free_xml(max_generation_xml); max_generation_xml = NULL; mainloop_destroy_signal(SIGPIPE); mainloop_destroy_signal(SIGUSR1); mainloop_destroy_signal(SIGTERM); mainloop_destroy_signal(SIGTRAP); mainloop_destroy_signal(SIGCHLD); if (mloop) { int lpc = 0; GMainContext *ctx = g_main_loop_get_context(crmd_mainloop); /* Don't re-enter this block */ crmd_mainloop = NULL; crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); while(g_main_context_pending(ctx) && lpc < 10) { lpc++; crm_trace("Iteration %d", lpc); g_main_context_dispatch(ctx); } crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); g_main_loop_quit(mloop); #if SUPPORT_HEARTBEAT /* Do this only after g_main_loop_quit(). * * This interface has been broken (incomplete) since it was introduced. * ->delete() does clean up and free most of it, but it does not * actually remove and destroy the corresponding GSource, so the next * prepare/check iteration would find a corrupt (because partially * freed) GSource, and segfault. * * Apparently one was supposed to store the GSource as returned by * G_main_add_ll_cluster(), and g_source_destroy() that "by hand". * * But no-one ever did this, not even in the old hb code when this was * introduced. * * Note that fsa_cluster_conn was set as an "alias" to cluster->hb_conn * in do_ha_control() right after crm_cluster_connect(), and only * happens to still point at that object, because do_ha_control() does * not reset it to NULL after crm_cluster_disconnect() above does * reset cluster->hb_conn to NULL. * Not sure if that's something to clean up, too. * * I'll try to fix this up in heartbeat proper, so ->delete * will actually remove, and destroy, and unref, and free this thing. * Doing so after g_main_loop_quit() is valid with both old, * and eventually fixed heartbeat.
 * * If we introduce the "by hand" destroy/remove/unref, * this may break again once heartbeat is fixed :-( * * -- Lars Ellenberg */ if (fsa_cluster_conn) { crm_trace("Deleting heartbeat api object"); fsa_cluster_conn->llc_ops->delete(fsa_cluster_conn); fsa_cluster_conn = NULL; } #endif /* Won't do anything yet, since we're inside it now */ g_main_loop_unref(mloop); crm_trace("Done %d", rc); } /* Graceful */ return rc; } /* A_EXIT_0, A_EXIT_1 */ void do_exit(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int exit_code = pcmk_ok; int log_level = LOG_INFO; const char *exit_type = "gracefully"; if (action & A_EXIT_1) { /* exit_code = pcmk_err_generic; */ log_level = LOG_ERR; exit_type = "forcefully"; exit_code = pcmk_err_generic; } verify_stopped(cur_state, LOG_ERR); do_crm_log(log_level, "Performing %s - %s exiting the CRMd", fsa_action2string(action), exit_type); crm_info("[%s] stopped (%d)", crm_system_name, exit_code); crmd_exit(exit_code); } static void sigpipe_ignore(int nsig) { return; } /* A_STARTUP */ void do_startup(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int was_error = 0; crm_debug("Registering Signal Handlers"); mainloop_add_signal(SIGTERM, crm_shutdown); mainloop_add_signal(SIGPIPE, sigpipe_ignore); fsa_source = mainloop_add_trigger(G_PRIORITY_HIGH, crm_fsa_trigger, NULL); config_read = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL); transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL); crm_debug("Creating CIB and LRM objects"); fsa_cib_conn = cib_new(); lrm_state_init_local(); /* set up the timers */ transition_timer = calloc(1, sizeof(fsa_timer_t)); integration_timer = calloc(1, sizeof(fsa_timer_t)); finalization_timer = calloc(1, sizeof(fsa_timer_t)); election_trigger = calloc(1, sizeof(fsa_timer_t)); shutdown_escalation_timer = calloc(1, sizeof(fsa_timer_t)); wait_timer = calloc(1, sizeof(fsa_timer_t)); recheck_timer = calloc(1, sizeof(fsa_timer_t)); if (election_trigger != NULL) { election_trigger->source_id = 0; election_trigger->period_ms = -1; election_trigger->fsa_input = I_DC_TIMEOUT; election_trigger->callback = crm_timer_popped; election_trigger->repeat = FALSE; } else { was_error = TRUE; } if (transition_timer != NULL) { transition_timer->source_id = 0; transition_timer->period_ms = -1; transition_timer->fsa_input = I_PE_CALC; transition_timer->callback = crm_timer_popped; transition_timer->repeat = FALSE; } else { was_error = TRUE; } if (integration_timer != NULL) { integration_timer->source_id = 0; integration_timer->period_ms = -1; integration_timer->fsa_input = I_INTEGRATED; integration_timer->callback = crm_timer_popped; integration_timer->repeat = FALSE; } else { was_error = TRUE; } if (finalization_timer != NULL) { finalization_timer->source_id = 0; finalization_timer->period_ms = -1; finalization_timer->fsa_input = I_FINALIZED; finalization_timer->callback = crm_timer_popped; finalization_timer->repeat = FALSE; /* for possible enabling... a bug in the join protocol left * a slave in S_PENDING while we think it's in S_NOT_DC * * raising I_FINALIZED put us into a transition loop which is * never resolved. * in this loop we continually send probes which the node * NACKs because it's in S_PENDING * * if we have nodes where heartbeat is active but the * CRM is not...
then this will be handled in the * integration phase */ finalization_timer->fsa_input = I_ELECTION; } else { was_error = TRUE; } if (shutdown_escalation_timer != NULL) { shutdown_escalation_timer->source_id = 0; shutdown_escalation_timer->period_ms = -1; shutdown_escalation_timer->fsa_input = I_STOP; shutdown_escalation_timer->callback = crm_timer_popped; shutdown_escalation_timer->repeat = FALSE; } else { was_error = TRUE; } if (wait_timer != NULL) { wait_timer->source_id = 0; wait_timer->period_ms = 2000; wait_timer->fsa_input = I_NULL; wait_timer->callback = crm_timer_popped; wait_timer->repeat = FALSE; } else { was_error = TRUE; } if (recheck_timer != NULL) { recheck_timer->source_id = 0; recheck_timer->period_ms = -1; recheck_timer->fsa_input = I_PE_CALC; recheck_timer->callback = crm_timer_popped; recheck_timer->repeat = FALSE; } else { was_error = TRUE; } /* set up the sub systems */ cib_subsystem = calloc(1, sizeof(struct crm_subsystem_s)); te_subsystem = calloc(1, sizeof(struct crm_subsystem_s)); pe_subsystem = calloc(1, sizeof(struct crm_subsystem_s)); if (cib_subsystem != NULL) { cib_subsystem->pid = -1; cib_subsystem->name = CRM_SYSTEM_CIB; cib_subsystem->flag_connected = R_CIB_CONNECTED; cib_subsystem->flag_required = R_CIB_REQUIRED; } else { was_error = TRUE; } if (te_subsystem != NULL) { te_subsystem->pid = -1; te_subsystem->name = CRM_SYSTEM_TENGINE; te_subsystem->flag_connected = R_TE_CONNECTED; te_subsystem->flag_required = R_TE_REQUIRED; } else { was_error = TRUE; } if (pe_subsystem != NULL) { pe_subsystem->pid = -1; pe_subsystem->path = CRM_DAEMON_DIR; pe_subsystem->name = CRM_SYSTEM_PENGINE; pe_subsystem->command = CRM_DAEMON_DIR "/" CRM_SYSTEM_PENGINE; pe_subsystem->args = NULL; pe_subsystem->flag_connected = R_PE_CONNECTED; pe_subsystem->flag_required = R_PE_REQUIRED; } else { was_error = TRUE; } if (was_error == FALSE && need_spawn_pengine_from_crmd()) { if (start_subsystem(pe_subsystem) == FALSE) { was_error = TRUE; } } if (was_error) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static int32_t crmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (crm_client_new(c, uid, gid) == NULL) { return -EIO; } return 0; } static void crmd_ipc_created(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); } static int32_t crmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; crm_client_t *client = crm_client_get(c); xmlNode *msg = crm_ipcs_recv(client, data, size, &id, &flags); crm_trace("Invoked: %s", crm_client_name(client)); crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__); if (msg == NULL) { return 0; } #if ENABLE_ACL CRM_ASSERT(client->user != NULL); crm_acl_get_set_user(msg, F_CRM_USER, client->user); #endif crm_trace("Processing msg from %s", crm_client_name(client)); crm_log_xml_trace(msg, "CRMd[inbound]"); crm_xml_add(msg, F_CRM_SYS_FROM, client->id); if (crmd_authorize_message(msg, client, NULL)) { route_message(C_IPC_MESSAGE, msg); } trigger_fsa(fsa_source); free_xml(msg); return 0; } static int32_t crmd_ipc_closed(qb_ipcs_connection_t * c) { crm_client_t *client = crm_client_get(c); struct crm_subsystem_s *the_subsystem = NULL; if (client == NULL) { return 0; } crm_trace("Connection %p", c); if (client->userdata == NULL) { crm_trace("Client hadn't registered with us yet"); } else if (strcasecmp(CRM_SYSTEM_PENGINE, client->userdata) == 0) { the_subsystem = pe_subsystem; } else if (strcasecmp(CRM_SYSTEM_TENGINE, client->userdata) == 
0) { the_subsystem = te_subsystem; } else if (strcasecmp(CRM_SYSTEM_CIB, client->userdata) == 0) { the_subsystem = cib_subsystem; } if (the_subsystem != NULL) { the_subsystem->source = NULL; the_subsystem->client = NULL; crm_info("Received HUP from %s:[%d]", the_subsystem->name, the_subsystem->pid); } else { /* else that was a transient client */ crm_trace("Received HUP from transient client"); } crm_trace("Disconnecting client %s (%p)", crm_client_name(client), client); free(client->userdata); crm_client_destroy(client); trigger_fsa(fsa_source); return 0; } static void crmd_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); crmd_ipc_closed(c); } /* A_STOP */ void do_stop(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } /* A_STARTED */ void do_started(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { static struct qb_ipcs_service_handlers crmd_callbacks = { .connection_accept = crmd_ipc_accept, .connection_created = crmd_ipc_created, .msg_process = crmd_ipc_dispatch, .connection_closed = crmd_ipc_closed, .connection_destroyed = crmd_ipc_destroy }; if (cur_state != S_STARTING) { crm_err("Start cancelled... %s", fsa_state2string(cur_state)); return; } else if (is_set(fsa_input_register, R_MEMBERSHIP) == FALSE) { crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP); crmd_fsa_stall(TRUE); return; } else if (is_set(fsa_input_register, R_LRM_CONNECTED) == FALSE) { crm_info("Delaying start, LRM not connected (%.16llx)", R_LRM_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) { crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (is_set(fsa_input_register, R_READ_CONFIG) == FALSE) { crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG); crmd_fsa_stall(TRUE); return; } else if (is_set(fsa_input_register, R_PEER_DATA) == FALSE) { /* try reading from HA */ crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA); #if SUPPORT_HEARTBEAT if (is_heartbeat_cluster()) { HA_Message *msg = NULL; crm_trace("Looking for a HA message"); msg = fsa_cluster_conn->llc_ops->readmsg(fsa_cluster_conn, 0); if (msg != NULL) { crm_trace("There was a HA message"); ha_msg_del(msg); } } #endif crmd_fsa_stall(TRUE); return; } crm_debug("Init server comms"); ipcs = crmd_ipc_server_init(&crmd_callbacks); if (ipcs == NULL) { crm_err("Failed to create IPC server: shutting down and inhibiting respawn"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } if (stonith_reconnect == NULL) { int dummy; stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW, te_connect_stonith, &dummy); } set_bit(fsa_input_register, R_ST_REQUIRED); mainloop_set_trigger(stonith_reconnect); crm_notice("The local CRM is operational"); clear_bit(fsa_input_register, R_STARTING); register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL); } /* A_RECOVER */ void do_recover(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { set_bit(fsa_input_register, R_IN_RECOVERY); crm_warn("Fast-tracking shutdown in response to errors"); register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } /* *INDENT-OFF* */ 
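/* A minimal lookup sketch (an illustration only, not part of the patch):
 * entries in the crmd_opts[] table below feed the metadata output
 * (crmd_metadata), option validation (verify_crmd_options) and lookups via
 * crmd_pref(), which falls back to the table default when the CIB does not
 * set a value, e.g. as config_query_callback() does further down:
 *
 *     const char *value = crmd_pref(config_hash, "load-threshold");
 *     if (value) {
 *         // "80%" if unset in the CIB, per the table entry below
 *         throttle_load_target = strtof(value, NULL) / 100;
 *     }
 */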
pe_cluster_option crmd_opts[] = { /* name, old-name, validate, values, default, short description, long description */ { "dc-version", NULL, "string", NULL, "none", NULL, "Version of Pacemaker on the cluster's DC.", "Includes the hash which identifies the exact changeset it was built from. Used for diagnostic purposes." }, { "cluster-infrastructure", NULL, "string", NULL, "heartbeat", NULL, "The messaging stack on which Pacemaker is currently running.", "Used for informational and diagnostic purposes." }, { XML_CONFIG_ATTR_DC_DEADTIME, "dc_deadtime", "time", NULL, "20s", &check_time, "How long to wait for a response from other nodes during startup.", "The \"correct\" value will depend on the speed/load of your network and the type of switches used." }, { XML_CONFIG_ATTR_RECHECK, "cluster_recheck_interval", "time", "Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified, e.g. 5min)", "15min", &check_timer, "Polling interval for time based changes to options, resource parameters and constraints.", "The Cluster is primarily event-driven; however, the configuration can have elements that change based on time." " To ensure these changes take effect, we can optionally poll the cluster's status for changes." }, #ifdef RHEL7_COMPAT /* These options were superseded by the alerts feature and now are just an * alternate interface to it. It was never released upstream, but was * released in RHEL 7, so we allow it to be enabled at compile-time by * defining RHEL7_COMPAT. */ { "notification-agent", NULL, "string", NULL, "/dev/null", &check_script, - "Notification script or tool to be called after significant cluster events", - "Full path to a script or binary that will be invoked when resources start/stop/fail, fencing occurs or nodes join/leave the cluster.\n" - "Must exist on all nodes in the cluster." + "Deprecated", + "Use alert path in alerts section instead" }, { "notification-recipient", NULL, "string", NULL, "", NULL, - "Destination for notifications (Optional)", - "Where should the supplied script send notifications to. Useful to avoid hard-coding this in the script." + "Deprecated", + "Use recipient value in alerts section instead" }, #endif { "load-threshold", NULL, "percentage", NULL, "80%", &check_utilization, "The maximum amount of system resources that should be used by nodes in the cluster", "The cluster will slow down its recovery process when the amount of system resources used" " (currently CPU) approaches this limit", }, { "node-action-limit", NULL, "integer", NULL, "0", &check_number, "The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"}, { XML_CONFIG_ATTR_ELECTION_FAIL, "election_timeout", "time", NULL, "2min", &check_timer, "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug." }, { XML_CONFIG_ATTR_FORCE_QUIT, "shutdown_escalation", "time", NULL, "20min", &check_timer, "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug." }, { "crmd-integration-timeout", NULL, "time", NULL, "3min", &check_timer, "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug." }, { "crmd-finalization-timeout", NULL, "time", NULL, "30min", &check_timer, "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug."
}, { "crmd-transition-delay", NULL, "time", NULL, "0s", &check_timer, "*** Advanced Use Only ***\n" "Enabling this option will slow down cluster recovery under all conditions", "Delay cluster recovery for the configured interval to allow for additional/related events to occur.\n" "Useful if your configuration is sensitive to the order in which ping updates arrive." }, { "stonith-watchdog-timeout", NULL, "time", NULL, NULL, &check_sbd_timeout, "How long to wait before we can assume nodes are safely down", NULL }, { "no-quorum-policy", "no_quorum_policy", "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL }, #if SUPPORT_PLUGIN { XML_ATTR_EXPECTED_VOTES, NULL, "integer", NULL, "2", &check_number, "The number of nodes expected to be in the cluster", "Used to calculate quorum in openais based clusters." }, #endif }; /* *INDENT-ON* */ void crmd_metadata(void) { config_metadata("CRM Daemon", "1.0", "CRM Daemon Options", "This is a fake resource that details the options that can be configured for the CRM Daemon.", crmd_opts, DIMOF(crmd_opts)); } static void verify_crmd_options(GHashTable * options) { verify_all_options(options, crmd_opts, DIMOF(crmd_opts)); } static const char * crmd_pref(GHashTable * options, const char *name) { return get_cluster_pref(options, crmd_opts, DIMOF(crmd_opts), name); } static void config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { #ifdef RHEL7_COMPAT const char *script = NULL; #endif const char *value = NULL; GHashTable *config_hash = NULL; crm_time_t *now = crm_time_new(NULL); xmlNode *crmconfig = NULL; xmlNode *alerts = NULL; if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); if (rc == -EACCES || rc == -pcmk_err_schema_validation) { crm_err("The cluster is mis-configured - shutting down and staying down"); set_bit(fsa_input_register, R_STAYDOWN); } goto bail; } crmconfig = output; if ((crmconfig) && (crm_element_name(crmconfig)) && (strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) { crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG); } if (!crmconfig) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); goto bail; } crm_debug("Call %d : Parsing CIB options", call_id); config_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); unpack_instance_attributes(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, now); verify_crmd_options(config_hash); #ifdef RHEL7_COMPAT script = crmd_pref(config_hash, "notification-agent"); value = crmd_pref(config_hash, "notification-recipient"); crmd_enable_notifications(script, value); #endif value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME); election_trigger->period_ms = crm_get_msec(value); value = crmd_pref(config_hash, "node-action-limit"); /* Also checks migration-limit */ throttle_update_job_max(value); value = crmd_pref(config_hash, "load-threshold"); if(value) { throttle_load_target = strtof(value, NULL) / 100; } value = crmd_pref(config_hash, "no-quorum-policy"); if (safe_str_eq(value, "suicide") && pcmk_locate_sbd()) { no_quorum_suicide_escalation = TRUE; } value = crmd_pref(config_hash, XML_CONFIG_ATTR_FORCE_QUIT); shutdown_escalation_timer->period_ms = crm_get_msec(value); /* How long to declare an election over - even 
if not everyone voted */ crm_debug("Shutdown escalation occurs after: %dms", shutdown_escalation_timer->period_ms); value = crmd_pref(config_hash, XML_CONFIG_ATTR_ELECTION_FAIL); election_timeout_set_period(fsa_election, crm_get_msec(value)); value = crmd_pref(config_hash, XML_CONFIG_ATTR_RECHECK); recheck_timer->period_ms = crm_get_msec(value); crm_debug("Checking for expired actions every %dms", recheck_timer->period_ms); value = crmd_pref(config_hash, "crmd-transition-delay"); transition_timer->period_ms = crm_get_msec(value); value = crmd_pref(config_hash, "crmd-integration-timeout"); integration_timer->period_ms = crm_get_msec(value); value = crmd_pref(config_hash, "crmd-finalization-timeout"); finalization_timer->period_ms = crm_get_msec(value); #if SUPPORT_COROSYNC if (is_classic_ais_cluster()) { value = crmd_pref(config_hash, XML_ATTR_EXPECTED_VOTES); crm_debug("Sending expected-votes=%s to corosync", value); send_cluster_text(crm_class_quorum, value, TRUE, NULL, crm_msg_ais); } #endif free(fsa_cluster_name); fsa_cluster_name = NULL; value = g_hash_table_lookup(config_hash, "cluster-name"); if (value) { fsa_cluster_name = strdup(value); } alerts = output?first_named_child(output, XML_CIB_TAG_ALERTS):NULL; parse_notifications(alerts); set_bit(fsa_input_register, R_READ_CONFIG); crm_trace("Triggering FSA: %s", __FUNCTION__); mainloop_set_trigger(fsa_source); g_hash_table_destroy(config_hash); bail: crm_time_free(now); } gboolean crm_read_options(gpointer user_data) { int call_id = fsa_cib_conn->cmds->query(fsa_cib_conn, "//" XML_CIB_TAG_CRMCONFIG " | //" XML_CIB_TAG_ALERTS, NULL, cib_xpath | cib_scope_local); fsa_register_cib_callback(call_id, FALSE, NULL, config_query_callback); crm_trace("Querying the CIB... call %d", call_id); return TRUE; } /* A_READCONFIG */ void do_read_config(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { throttle_init(); mainloop_set_trigger(config_read); } void crm_shutdown(int nsig) { if (crmd_mainloop != NULL && g_main_is_running(crmd_mainloop)) { if (is_set(fsa_input_register, R_SHUTDOWN)) { crm_err("Escalating the shutdown"); register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL); } else { set_bit(fsa_input_register, R_SHUTDOWN); register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); if (shutdown_escalation_timer->period_ms < 1) { const char *value = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT); int msec = crm_get_msec(value); crm_debug("Using default shutdown escalation: %dms", msec); shutdown_escalation_timer->period_ms = msec; } /* can't rely on this... */ crm_notice("Shutting down cluster resource manager " CRM_XS " limit=%dms", shutdown_escalation_timer->period_ms); crm_timer_start(shutdown_escalation_timer); } } else { crm_info("exit from shutdown"); crmd_exit(pcmk_ok); } } diff --git a/include/crm/stonith-ng.h b/include/crm/stonith-ng.h index d20719075d..7bb9c788d5 100644 --- a/include/crm/stonith-ng.h +++ b/include/crm/stonith-ng.h @@ -1,475 +1,480 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** * \file * \brief Fencing aka. STONITH * \ingroup fencing */ #ifndef STONITH_NG__H # define STONITH_NG__H # include +# include # include /* TO-DO: Work out how to drop this requirement */ # include # define T_STONITH_NOTIFY_DISCONNECT "st_notify_disconnect" # define T_STONITH_NOTIFY_FENCE "st_notify_fence" /* *INDENT-OFF* */ enum stonith_state { stonith_connected_command, stonith_connected_query, stonith_disconnected, }; enum stonith_call_options { st_opt_none = 0x00000000, st_opt_verbose = 0x00000001, st_opt_allow_suicide = 0x00000002, st_opt_manual_ack = 0x00000008, st_opt_discard_reply = 0x00000010, /* st_opt_all_replies = 0x00000020, */ st_opt_topology = 0x00000040, st_opt_scope_local = 0x00000100, st_opt_cs_nodeid = 0x00000200, st_opt_sync_call = 0x00001000, /*! Allow the timeout period for a callback to be adjusted * based on the time the server reports the operation will take. */ st_opt_timeout_updates = 0x00002000, /*! Only report back if operation is a success in callback */ st_opt_report_only_success = 0x00004000, }; /*! Order matters here, do not change values */ enum op_state { st_query, st_exec, st_done, st_duplicate, st_failed, }; typedef struct stonith_key_value_s { char *key; char *value; struct stonith_key_value_s *next; } stonith_key_value_t; typedef struct stonith_history_s { char *target; char *action; char *origin; char *delegate; int completed; int state; struct stonith_history_s *next; char *client; } stonith_history_t; typedef struct stonith_s stonith_t; typedef struct stonith_event_s { char *id; char *type; char *message; char *operation; int result; char *origin; char *target; char *action; char *executioner; char *device; /*! The name of the client that initiated the action. */ char *client_origin; } stonith_event_t; typedef struct stonith_callback_data_s { int rc; int call_id; void *userdata; } stonith_callback_data_t; typedef struct stonith_api_operations_s { /*! * \brief Destroy the stonith api structure. */ int (*free) (stonith_t *st); /*! * \brief Connect to the local stonith daemon. * * \retval 0, success * \retval negative error code on failure */ int (*connect) (stonith_t *st, const char *name, int *stonith_fd); /*! * \brief Disconnect from the local stonith daemon. * * \retval 0, success * \retval negative error code on failure */ int (*disconnect)(stonith_t *st); /*! * \brief Remove a registered stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*remove_device)( stonith_t *st, int options, const char *name); /*! * \brief Register a stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*register_device)( stonith_t *st, int options, const char *id, const char *namespace, const char *agent, stonith_key_value_t *params); /*! * \brief Remove a fencing level for a specific node. * * \note This feature is not available when stonith is in standalone mode. * * \retval 0, success * \retval negative error code on failure */ int (*remove_level)( stonith_t *st, int options, const char *node, int level); /*! 
* \brief Register a fencing level containing the fencing devices to be used * at that level for a specific node. * * \note This feature is not available when stonith is in standalone mode. * * \retval 0, success * \retval negative error code on failure */ int (*register_level)( stonith_t *st, int options, const char *node, int level, stonith_key_value_t *device_list); /*! * \brief Get the metadata documentation for a resource. * * \note Value is returned in output. Output must be freed when set. * * \retval 0 success * \retval negative error code on failure */ int (*metadata)(stonith_t *st, int options, const char *device, const char *namespace, char **output, int timeout); /*! * \brief Retrieve a list of installed stonith agents * * \note if namespace is not provided, all known agents will be returned * \note list must be freed using stonith_key_value_freeall() * \note call_options parameter is not used, it is reserved for future use. * * \retval num items in list on success * \retval negative error code on failure */ int (*list_agents)(stonith_t *stonith, int call_options, const char *namespace, stonith_key_value_t **devices, int timeout); /*! * \brief Retrieve string listing hosts and port assignments from a local stonith device. * * \retval 0 on success * \retval negative error code on failure */ int (*list)(stonith_t *st, int options, const char *id, char **list_output, int timeout); /*! * \brief Check to see if a local stonith device is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*monitor)(stonith_t *st, int options, const char *id, int timeout); /*! * \brief Check to see if a local stonith device's port is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*status)(stonith_t *st, int options, const char *id, const char *port, int timeout); /*! * \brief Retrieve a list of registered stonith devices. * * \note If node is provided, only devices that can fence the node id * will be returned. * * \retval num items in list on success * \retval negative error code on failure */ int (*query)(stonith_t *st, int options, const char *node, stonith_key_value_t **devices, int timeout); /*! * \brief Issue a fencing action against a node. * * \note Possible actions are, 'on', 'off', and 'reboot'. * * \param st, stonith connection * \param options, call options * \param node, The target node to fence * \param action, The fencing action to take * \param timeout, The default per device timeout to use with each device * capable of fencing the target. * * \retval 0 success * \retval negative error code on failure. */ int (*fence)(stonith_t *st, int options, const char *node, const char *action, int timeout, int tolerance); /*! * \brief Manually confirm that a node is down. * * \retval 0 success * \retval negative error code on failure. */ int (*confirm)(stonith_t *st, int options, const char *node); /*! * \brief Retrieve a list of fencing operations that have occurred for a specific node. * * \note History is not available in standalone mode. * * \retval 0 success * \retval negative error code on failure. */ int (*history)(stonith_t *st, int options, const char *node, stonith_history_t **output, int timeout); int (*register_notification)( stonith_t *st, const char *event, void (*notify)(stonith_t *st, stonith_event_t *e)); int (*remove_notification)(stonith_t *st, const char *event); /*! * \brief Register a callback to receive the result of an async call id * * \param call_id, The call id to register the callback for. 
 * \param timeout, The default timeout period to wait until this callback expires * \param options, Option flags, st_opt_timeout_updates and st_opt_report_only_success are the * only valid options for this function. * \param userdata, A pointer that will be handed back in the callback. * \param callback_name, Unique name given to callback * \param callback, The callback function * * \retval 0 success * \retval negative error code on failure. */ int (*register_callback)(stonith_t *st, int call_id, int timeout, int options, void *userdata, const char *callback_name, void (*callback)(stonith_t *st, stonith_callback_data_t *data)); /*! * \brief Remove a registered callback for a given call id. */ int (*remove_callback)(stonith_t *st, int call_id, bool all_callbacks); /*! * \brief Remove fencing level for specific node, node regex or attribute * * \param[in] st Stonithd connection to use * \param[in] options Bitmask of stonith_call_options to pass to stonithd * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to remove * * \return 0 on success, negative error code otherwise * * \note This feature is not available when stonith is in standalone mode. * The caller should set only one of node, pattern or attr/value. */ int (*remove_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level); /*! * \brief Register fencing level for specific node, node regex or attribute * * \param[in] st Stonithd connection to use * \param[in] options Bitmask of stonith_call_options to pass to stonithd * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to add * \param[in] device_list Devices to use in level * * \return 0 on success, negative error code otherwise * * \note This feature is not available when stonith is in standalone mode. * The caller should set only one of node, pattern or attr/value.
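 *
 * A hypothetical invocation (illustration only, not part of this header),
 * registering topology level 1 for node "node1" with a single device named
 * "fence1", using only calls declared in this file:
 *
 *     stonith_key_value_t *devs = stonith_key_value_add(NULL, NULL, "fence1");
 *     st->cmds->register_level_full(st, st_opt_sync_call, "node1",
 *                                   NULL, NULL, NULL, 1, devs);
 *     stonith_key_value_freeall(devs, 1, 1);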
 */ int (*register_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list); } stonith_api_operations_t; struct stonith_s { enum stonith_state state; int call_id; int call_timeout; void *private; stonith_api_operations_t *cmds; }; /* *INDENT-ON* */ /* Core functions */ stonith_t *stonith_api_new(void); void stonith_api_delete(stonith_t * st); void stonith_dump_pending_callbacks(stonith_t * st); const char *get_stonith_provider(const char *agent, const char *provider); bool stonith_dispatch(stonith_t * st); stonith_key_value_t *stonith_key_value_add(stonith_key_value_t * kvp, const char *key, const char *value); void stonith_key_value_freeall(stonith_key_value_t * kvp, int keys, int values); /* Basic helpers that allow nodes to be fenced and the history to be * queried without mainloop or the caller understanding the full API * * At least one of nodeid and uname is required */ int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off); time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress); /* * Helpers for using the above functions without install-time dependencies * * Usage: * #include <crm/stonith-ng.h> * * To turn a node off by corosync nodeid: * stonith_api_kick_helper(nodeid, 120, 1); * * To check the last fence date/time (also by nodeid): * last = stonith_api_time_helper(nodeid, 0); * * To check if fencing is in progress: * if(stonith_api_time_helper(nodeid, 1) > 0) { ... } * * e.g. #include <stdio.h> #include <time.h> #include <crm/stonith-ng.h> int main(int argc, char ** argv) { int rc = 0; int nodeid = 102; time_t last = 0; last = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(&last)); rc = stonith_api_kick_helper(nodeid, 120, 1); printf("%d fence result: %d\n", nodeid, rc); last = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(&last)); return 0; } */ # define STONITH_LIBRARY "libstonithd.so.2" static inline int stonith_api_kick_helper(uint32_t nodeid, int timeout, bool off) { static void *st_library = NULL; static int (*st_kick_fn) (int nodeid, const char *uname, int timeout, bool off) = NULL; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_kick_fn == NULL) { st_kick_fn = dlsym(st_library, "stonith_api_kick"); } if (st_kick_fn == NULL) { +#ifdef ELIBACC return -ELIBACC; +#else + return -ENOSYS; +#endif } return (*st_kick_fn) (nodeid, NULL, timeout, off); } static inline time_t stonith_api_time_helper(uint32_t nodeid, bool in_progress) { static void *st_library = NULL; static time_t(*st_time_fn) (int nodeid, const char *uname, bool in_progress) = NULL; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_time_fn == NULL) { st_time_fn = dlsym(st_library, "stonith_api_time"); } if (st_time_fn == NULL) { return 0; } return (*st_time_fn) (nodeid, NULL, in_progress); } #endif diff --git a/pacemaker.spec.in b/pacemaker.spec.in index cdb0756f37..0378e28901 100644 --- a/pacemaker.spec.in +++ b/pacemaker.spec.in @@ -1,649 +1,645 @@ %global gname haclient %global uname hacluster %global pcmk_docdir %{_docdir}/%{name} %global specversion 1 %global pcmkversion 1.1.15 # set following to the actual commit or, for final release, concatenate # "pcmkversion" macro to "Pacemaker-" (will yield a tag per the convention) %global commit HEAD %global lparen ( %global rparen ) %global shortcommit %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo ${c:10};; *%{rparen} echo
${c:0:7};; esac) %global pre_release %(s=%{shortcommit}; [ ${s: -4:3} != -rc ]; echo $?) %global post_release %([ %{commit} = Pacemaker-%{shortcommit} ]; echo $?) %global github_owner ClusterLabs # Turn off the auto compilation of python files not in the site-packages directory # Needed so that the -devel package is multilib compliant %global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g') %global rawhide %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) %global cs_version %(pkg-config corosync --modversion | awk -F . '{print $1}') # It has to be eventually decided whether to use Python2 or Python3 %global py_site %{?python_sitearch}%{!?python_sitearch:%(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} %global cman_native 0%{?el6} || (0%{?fedora} > 0 && 0%{?fedora} < 17) # It's desired to apply "license" macro uniformly in "files" sections below, # but RPM versions not aware of this new classification normally (re)define it # to the value of "License:", so following is to ensure the macro definition # is per expectation only after that tag; solution courtesy of Jason Tibbitts: # https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77b0c05a6c29bc0eb0c4e82113180a0a99#n12 %if !%{defined _licensedir} %define description %{lua: rpm.define("license %doc") print("%description") } %endif # Conditionals # Invoke "rpmbuild --without " or "rpmbuild --with " # to disable or enable specific features # Legacy stonithd fencing agents %bcond_with stonithd # Build with/without support for profiling tools %bcond_with profiling # Build with/without support for performing coverage analysis %bcond_with coverage # We generate docs using Publican, Asciidoc and Inkscape, but they're not available everywhere %bcond_without doc # Use a different versioning scheme %bcond_with pre_release # Ship an Upstart job file %bcond_with upstart_job # Turn off cman support on platforms that normally ship with it %bcond_without cman %if %{with profiling} # This disables -debuginfo package creation and also the stripping of binaries/libraries # Useful if you want sane profiling data %global debug_package %{nil} %endif %if %{with pre_release} || 0%{pre_release} %if 0%{pre_release} %global pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) %else %global pcmk_release 0.%{specversion}.%{shortcommit}.git %endif %else %if 0%{post_release} %global pcmk_release %{specversion}.%{shortcommit}.git %else %global pcmk_release %{specversion} %endif %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: http://www.clusterlabs.org Group: System Environment/Daemons +# e.g.
https://github.com/ClusterLabs/pacemaker/archive/8ae45302394b039fb098e150f156df29fc0cb576/pacemaker-8ae4530.tar.gz Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on Requires: python Requires: resource-agents Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if %{defined systemd_requires} %systemd_requires %endif -%if 0%{?rhel} > 0 -ExclusiveArch: i386 i686 x86_64 -%endif - - # Required for core functionality (python-devel depends on python) BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel BuildRequires: pkgconfig(glib-2.0) libxml2-devel libxslt-devel libuuid-devel BuildRequires: python-devel bzip2-devel pam-devel # Required for agent_config.h which specifies the correct scratch directory BuildRequires: resource-agents # We need reasonably recent versions of libqb BuildRequires: libqb-devel > 0.11.0 Requires: libqb > 0.11.0 # Enables optional functionality BuildRequires: ncurses-devel openssl-devel libselinux-devel docbook-style-xsl BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1) %if %{defined _unitdir} BuildRequires: systemd-devel %endif %if %{with cman} && %{cman_native} BuildRequires: clusterlib-devel # pacemaker initscript: cman initscript, fence_tool (+ some soft-dependencies) # "post" scriptlet: ccs_update_schema Requires: cman %endif Requires: corosync BuildRequires: corosynclib-devel %if %{with stonithd} BuildRequires: cluster-glue-libs-devel %endif %if !%{rawhide} # More often than not, inkscape is busted on rawhide, don't even bother %if %{with doc} %ifarch %{ix86} x86_64 BuildRequires: publican inkscape asciidoc %endif %endif %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : cman stonithd doc coverage profiling pre_release upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} Requires: perl-TimeDate %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{name}-libs License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons %description -n %{name}-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. The %{name}-libs package contains shared libraries needed for cluster nodes and those just running the CLI tools. 
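# As an illustration (hypothetical invocation, not part of this patch), the
# bcond conditionals declared at the top of this spec can be toggled when
# rebuilding from the source RPM, e.g. without CMAN but with coverage:
#
#   rpmbuild --rebuild --without cman --with coverage pacemaker-*.src.rpm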
%package -n %{name}-cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} %description -n %{name}-cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if %{defined systemd_requires} %systemd_requires %endif %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{name}-libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-cts = %{version}-%{release} Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} Requires: libtool-ltdl-devel libqb-devel libuuid-devel Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel Requires: corosynclib-devel %description -n %{name}-libs-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. The %{name}-libs-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: python Requires: %{name}-libs = %{version}-%{release} # systemd python bindings are a separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 20 Requires: systemd-python %endif %if 0%{?rhel} > 6 Requires: systemd-python %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: GPLv2+ and LGPLv2+ Summary: Documentation for Pacemaker Group: Documentation %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. %prep %setup -q -n %{name}-%{commit} # Force the local time # # 'git' sets the file date to the date of the last commit. # This can result in files having been created in the future # when building on machines in timezones 'behind' the one the # commit occurred in - which seriously confuses 'make' find . -exec touch \{\} \; %build + +# Early versions of autotools (e.g.
RHEL <= 5) do not support --docdir +export docdir=%{pcmk_docdir} + ./autogen.sh -# RHEL <= 5 does not support --docdir -docdir=%{pcmk_docdir} %{configure} \ +%{configure} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{!?with_cman: --without-cman} \ --without-heartbeat \ %{!?with_doc: --with-brand=} \ --with-initdir=%{_initrddir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif -make %{_smp_mflags} V=1 docdir=%{pcmk_docdir} all +make %{_smp_mflags} V=1 all %install rm -rf %{buildroot} make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif # Scripts that should be executable chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either rm -f %{buildroot}/%{_libdir}/service_crm.so # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif # Don't ship fence_pcmk where it has no use %if %{without cman} rm -f %{buildroot}/%{_sbindir}/fence_pcmk %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . 
%if %{defined _unitdir}
%post
%systemd_post pacemaker.service

%preun
%systemd_preun pacemaker.service

%postun
%systemd_postun_with_restart pacemaker.service

%post remote
%systemd_post pacemaker_remote.service

%preun remote
%systemd_preun pacemaker_remote.service

%postun remote
%systemd_postun_with_restart pacemaker_remote.service

%post cli
%systemd_post crm_mon.service

%preun cli
%systemd_preun crm_mon.service

%postun cli
%systemd_postun_with_restart crm_mon.service

%else
%post
/sbin/chkconfig --add pacemaker || :
%if %{with cman} && %{cman_native}
# make fence_pcmk in cluster.conf valid instantly otherwise tools like ccs may
# choke (until schema gets auto-regenerated on the next start of cluster),
# per the protocol shared with other packages contributing to cluster.rng
/usr/sbin/ccs_update_schema >/dev/null 2>&1 || :
%endif

%preun
/sbin/service pacemaker stop || :
if [ $1 -eq 0 ]; then
    # Package removal, not upgrade
    /sbin/chkconfig --del pacemaker || :
fi

%post remote
/sbin/chkconfig --add pacemaker_remote || :

%preun remote
/sbin/service pacemaker_remote stop &>/dev/null || :
if [ $1 -eq 0 ]; then
    # Package removal, not upgrade
    /sbin/chkconfig --del pacemaker_remote || :
fi
%endif

%pre -n %{name}-libs
getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189
getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname}
exit 0

%post -n %{name}-libs -p /sbin/ldconfig
%postun -n %{name}-libs -p /sbin/ldconfig

%post -n %{name}-cluster-libs -p /sbin/ldconfig
%postun -n %{name}-cluster-libs -p /sbin/ldconfig

%files
###########################################################
%defattr(-,root,root)

%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%{_sbindir}/pacemakerd

%if %{defined _unitdir}
%{_unitdir}/pacemaker.service
%else
%{_initrddir}/pacemaker
%endif

%exclude %{_libexecdir}/pacemaker/lrmd_test
%exclude %{_sbindir}/pacemaker_remoted
%{_libexecdir}/pacemaker/*

%{_sbindir}/crm_attribute
%{_sbindir}/crm_master
%{_sbindir}/crm_node
%{_sbindir}/fence_legacy
%if %{with cman}
%{_sbindir}/fence_pcmk
%endif
%{_sbindir}/stonith_admin
%doc %{_mandir}/man7/crmd.*
%doc %{_mandir}/man7/pengine.*
%doc %{_mandir}/man7/stonithd.*
%if %{without cman} || !%{cman_native}
%doc %{_mandir}/man7/ocf_pacemaker_controld.*
%endif
%doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
%doc %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/crm_attribute.*
%doc %{_mandir}/man8/crm_node.*
%doc %{_mandir}/man8/crm_master.*
%if %{with cman}
%doc %{_mandir}/man8/fence_pcmk.*
%endif
%doc %{_mandir}/man8/fence_legacy.*
%doc %{_mandir}/man8/pacemakerd.*
%doc %{_mandir}/man8/stonith_admin.*
%license COPYING
%doc AUTHORS
%doc ChangeLog

%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
%if %{without cman} || !%{cman_native}
/usr/lib/ocf/resource.d/pacemaker/controld
%endif
/usr/lib/ocf/resource.d/pacemaker/o2cb
/usr/lib/ocf/resource.d/pacemaker/remote
/usr/lib/ocf/resource.d/.isolation

%if "%{?cs_version}" != "UNKNOWN"
%if 0%{?cs_version} < 2
%{_libexecdir}/lcrso/pacemaker.lcrso
%endif
%endif

%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/pacemaker.conf
%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
%endif

%files cli
%defattr(-,root,root)

%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
%config(noreplace)
%{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_resource %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %exclude %{_datadir}/pacemaker/tests %{_datadir}/pacemaker %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/crmd.* %exclude %{_mandir}/man7/pengine.* %exclude %{_mandir}/man7/stonithd.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_node.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_pcmk.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker_remoted.* %exclude %{_mandir}/man8/stonith_admin.* %license COPYING %doc AUTHORS %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %files -n %{name}-libs %defattr(-,root,root) %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpengine.so.* %{_libdir}/libstonithd.so.* %{_libdir}/libtransitioner.so.* %license COPYING.LIB %doc AUTHORS %files -n %{name}-cluster-libs %defattr(-,root,root) %{_libdir}/libcrmcluster.so.* %license COPYING.LIB %doc AUTHORS %files remote %defattr(-,root,root) %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker_remoted %{_mandir}/man8/pacemaker_remoted.* %license COPYING %doc AUTHORS %files doc %defattr(-,root,root) %doc %{pcmk_docdir} %files cts %defattr(-,root,root) %{py_site}/cts %{_datadir}/pacemaker/tests/cts %{_libexecdir}/pacemaker/lrmd_test %license COPYING %doc AUTHORS %files -n %{name}-libs-devel %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests/cts %{_datadir}/pacemaker/tests %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license COPYING.LIB %doc AUTHORS %changelog diff --git a/pengine/test10/asymmetric.summary b/pengine/test10/asymmetric.summary index 42ec9fc074..7c51fd2679 100644 --- a/pengine/test10/asymmetric.summary +++ b/pengine/test10/asymmetric.summary @@ -1,28 +1,27 @@ -2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ puma1 puma3 ] Master/Slave Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] Masters: [ puma3 ] - Slaves (target-role): [ puma1 ] + Slaves: [ puma1 ] vpool_ip_poolA (ocf::heartbeat:IPaddr2): Stopped drbd_target_poolA (ocf::vpools:iscsi_target): Stopped Transition Summary: Executing cluster transition: * 
Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 monitor=19000 on puma1 * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 monitor=20000 on puma3 * Resource action: drbd_target_poolA monitor on puma3 * Resource action: drbd_target_poolA monitor on puma1 Revised cluster status: Online: [ puma1 puma3 ] Master/Slave Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] Masters: [ puma3 ] - Slaves (target-role): [ puma1 ] + Slaves: [ puma1 ] vpool_ip_poolA (ocf::heartbeat:IPaddr2): Stopped drbd_target_poolA (ocf::vpools:iscsi_target): Stopped diff --git a/pengine/test10/bug-1765.summary b/pengine/test10/bug-1765.summary index 478cad545f..593bac392c 100644 --- a/pengine/test10/bug-1765.summary +++ b/pengine/test10/bug-1765.summary @@ -1,37 +1,36 @@ -6 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ sles236 sles238 ] Master/Slave Set: ms-drbd0 [drbd0] Masters: [ sles236 ] Stopped: [ sles238 ] Master/Slave Set: ms-drbd1 [drbd1] Masters: [ sles236 ] - Slaves (target-role): [ sles238 ] + Slaves: [ sles238 ] Transition Summary: * Start drbd0:1 (sles238) Executing cluster transition: * Pseudo action: ms-drbd0_pre_notify_start_0 * Resource action: drbd0:0 notify on sles236 * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd0_start_0 * Resource action: drbd0:1 start on sles238 * Pseudo action: ms-drbd0_running_0 * Pseudo action: ms-drbd0_post_notify_running_0 * Resource action: drbd0:0 notify on sles236 * Resource action: drbd0:1 notify on sles238 * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised cluster status: Online: [ sles236 sles238 ] Master/Slave Set: ms-drbd0 [drbd0] Masters: [ sles236 ] - Slaves (target-role): [ sles238 ] + Slaves: [ sles238 ] Master/Slave Set: ms-drbd1 [drbd1] Masters: [ sles236 ] - Slaves (target-role): [ sles238 ] + Slaves: [ sles238 ] diff --git a/pengine/test10/bug-1822.summary b/pengine/test10/bug-1822.summary index 6cfd45e5a4..afb9fd1bea 100644 --- a/pengine/test10/bug-1822.summary +++ b/pengine/test10/bug-1822.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ process1a process2b ] Master/Slave Set: ms-sf [ms-sf_group] (unique) Resource Group: ms-sf_group:0 - master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Started process2b + master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped Resource Group: ms-sf_group:1 master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Master process1a master_slave_procdctl:1 (ocf::heartbeat:procdctl): Master process1a Transition Summary: * Demote master_slave_Stateful:1 (Master -> Stopped process1a) * Demote master_slave_procdctl:1 (Master -> Stopped process1a) Executing cluster transition: * Pseudo action: ms-sf_demote_0 * Pseudo action: ms-sf_group:1_demote_0 * Resource action: master_slave_Stateful:1 demote on process1a * Resource action: master_slave_procdctl:1 demote on process1a * Pseudo action: ms-sf_group:1_demoted_0 * Pseudo action: ms-sf_demoted_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: ms-sf_group:1_stop_0 * Resource action: master_slave_Stateful:1 stop on process1a * Resource action: master_slave_procdctl:1 stop on process1a * Cluster action: do_shutdown on process1a * Pseudo action: all_stopped * Pseudo action: ms-sf_group:1_stopped_0 * Pseudo action: ms-sf_stopped_0 Revised cluster status: Online: [ process1a process2b ] Master/Slave Set: ms-sf [ms-sf_group] (unique) Resource Group: ms-sf_group:0 - 
master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Started process2b + master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped Resource Group: ms-sf_group:1 master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Stopped master_slave_procdctl:1 (ocf::heartbeat:procdctl): Stopped diff --git a/pengine/test10/bug-5059.summary b/pengine/test10/bug-5059.summary index 38efad7195..36a5c67d5b 100644 --- a/pengine/test10/bug-5059.summary +++ b/pengine/test10/bug-5059.summary @@ -1,77 +1,76 @@ -12 of 6 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node gluster03.h: standby Online: [ gluster01.h gluster02.h ] OFFLINE: [ gluster04.h ] Master/Slave Set: ms_stateful [g_stateful] Resource Group: g_stateful:0 - p_stateful1 (ocf::pacemaker:Stateful): Started gluster01.h - p_stateful2 (ocf::pacemaker:Stateful): Stopped ( target-role:Started ) + p_stateful1 (ocf::pacemaker:Stateful): Slave gluster01.h + p_stateful2 (ocf::pacemaker:Stateful): Stopped Resource Group: g_stateful:1 - p_stateful1 (ocf::pacemaker:Stateful): Started gluster02.h - p_stateful2 (ocf::pacemaker:Stateful): Stopped ( target-role:Started ) + p_stateful1 (ocf::pacemaker:Stateful): Slave gluster02.h + p_stateful2 (ocf::pacemaker:Stateful): Stopped Stopped: [ gluster03.h gluster04.h ] Clone Set: c_dummy [p_dummy1] Started: [ gluster01.h gluster02.h ] Transition Summary: * Promote p_stateful1:0 (Slave -> Master gluster01.h) * Start p_stateful2:0 (gluster01.h) * Promote p_stateful2:0 (Stopped -> Master gluster01.h) * Start p_stateful2:1 (gluster02.h) Executing cluster transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: iptest delete on gluster02.h * Resource action: ipsrc2 delete on gluster02.h * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Pseudo action: g_stateful:0_start_0 * Resource action: p_stateful2:0 start on gluster01.h * Pseudo action: g_stateful:1_start_0 * Resource action: p_stateful2:1 start on gluster02.h * Pseudo action: g_stateful:0_running_0 * Pseudo action: g_stateful:1_running_0 * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-post_notify_running_0 * Pseudo action: ms_stateful_pre_notify_promote_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_promote_0 * Pseudo action: ms_stateful_promote_0 * Pseudo action: g_stateful:0_promote_0 * Resource action: p_stateful1:0 promote on gluster01.h * Resource action: p_stateful2:0 promote on gluster01.h * Pseudo action: g_stateful:0_promoted_0 * Pseudo action: ms_stateful_promoted_0 * Pseudo action: ms_stateful_post_notify_promoted_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: 
ms_stateful_confirmed-post_notify_promoted_0 * Resource action: p_stateful1:1 monitor=10000 on gluster02.h * Resource action: p_stateful2:1 monitor=10000 on gluster02.h Revised cluster status: Node gluster03.h: standby Online: [ gluster01.h gluster02.h ] OFFLINE: [ gluster04.h ] Master/Slave Set: ms_stateful [g_stateful] Masters: [ gluster01.h ] - Slaves (target-role): [ gluster02.h ] + Slaves: [ gluster02.h ] Clone Set: c_dummy [p_dummy1] Started: [ gluster01.h gluster02.h ] diff --git a/pengine/test10/bug-5143-ms-shuffle.summary b/pengine/test10/bug-5143-ms-shuffle.summary index 63c54e126d..4aa3fd3735 100644 --- a/pengine/test10/bug-5143-ms-shuffle.summary +++ b/pengine/test10/bug-5143-ms-shuffle.summary @@ -1,75 +1,75 @@ -7 of 34 resources DISABLED and 0 BLOCKED from being started due to failures +2 of 34 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ hex-1 hex-2 hex-3 ] fencing (stonith:external/sbd): Started hex-1 Clone Set: baseclone [basegrp] Started: [ hex-1 hex-2 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Started hex-2 Clone Set: fs2 [fs-ocfs-2] Started: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Masters: [ hex-1 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] Master/Slave Set: ms-r1 [drbd-r1] - Slaves (target-role): [ hex-2 hex-3 ] + Slaves: [ hex-2 hex-3 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Started hex-3 vg-md0 (ocf::heartbeat:LVM): Started hex-3 fs-md0 (ocf::heartbeat:Filesystem): Started hex-3 dummy1 (ocf::heartbeat:Delay): Started hex-3 dummy3 (ocf::heartbeat:Delay): Started hex-1 dummy4 (ocf::heartbeat:Delay): Started hex-2 dummy5 (ocf::heartbeat:Delay): Started hex-1 dummy6 (ocf::heartbeat:Delay): Started hex-2 Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Stopped ( disabled ) dummy2 (ocf::heartbeat:Delay): Stopped Transition Summary: * Promote drbd-r1:1 (Slave -> Master hex-3) Executing cluster transition: * Pseudo action: ms-r1_pre_notify_promote_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-pre_notify_promote_0 * Pseudo action: ms-r1_promote_0 * Resource action: drbd-r1 promote on hex-3 * Pseudo action: ms-r1_promoted_0 * Pseudo action: ms-r1_post_notify_promoted_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-post_notify_promoted_0 * Resource action: drbd-r1 monitor=29000 on hex-2 * Resource action: drbd-r1 monitor=31000 on hex-3 Revised cluster status: Online: [ hex-1 hex-2 hex-3 ] fencing (stonith:external/sbd): Started hex-1 Clone Set: baseclone [basegrp] Started: [ hex-1 hex-2 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Started hex-2 Clone Set: fs2 [fs-ocfs-2] Started: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Masters: [ hex-1 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] Master/Slave Set: ms-r1 [drbd-r1] Masters: [ hex-3 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Started hex-3 vg-md0 (ocf::heartbeat:LVM): Started hex-3 fs-md0 (ocf::heartbeat:Filesystem): Started hex-3 dummy1 (ocf::heartbeat:Delay): Started hex-3 dummy3 (ocf::heartbeat:Delay): Started hex-1 dummy4 (ocf::heartbeat:Delay): Started hex-2 dummy5 (ocf::heartbeat:Delay): Started hex-1 dummy6 (ocf::heartbeat:Delay): Started hex-2 Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Stopped ( disabled ) dummy2 (ocf::heartbeat:Delay): Stopped diff --git 
a/pengine/test10/bug-cl-5168.summary b/pengine/test10/bug-cl-5168.summary index b197abedaf..7b8ff6f055 100644 --- a/pengine/test10/bug-cl-5168.summary +++ b/pengine/test10/bug-cl-5168.summary @@ -1,75 +1,74 @@ -5 of 34 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ hex-1 hex-2 hex-3 ] fencing (stonith:external/sbd): Started hex-1 Clone Set: baseclone [basegrp] Started: [ hex-1 hex-2 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Started hex-2 Clone Set: fs2 [fs-ocfs-2] Started: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Masters: [ hex-1 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Started hex-3 vg-md0 (ocf::heartbeat:LVM): Started hex-3 fs-md0 (ocf::heartbeat:Filesystem): Started hex-3 dummy1 (ocf::heartbeat:Delay): Started hex-3 dummy3 (ocf::heartbeat:Delay): Started hex-1 dummy4 (ocf::heartbeat:Delay): Started hex-2 dummy5 (ocf::heartbeat:Delay): Started hex-1 dummy6 (ocf::heartbeat:Delay): Started hex-2 Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Started hex-1 dummy2 (ocf::heartbeat:Delay): Started hex-1 Master/Slave Set: ms-r1 [drbd-r1] - Slaves (target-role): [ hex-2 hex-3 ] + Slaves: [ hex-2 hex-3 ] Transition Summary: * Promote drbd-r1:1 (Slave -> Master hex-3) Executing cluster transition: * Pseudo action: ms-r1_pre_notify_promote_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-pre_notify_promote_0 * Pseudo action: ms-r1_promote_0 * Resource action: drbd-r1 promote on hex-3 * Pseudo action: ms-r1_promoted_0 * Pseudo action: ms-r1_post_notify_promoted_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-post_notify_promoted_0 * Resource action: drbd-r1 monitor=29000 on hex-2 * Resource action: drbd-r1 monitor=31000 on hex-3 Revised cluster status: Online: [ hex-1 hex-2 hex-3 ] fencing (stonith:external/sbd): Started hex-1 Clone Set: baseclone [basegrp] Started: [ hex-1 hex-2 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Started hex-2 Clone Set: fs2 [fs-ocfs-2] Started: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Masters: [ hex-1 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Started hex-3 vg-md0 (ocf::heartbeat:LVM): Started hex-3 fs-md0 (ocf::heartbeat:Filesystem): Started hex-3 dummy1 (ocf::heartbeat:Delay): Started hex-3 dummy3 (ocf::heartbeat:Delay): Started hex-1 dummy4 (ocf::heartbeat:Delay): Started hex-2 dummy5 (ocf::heartbeat:Delay): Started hex-1 dummy6 (ocf::heartbeat:Delay): Started hex-2 Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Started hex-1 dummy2 (ocf::heartbeat:Delay): Started hex-1 Master/Slave Set: ms-r1 [drbd-r1] Masters: [ hex-3 ] - Slaves (target-role): [ hex-2 ] + Slaves: [ hex-2 ] diff --git a/pengine/test10/bug-cl-5212.summary b/pengine/test10/bug-cl-5212.summary index 542afbf86d..b5d5146add 100644 --- a/pengine/test10/bug-cl-5212.summary +++ b/pengine/test10/bug-cl-5212.summary @@ -1,67 +1,67 @@ Current cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 
(UNCLEAN) Master/Slave Set: msPostgresql [pgsql] - pgsql (ocf::pacemaker:Stateful): Started srv02 (UNCLEAN) + pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): Started srv01 (UNCLEAN) Started: [ srv03 ] Transition Summary: * Stop prmStonith1-1 (Started srv02 - blocked) * Stop prmStonith2-1 (Started srv01 - blocked) * Stop prmStonith3-1 (srv01 - blocked) * Stop pgsql:0 (srv02 - blocked) * Demote pgsql:1 (Master -> Stopped srv01 - blocked) * Stop prmPingd:0 (srv02 - blocked) * Stop prmPingd:1 (srv01 - blocked) Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stop_0 * Pseudo action: grpStonith2_start_0 * Pseudo action: grpStonith3_stop_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: clnPingd_stop_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: clnPingd_stopped_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: msPostgresql_post_notify_stopped_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 Revised cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Master/Slave Set: msPostgresql [pgsql] - pgsql (ocf::pacemaker:Stateful): Started srv02 (UNCLEAN) + pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): Started srv01 (UNCLEAN) Started: [ srv03 ] diff --git a/pengine/test10/bug-lf-2317.summary b/pengine/test10/bug-lf-2317.summary index bed83f1658..f6b0ae406b 100644 --- a/pengine/test10/bug-lf-2317.summary +++ b/pengine/test10/bug-lf-2317.summary @@ -1,35 +1,34 @@ -2 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ ibm1.isg.si ibm2.isg.si ] HostingIsg (ocf::heartbeat:Xen): Started ibm2.isg.si Master/Slave Set: ms_drbd_r0 [drbd_r0] Masters: [ ibm2.isg.si ] - Slaves (target-role): [ ibm1.isg.si ] + Slaves: [ ibm1.isg.si ] Transition Summary: * Promote drbd_r0:1 (Slave -> Master ibm1.isg.si) Executing cluster transition: * Resource action: drbd_r0:0 cancel=30000 on ibm1.isg.si * Pseudo action: ms_drbd_r0_pre_notify_promote_0 * Resource action: drbd_r0:1 notify on ibm2.isg.si * Resource action: drbd_r0:0 notify on ibm1.isg.si * Pseudo action: ms_drbd_r0_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_r0_promote_0 * Resource action: drbd_r0:0 promote on ibm1.isg.si * Pseudo action: ms_drbd_r0_promoted_0 * Pseudo action: ms_drbd_r0_post_notify_promoted_0 * Resource action: drbd_r0:1 notify on ibm2.isg.si * Resource action: drbd_r0:0 notify on ibm1.isg.si * Pseudo action: ms_drbd_r0_confirmed-post_notify_promoted_0 * Resource action: drbd_r0:0 monitor=15000 on ibm1.isg.si Revised cluster status: Online: [ ibm1.isg.si ibm2.isg.si ] HostingIsg 
(ocf::heartbeat:Xen): Started ibm2.isg.si Master/Slave Set: ms_drbd_r0 [drbd_r0] Masters: [ ibm1.isg.si ibm2.isg.si ] diff --git a/pengine/test10/bug-pm-11.summary b/pengine/test10/bug-pm-11.summary index 828cd7067c..07f2d9ade6 100644 --- a/pengine/test10/bug-pm-11.summary +++ b/pengine/test10/bug-pm-11.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 - stateful-1:0 (ocf::heartbeat:Stateful): Started node-b + stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Stopped Transition Summary: * Start stateful-2:0 (node-b) * Start stateful-2:1 (node-a) * Promote stateful-2:1 (Stopped -> Master node-a) Executing cluster transition: * Resource action: stateful-2:0 monitor on node-b * Resource action: stateful-2:0 monitor on node-a * Resource action: stateful-2:1 monitor on node-b * Resource action: stateful-2:1 monitor on node-a * Pseudo action: ms-sf_start_0 * Pseudo action: group:0_start_0 * Resource action: stateful-2:0 start on node-b * Pseudo action: group:1_start_0 * Resource action: stateful-2:1 start on node-a * Pseudo action: group:0_running_0 * Pseudo action: group:1_running_0 * Pseudo action: ms-sf_running_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-2:1 promote on node-a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 Revised cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 - stateful-1:0 (ocf::heartbeat:Stateful): Started node-b - stateful-2:0 (ocf::heartbeat:Stateful): Started node-b + stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b + stateful-2:0 (ocf::heartbeat:Stateful): Slave node-b Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Master node-a diff --git a/pengine/test10/bug-pm-12.summary b/pengine/test10/bug-pm-12.summary index 55fbfb4dc2..312e4a26e1 100644 --- a/pengine/test10/bug-pm-12.summary +++ b/pengine/test10/bug-pm-12.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 - stateful-1:0 (ocf::heartbeat:Stateful): Started node-b - stateful-2:0 (ocf::heartbeat:Stateful): Started node-b + stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b + stateful-2:0 (ocf::heartbeat:Stateful): Slave node-b Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Master node-a Transition Summary: * Restart stateful-2:0 (Slave node-b) * Restart stateful-2:1 (Master node-a) Executing cluster transition: * Pseudo action: ms-sf_demote_0 * Pseudo action: group:1_demote_0 * Resource action: stateful-2:1 demote on node-a * Pseudo action: group:1_demoted_0 * Pseudo action: ms-sf_demoted_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-2:0 stop on node-b * Pseudo action: group:1_stop_0 * Resource action: stateful-2:1 stop on node-a * Pseudo action: all_stopped * Pseudo action: group:0_stopped_0 * Pseudo action: group:1_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_start_0 * Pseudo action: group:0_start_0 * Resource action: stateful-2:0 start on node-b * Pseudo action: group:1_start_0 * Resource action: stateful-2:1 start on node-a 
* Pseudo action: group:0_running_0 * Pseudo action: group:1_running_0 * Pseudo action: ms-sf_running_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-2:1 promote on node-a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 Revised cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 - stateful-1:0 (ocf::heartbeat:Stateful): Started node-b - stateful-2:0 (ocf::heartbeat:Stateful): Started node-b + stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b + stateful-2:0 (ocf::heartbeat:Stateful): Slave node-b Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Master node-a diff --git a/pengine/test10/clone-no-shuffle.summary b/pengine/test10/clone-no-shuffle.summary index 9cbba90974..59ffbbe864 100644 --- a/pengine/test10/clone-no-shuffle.summary +++ b/pengine/test10/clone-no-shuffle.summary @@ -1,61 +1,60 @@ -2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ dktest1sles10 dktest2sles10 ] stonith-1 (stonith:dummy): Stopped Master/Slave Set: ms-drbd1 [drbd1] Masters: [ dktest2sles10 ] Stopped: [ dktest1sles10 ] testip (ocf::heartbeat:IPaddr2): Started dktest2sles10 Transition Summary: * Start stonith-1 (dktest1sles10) * Demote drbd1:0 (Master -> Stopped dktest2sles10) * Start drbd1:1 (dktest1sles10) * Stop testip (dktest2sles10) Executing cluster transition: * Resource action: stonith-1 monitor on dktest2sles10 * Resource action: stonith-1 monitor on dktest1sles10 * Resource action: drbd1:1 monitor on dktest1sles10 * Pseudo action: ms-drbd1_pre_notify_demote_0 * Resource action: testip monitor on dktest1sles10 * Resource action: stonith-1 start on dktest1sles10 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_demote_0 * Resource action: testip stop on dktest2sles10 * Pseudo action: ms-drbd1_demote_0 * Resource action: drbd1:0 demote on dktest2sles10 * Pseudo action: ms-drbd1_demoted_0 * Pseudo action: ms-drbd1_post_notify_demoted_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_demoted_0 * Pseudo action: ms-drbd1_pre_notify_stop_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_stop_0 * Pseudo action: ms-drbd1_stop_0 * Resource action: drbd1:0 stop on dktest2sles10 * Pseudo action: ms-drbd1_stopped_0 * Pseudo action: ms-drbd1_post_notify_stopped_0 * Pseudo action: ms-drbd1_confirmed-post_notify_stopped_0 * Pseudo action: ms-drbd1_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms-drbd1_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd1_start_0 * Resource action: drbd1:1 start on dktest1sles10 * Pseudo action: ms-drbd1_running_0 * Pseudo action: ms-drbd1_post_notify_running_0 * Resource action: drbd1:1 notify on dktest1sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_running_0 * Resource action: drbd1:1 monitor=11000 on dktest1sles10 Revised cluster status: Online: [ dktest1sles10 dktest2sles10 ] stonith-1 (stonith:dummy): Started dktest1sles10 Master/Slave Set: ms-drbd1 [drbd1] - Slaves (target-role): [ dktest1sles10 ] + Slaves: [ dktest1sles10 ] Stopped: [ dktest2sles10 ] testip (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/coloc-clone-stays-active.summary b/pengine/test10/coloc-clone-stays-active.summary index 34dc2c0e33..b0171377f3 100644 --- 
a/pengine/test10/coloc-clone-stays-active.summary +++ b/pengine/test10/coloc-clone-stays-active.summary @@ -1,207 +1,207 @@ -50 of 87 resources DISABLED and 0 BLOCKED from being started due to failures +12 of 87 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ s01-0 s01-1 ] stonith-s01-0 (stonith:external/ipmi): Started s01-1 stonith-s01-1 (stonith:external/ipmi): Started s01-0 Resource Group: iscsi-pool-0-target-all iscsi-pool-0-target (ocf::vds-ok:iSCSITarget): Started s01-0 iscsi-pool-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-0 Resource Group: iscsi-pool-0-vips vip-235 (ocf::heartbeat:IPaddr2): Started s01-0 vip-236 (ocf::heartbeat:IPaddr2): Started s01-0 Resource Group: iscsi-pool-1-target-all iscsi-pool-1-target (ocf::vds-ok:iSCSITarget): Started s01-1 iscsi-pool-1-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-1 Resource Group: iscsi-pool-1-vips vip-237 (ocf::heartbeat:IPaddr2): Started s01-1 vip-238 (ocf::heartbeat:IPaddr2): Started s01-1 Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1] Masters: [ s01-1 ] - Slaves (target-role): [ s01-0 ] + Slaves: [ s01-0 ] Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] Masters: [ s01-1 ] - Slaves (target-role): [ s01-0 ] + Slaves: [ s01-0 ] Clone Set: cl-o2cb [o2cb] Stopped (disabled): [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-service [drbd-s01-service] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-service-fs [s01-service-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-ietd [ietd] Started: [ s01-0 s01-1 ] Clone Set: cl-dhcpd [dhcpd] Stopped (disabled): [ s01-0 s01-1 ] Resource Group: http-server vip-233 (ocf::heartbeat:IPaddr2): Started s01-0 nginx (lsb:nginx): Stopped ( disabled ) Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-logs-fs [s01-logs-fs] Started: [ s01-0 s01-1 ] Resource Group: syslog-server vip-234 (ocf::heartbeat:IPaddr2): Started s01-1 syslog-ng (ocf::heartbeat:syslog-ng): Started s01-1 Resource Group: tftp-server vip-232 (ocf::heartbeat:IPaddr2): Stopped tftpd (ocf::heartbeat:Xinetd): Stopped Clone Set: cl-xinetd [xinetd] Started: [ s01-0 s01-1 ] Clone Set: cl-ospf-routing [ospf-routing] Started: [ s01-0 s01-1 ] Clone Set: connected-outer [ping-bmc-and-switch] Started: [ s01-0 s01-1 ] Resource Group: iscsi-vds-dom0-stateless-0-target-all iscsi-vds-dom0-stateless-0-target (ocf::vds-ok:iSCSITarget): Stopped ( disabled ) iscsi-vds-dom0-stateless-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Stopped ( disabled ) Resource Group: iscsi-vds-dom0-stateless-0-vips vip-227 (ocf::heartbeat:IPaddr2): Stopped vip-228 (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] - Slaves (target-role): [ s01-0 s01-1 ] + Slaves: [ s01-0 s01-1 ] Clone Set: cl-dlm [dlm] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] Stopped (disabled): [ s01-0 s01-1 ] Clone Set: cl-gfs2 [gfs2] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-http [drbd-vds-http] Masters: [ 
s01-0 s01-1 ] Clone Set: cl-vds-http-fs [vds-http-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-clvmd [clvmd] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data] Started: [ s01-0 s01-1 ] mgmt-vm (ocf::vds-ok:VirtualDomain): Started s01-0 Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service] Started: [ s01-0 s01-1 ] Clone Set: cl-libvirtd [libvirtd] Started: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool] Started: [ s01-0 s01-1 ] Transition Summary: * Migrate mgmt-vm (Started s01-0 -> s01-1) Executing cluster transition: * Resource action: mgmt-vm migrate_to on s01-0 * Resource action: mgmt-vm migrate_from on s01-1 * Resource action: mgmt-vm stop on s01-0 * Pseudo action: all_stopped * Pseudo action: mgmt-vm_start_0 * Resource action: mgmt-vm monitor=10000 on s01-1 Revised cluster status: Online: [ s01-0 s01-1 ] stonith-s01-0 (stonith:external/ipmi): Started s01-1 stonith-s01-1 (stonith:external/ipmi): Started s01-0 Resource Group: iscsi-pool-0-target-all iscsi-pool-0-target (ocf::vds-ok:iSCSITarget): Started s01-0 iscsi-pool-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-0 Resource Group: iscsi-pool-0-vips vip-235 (ocf::heartbeat:IPaddr2): Started s01-0 vip-236 (ocf::heartbeat:IPaddr2): Started s01-0 Resource Group: iscsi-pool-1-target-all iscsi-pool-1-target (ocf::vds-ok:iSCSITarget): Started s01-1 iscsi-pool-1-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-1 Resource Group: iscsi-pool-1-vips vip-237 (ocf::heartbeat:IPaddr2): Started s01-1 vip-238 (ocf::heartbeat:IPaddr2): Started s01-1 Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1] Masters: [ s01-1 ] - Slaves (target-role): [ s01-0 ] + Slaves: [ s01-0 ] Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] Masters: [ s01-1 ] - Slaves (target-role): [ s01-0 ] + Slaves: [ s01-0 ] Clone Set: cl-o2cb [o2cb] Stopped (disabled): [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-service [drbd-s01-service] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-service-fs [s01-service-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-ietd [ietd] Started: [ s01-0 s01-1 ] Clone Set: cl-dhcpd [dhcpd] Stopped (disabled): [ s01-0 s01-1 ] Resource Group: http-server vip-233 (ocf::heartbeat:IPaddr2): Started s01-0 nginx (lsb:nginx): Stopped ( disabled ) Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-logs-fs [s01-logs-fs] Started: [ s01-0 s01-1 ] Resource Group: syslog-server vip-234 (ocf::heartbeat:IPaddr2): Started s01-1 syslog-ng (ocf::heartbeat:syslog-ng): Started s01-1 Resource Group: tftp-server vip-232 (ocf::heartbeat:IPaddr2): Stopped tftpd (ocf::heartbeat:Xinetd): Stopped Clone Set: cl-xinetd [xinetd] Started: [ s01-0 s01-1 ] Clone Set: cl-ospf-routing [ospf-routing] Started: [ s01-0 s01-1 ] Clone Set: connected-outer [ping-bmc-and-switch] Started: [ s01-0 s01-1 ] Resource Group: iscsi-vds-dom0-stateless-0-target-all iscsi-vds-dom0-stateless-0-target (ocf::vds-ok:iSCSITarget): Stopped ( disabled ) iscsi-vds-dom0-stateless-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Stopped ( disabled ) Resource Group: 
iscsi-vds-dom0-stateless-0-vips vip-227 (ocf::heartbeat:IPaddr2): Stopped vip-228 (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] Masters: [ s01-0 ] - Slaves (target-role): [ s01-1 ] + Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] - Slaves (target-role): [ s01-0 s01-1 ] + Slaves: [ s01-0 s01-1 ] Clone Set: cl-dlm [dlm] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] Stopped (disabled): [ s01-0 s01-1 ] Clone Set: cl-gfs2 [gfs2] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-http [drbd-vds-http] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-http-fs [vds-http-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-clvmd [clvmd] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data] Started: [ s01-0 s01-1 ] mgmt-vm (ocf::vds-ok:VirtualDomain): Started s01-1 Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service] Started: [ s01-0 s01-1 ] Clone Set: cl-libvirtd [libvirtd] Started: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool] Started: [ s01-0 s01-1 ] diff --git a/pengine/test10/colocation_constraint_stops_master.summary b/pengine/test10/colocation_constraint_stops_master.summary index f59737d263..e0b69765fb 100644 --- a/pengine/test10/colocation_constraint_stops_master.summary +++ b/pengine/test10/colocation_constraint_stops_master.summary @@ -1,38 +1,37 @@ -2 of 1 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Masters: [ fc16-builder ] Transition Summary: * Demote NATIVE_RSC_A:0 (Master -> Stopped fc16-builder) Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] diff --git a/pengine/test10/colocation_constraint_stops_slave.summary b/pengine/test10/colocation_constraint_stops_slave.summary index 878b83b4bb..fe9e044025 100644 --- a/pengine/test10/colocation_constraint_stops_slave.summary +++ b/pengine/test10/colocation_constraint_stops_slave.summary @@ -1,34 +1,34 @@ -2 of 2 resources DISABLED and 0 BLOCKED from being started due to failures +1 of 
2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] - Slaves (target-role): [ fc16-builder ] + Slaves: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: * Stop NATIVE_RSC_A:0 (fc16-builder) * Stop NATIVE_RSC_B (fc16-builder) Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/inc11.summary b/pengine/test10/inc11.summary index 452a68bfe8..6f4d8ef7b9 100644 --- a/pengine/test10/inc11.summary +++ b/pengine/test10/inc11.summary @@ -1,42 +1,42 @@ Current cluster status: Online: [ node0 node1 node2 ] simple-rsc (heartbeat:apache): Stopped Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped Transition Summary: * Start simple-rsc (node2) * Start child_rsc1:0 (node1) * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) Executing cluster transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: simple-rsc monitor on node0 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:0 monitor on node0 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:1 monitor on node0 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node0 node1 node2 ] simple-rsc (heartbeat:apache): Started node2 Master/Slave Set: rsc1 [child_rsc1] (unique) - child_rsc1:0 (ocf::heartbeat:apache): Started node1 + child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master node2 diff --git a/pengine/test10/inc12.summary b/pengine/test10/inc12.summary index af2315defb..3df5a59751 100644 --- a/pengine/test10/inc12.summary +++ b/pengine/test10/inc12.summary @@ -1,131 +1,131 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 
(ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ] Stopped: [ c001n03 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped - ocf_msdummy:2 (ocf::heartbeat:Stateful): Started c001n04 - ocf_msdummy:3 (ocf::heartbeat:Stateful): Started c001n04 - ocf_msdummy:4 (ocf::heartbeat:Stateful): Started c001n05 - ocf_msdummy:5 (ocf::heartbeat:Stateful): Started c001n05 - ocf_msdummy:6 (ocf::heartbeat:Stateful): Started c001n06 - ocf_msdummy:7 (ocf::heartbeat:Stateful): Started c001n06 - ocf_msdummy:8 (ocf::heartbeat:Stateful): Started c001n07 - ocf_msdummy:9 (ocf::heartbeat:Stateful): Started c001n07 - ocf_msdummy:10 (ocf::heartbeat:Stateful): Started c001n02 - ocf_msdummy:11 (ocf::heartbeat:Stateful): Started c001n02 + ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave c001n04 + ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave c001n04 + ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave c001n05 + ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave c001n05 + ocf_msdummy:6 (ocf::heartbeat:Stateful): Slave c001n06 + ocf_msdummy:7 (ocf::heartbeat:Stateful): Slave c001n06 + ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave c001n07 + ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave c001n07 + ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave c001n02 + ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave c001n02 Transition Summary: * Stop ocf_192.168.100.181 (c001n02) * Stop heartbeat_192.168.100.182 (c001n02) * Stop ocf_192.168.100.183 (c001n02) * Stop lsb_dummy (c001n04) * Stop rsc_c001n03 (c001n05) * Stop rsc_c001n02 (c001n02) * Stop rsc_c001n04 (c001n04) * Stop rsc_c001n05 (c001n05) * Stop rsc_c001n06 (c001n06) * Stop rsc_c001n07 (c001n07) * Stop child_DoFencing:0 (c001n02) * Stop child_DoFencing:1 (c001n04) * Stop child_DoFencing:2 (c001n05) * Stop child_DoFencing:3 (c001n06) * Stop child_DoFencing:4 (c001n07) * Stop ocf_msdummy:10 (c001n02) * Stop ocf_msdummy:11 (c001n02) * Stop ocf_msdummy:2 (c001n04) * Stop ocf_msdummy:3 (c001n04) * Stop ocf_msdummy:4 (c001n05) * Stop ocf_msdummy:5 (c001n05) * Stop ocf_msdummy:6 (c001n06) * Stop ocf_msdummy:7 (c001n06) * Stop ocf_msdummy:8 (c001n07) * Stop ocf_msdummy:9 (c001n07) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n02 * Resource action: lsb_dummy stop on c001n04 * Resource action: rsc_c001n03 stop on c001n05 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n04 stop on c001n04 * Resource action: rsc_c001n05 stop on c001n05 * Resource action: rsc_c001n06 stop on c001n06 * Resource action: rsc_c001n07 stop on c001n07 * Pseudo action: DoFencing_stop_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n02 * Resource action: child_DoFencing:1 stop on c001n02 * Resource action: child_DoFencing:2 stop on c001n04 * Resource action: child_DoFencing:3 stop on c001n05 * Resource action: child_DoFencing:4 stop on c001n06 * Resource action: child_DoFencing:5 stop on c001n07 * Pseudo action: DoFencing_stopped_0 * Resource action: ocf_msdummy:10 stop on c001n02 * Resource action: ocf_msdummy:11 stop on c001n02 * Resource action: ocf_msdummy:2 stop on c001n04 * Resource action: ocf_msdummy:3 stop on c001n04 * Resource action: 
ocf_msdummy:4 stop on c001n05 * Resource action: ocf_msdummy:5 stop on c001n05 * Resource action: ocf_msdummy:6 stop on c001n06 * Resource action: ocf_msdummy:7 stop on c001n06 * Resource action: ocf_msdummy:8 stop on c001n07 * Resource action: ocf_msdummy:9 stop on c001n07 * Pseudo action: master_rsc_1_stopped_0 * Cluster action: do_shutdown on c001n07 * Cluster action: do_shutdown on c001n06 * Cluster action: do_shutdown on c001n05 * Cluster action: do_shutdown on c001n04 * Resource action: ocf_192.168.100.181 stop on c001n02 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Cluster action: do_shutdown on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped heartbeat_192.168.100.182 (heartbeat:IPaddr): Stopped ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/master-0.summary b/pengine/test10/master-0.summary index f86b1fead0..6d2bd02a12 100644 --- a/pengine/test10/master-0.summary +++ b/pengine/test10/master-0.summary @@ -1,45 +1,45 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 (node1) * Start child_rsc1:1 (node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 Revised cluster status: Online: [ node1 node2 ] 
Master/Slave Set: rsc1 [child_rsc1] (unique) - child_rsc1:0 (ocf::heartbeat:apache): Started node1 - child_rsc1:1 (ocf::heartbeat:apache): Started node2 - child_rsc1:2 (ocf::heartbeat:apache): Started node1 - child_rsc1:3 (ocf::heartbeat:apache): Started node2 + child_rsc1:0 (ocf::heartbeat:apache): Slave node1 + child_rsc1:1 (ocf::heartbeat:apache): Slave node2 + child_rsc1:2 (ocf::heartbeat:apache): Slave node1 + child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-1.summary b/pengine/test10/master-1.summary index f82d702347..a45943c376 100644 --- a/pengine/test10/master-1.summary +++ b/pengine/test10/master-1.summary @@ -1,49 +1,49 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 (node1) * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) - child_rsc1:0 (ocf::heartbeat:apache): Started node1 + child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master node2 - child_rsc1:2 (ocf::heartbeat:apache): Started node1 - child_rsc1:3 (ocf::heartbeat:apache): Started node2 + child_rsc1:2 (ocf::heartbeat:apache): Slave node1 + child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-10.summary b/pengine/test10/master-10.summary index 1d469708a6..cd0efc3123 100644 --- a/pengine/test10/master-10.summary +++ b/pengine/test10/master-10.summary @@ -1,73 +1,73 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 (node1) * Promote child_rsc1:0 (Stopped -> Master node1) * Start child_rsc1:1 (node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 
monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc1_pre_notify_promote_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_promote_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:0 promote on node1 * Pseudo action: rsc1_promoted_0 * Pseudo action: rsc1_post_notify_promoted_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_promoted_0 * Resource action: child_rsc1:0 monitor=11000 on node1 * Resource action: child_rsc1:1 monitor=1000 on node2 * Resource action: child_rsc1:2 monitor=1000 on node1 * Resource action: child_rsc1:3 monitor=1000 on node2 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Master node1 - child_rsc1:1 (ocf::heartbeat:apache): Started node2 - child_rsc1:2 (ocf::heartbeat:apache): Started node1 - child_rsc1:3 (ocf::heartbeat:apache): Started node2 + child_rsc1:1 (ocf::heartbeat:apache): Slave node2 + child_rsc1:2 (ocf::heartbeat:apache): Slave node1 + child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-11.summary b/pengine/test10/master-11.summary index 1dc0fe1279..a13760f950 100644 --- a/pengine/test10/master-11.summary +++ b/pengine/test10/master-11.summary @@ -1,39 +1,39 @@ Current cluster status: Online: [ node1 node2 ] simple-rsc (heartbeat:apache): Stopped Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped Transition Summary: * Start simple-rsc (node2) * Start child_rsc1:0 (node1) * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) Executing cluster transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 
 * Pseudo action: rsc1_promoted_0

Revised cluster status:
Online: [ node1 node2 ]

 simple-rsc (heartbeat:apache): Started node2
 Master/Slave Set: rsc1 [child_rsc1] (unique)
-    child_rsc1:0 (ocf::heartbeat:apache): Started node1
+    child_rsc1:0 (ocf::heartbeat:apache): Slave node1
    child_rsc1:1 (ocf::heartbeat:apache): Master node2

diff --git a/pengine/test10/master-12.summary b/pengine/test10/master-12.summary
index b4197988a0..59f2a3b45b 100644
--- a/pengine/test10/master-12.summary
+++ b/pengine/test10/master-12.summary
@@ -1,31 +1,31 @@
Current cluster status:
Online: [ sel3 sel4 ]

 Master/Slave Set: ms-drbd0 [drbd0]
    Masters: [ sel3 ]
    Slaves: [ sel4 ]
 Master/Slave Set: ms-sf [sf] (unique)
-    sf:0 (ocf::heartbeat:Stateful): Started sel3
-    sf:1 (ocf::heartbeat:Stateful): Started sel4
+    sf:0 (ocf::heartbeat:Stateful): Slave sel3
+    sf:1 (ocf::heartbeat:Stateful): Slave sel4
 fs0 (ocf::heartbeat:Filesystem): Started sel3

Transition Summary:
 * Promote sf:0 (Slave -> Master sel3)

Executing cluster transition:
 * Pseudo action: ms-sf_promote_0
 * Resource action: sf:0 promote on sel3
 * Pseudo action: ms-sf_promoted_0

Revised cluster status:
Online: [ sel3 sel4 ]

 Master/Slave Set: ms-drbd0 [drbd0]
    Masters: [ sel3 ]
    Slaves: [ sel4 ]
 Master/Slave Set: ms-sf [sf] (unique)
    sf:0 (ocf::heartbeat:Stateful): Master sel3
-    sf:1 (ocf::heartbeat:Stateful): Started sel4
+    sf:1 (ocf::heartbeat:Stateful): Slave sel4
 fs0 (ocf::heartbeat:Filesystem): Started sel3

diff --git a/pengine/test10/master-13.summary b/pengine/test10/master-13.summary
index 304f19cfc2..1488a48fc4 100644
--- a/pengine/test10/master-13.summary
+++ b/pengine/test10/master-13.summary
@@ -1,61 +1,60 @@
-3 of 4 resources DISABLED and 0 BLOCKED from being started due to failures

Current cluster status:
Online: [ frigg odin ]

 Master/Slave Set: ms_drbd [drbd0]
    Masters: [ frigg ]
-    Slaves (target-role): [ odin ]
+    Slaves: [ odin ]
 Resource Group: group
    IPaddr0 (ocf::heartbeat:IPaddr): Stopped
    MailTo (ocf::heartbeat:MailTo): Stopped

Transition Summary:
 * Promote drbd0:0 (Slave -> Master odin)
 * Demote drbd0:1 (Master -> Slave frigg)
 * Start IPaddr0 (odin)
 * Start MailTo (odin)

Executing cluster transition:
 * Resource action: drbd0:1 cancel=12000 on odin
 * Resource action: drbd0:0 cancel=10000 on frigg
 * Pseudo action: ms_drbd_pre_notify_demote_0
 * Resource action: drbd0:1 notify on odin
 * Resource action: drbd0:0 notify on frigg
 * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0
 * Pseudo action: ms_drbd_demote_0
 * Resource action: drbd0:0 demote on frigg
 * Pseudo action: ms_drbd_demoted_0
 * Pseudo action: ms_drbd_post_notify_demoted_0
 * Resource action: drbd0:1 notify on odin
 * Resource action: drbd0:0 notify on frigg
 * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0
 * Pseudo action: ms_drbd_pre_notify_promote_0
 * Resource action: drbd0:1 notify on odin
 * Resource action: drbd0:0 notify on frigg
 * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0
 * Pseudo action: ms_drbd_promote_0
 * Resource action: drbd0:1 promote on odin
 * Pseudo action: ms_drbd_promoted_0
 * Pseudo action: ms_drbd_post_notify_promoted_0
 * Resource action: drbd0:1 notify on odin
 * Resource action: drbd0:0 notify on frigg
 * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0
 * Pseudo action: group_start_0
 * Resource action: IPaddr0 start on odin
 * Resource action: MailTo start on odin
 * Resource action: drbd0:1 monitor=10000 on odin
 * Resource action: drbd0:0 monitor=12000 on frigg
 * Pseudo action: group_running_0
 * Resource action: IPaddr0 monitor=5000 on odin
Revised cluster status:
Online: [ frigg odin ]

 Master/Slave Set: ms_drbd [drbd0]
    Masters: [ odin ]
-    Slaves (target-role): [ frigg ]
+    Slaves: [ frigg ]
 Resource Group: group
    IPaddr0 (ocf::heartbeat:IPaddr): Started odin
    MailTo (ocf::heartbeat:MailTo): Started odin

diff --git a/pengine/test10/master-2.summary b/pengine/test10/master-2.summary
index 3074e202e6..b8f5447c7f 100644
--- a/pengine/test10/master-2.summary
+++ b/pengine/test10/master-2.summary
@@ -1,69 +1,69 @@
Current cluster status:
Online: [ node1 node2 ]

 Master/Slave Set: rsc1 [child_rsc1] (unique)
    child_rsc1:0 (ocf::heartbeat:apache): Stopped
    child_rsc1:1 (ocf::heartbeat:apache): Stopped
    child_rsc1:2 (ocf::heartbeat:apache): Stopped
    child_rsc1:3 (ocf::heartbeat:apache): Stopped
    child_rsc1:4 (ocf::heartbeat:apache): Stopped

Transition Summary:
 * Start child_rsc1:0 (node1)
 * Promote child_rsc1:0 (Stopped -> Master node1)
 * Start child_rsc1:1 (node2)
 * Start child_rsc1:2 (node1)
 * Start child_rsc1:3 (node2)

Executing cluster transition:
 * Resource action: child_rsc1:0 monitor on node2
 * Resource action: child_rsc1:0 monitor on node1
 * Resource action: child_rsc1:1 monitor on node2
 * Resource action: child_rsc1:1 monitor on node1
 * Resource action: child_rsc1:2 monitor on node2
 * Resource action: child_rsc1:2 monitor on node1
 * Resource action: child_rsc1:3 monitor on node2
 * Resource action: child_rsc1:3 monitor on node1
 * Resource action: child_rsc1:4 monitor on node2
 * Resource action: child_rsc1:4 monitor on node1
 * Pseudo action: rsc1_pre_notify_start_0
 * Pseudo action: rsc1_confirmed-pre_notify_start_0
 * Pseudo action: rsc1_start_0
 * Resource action: child_rsc1:0 start on node1
 * Resource action: child_rsc1:1 start on node2
 * Resource action: child_rsc1:2 start on node1
 * Resource action: child_rsc1:3 start on node2
 * Pseudo action: rsc1_running_0
 * Pseudo action: rsc1_post_notify_running_0
 * Resource action: child_rsc1:0 notify on node1
 * Resource action: child_rsc1:1 notify on node2
 * Resource action: child_rsc1:2 notify on node1
 * Resource action: child_rsc1:3 notify on node2
 * Pseudo action: rsc1_confirmed-post_notify_running_0
 * Pseudo action: rsc1_pre_notify_promote_0
 * Resource action: child_rsc1:0 notify on node1
 * Resource action: child_rsc1:1 notify on node2
 * Resource action: child_rsc1:2 notify on node1
 * Resource action: child_rsc1:3 notify on node2
 * Pseudo action: rsc1_confirmed-pre_notify_promote_0
 * Pseudo action: rsc1_promote_0
 * Resource action: child_rsc1:0 promote on node1
 * Pseudo action: rsc1_promoted_0
 * Pseudo action: rsc1_post_notify_promoted_0
 * Resource action: child_rsc1:0 notify on node1
 * Resource action: child_rsc1:1 notify on node2
 * Resource action: child_rsc1:2 notify on node1
 * Resource action: child_rsc1:3 notify on node2
 * Pseudo action: rsc1_confirmed-post_notify_promoted_0

Revised cluster status:
Online: [ node1 node2 ]

 Master/Slave Set: rsc1 [child_rsc1] (unique)
    child_rsc1:0 (ocf::heartbeat:apache): Master node1
-    child_rsc1:1 (ocf::heartbeat:apache): Started node2
-    child_rsc1:2 (ocf::heartbeat:apache): Started node1
-    child_rsc1:3 (ocf::heartbeat:apache): Started node2
+    child_rsc1:1 (ocf::heartbeat:apache): Slave node2
+    child_rsc1:2 (ocf::heartbeat:apache): Slave node1
+    child_rsc1:3 (ocf::heartbeat:apache): Slave node2
    child_rsc1:4 (ocf::heartbeat:apache): Stopped

diff --git a/pengine/test10/master-3.summary b/pengine/test10/master-3.summary
index f82d702347..a45943c376 100644
--- a/pengine/test10/master-3.summary
+++ b/pengine/test10/master-3.summary
@@ -1,49 +1,49 @@
Current cluster status:
Online: [ node1 node2 ]

 Master/Slave Set: rsc1 [child_rsc1] (unique)
    child_rsc1:0 (ocf::heartbeat:apache): Stopped
    child_rsc1:1 (ocf::heartbeat:apache): Stopped
    child_rsc1:2 (ocf::heartbeat:apache): Stopped
    child_rsc1:3 (ocf::heartbeat:apache): Stopped
    child_rsc1:4 (ocf::heartbeat:apache): Stopped

Transition Summary:
 * Start child_rsc1:0 (node1)
 * Start child_rsc1:1 (node2)
 * Promote child_rsc1:1 (Stopped -> Master node2)
 * Start child_rsc1:2 (node1)
 * Start child_rsc1:3 (node2)

Executing cluster transition:
 * Resource action: child_rsc1:0 monitor on node2
 * Resource action: child_rsc1:0 monitor on node1
 * Resource action: child_rsc1:1 monitor on node2
 * Resource action: child_rsc1:1 monitor on node1
 * Resource action: child_rsc1:2 monitor on node2
 * Resource action: child_rsc1:2 monitor on node1
 * Resource action: child_rsc1:3 monitor on node2
 * Resource action: child_rsc1:3 monitor on node1
 * Resource action: child_rsc1:4 monitor on node2
 * Resource action: child_rsc1:4 monitor on node1
 * Pseudo action: rsc1_start_0
 * Resource action: child_rsc1:0 start on node1
 * Resource action: child_rsc1:1 start on node2
 * Resource action: child_rsc1:2 start on node1
 * Resource action: child_rsc1:3 start on node2
 * Pseudo action: rsc1_running_0
 * Pseudo action: rsc1_promote_0
 * Resource action: child_rsc1:1 promote on node2
 * Pseudo action: rsc1_promoted_0

Revised cluster status:
Online: [ node1 node2 ]

 Master/Slave Set: rsc1 [child_rsc1] (unique)
-    child_rsc1:0 (ocf::heartbeat:apache): Started node1
+    child_rsc1:0 (ocf::heartbeat:apache): Slave node1
    child_rsc1:1 (ocf::heartbeat:apache): Master node2
-    child_rsc1:2 (ocf::heartbeat:apache): Started node1
-    child_rsc1:3 (ocf::heartbeat:apache): Started node2
+    child_rsc1:2 (ocf::heartbeat:apache): Slave node1
+    child_rsc1:3 (ocf::heartbeat:apache): Slave node2
    child_rsc1:4 (ocf::heartbeat:apache): Stopped

diff --git a/pengine/test10/master-4.summary b/pengine/test10/master-4.summary
index c9c2c87640..54d2213db6 100644
--- a/pengine/test10/master-4.summary
+++ b/pengine/test10/master-4.summary
@@ -1,92 +1,92 @@
Current cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_child (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_child (heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n01
    child_DoFencing:3 (stonith:ssh): Started c001n02
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
-    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
+    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02

Transition Summary:
 * Promote ocf_msdummy:0 (Slave -> Master c001n08)

Executing cluster transition:
 * Resource action: child_DoFencing:1 monitor on c001n08
 * Resource action: child_DoFencing:1 monitor on c001n02
 * Resource action: child_DoFencing:1 monitor on c001n01
 * Resource action: child_DoFencing:2 monitor on c001n08
 * Resource action: child_DoFencing:2 monitor on c001n03
 * Resource action: child_DoFencing:2 monitor on c001n02
 * Resource action: child_DoFencing:3 monitor on c001n08
 * Resource action: child_DoFencing:3 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n01
 * Resource action: ocf_msdummy:0 cancel=5000 on c001n08
 * Resource action: ocf_msdummy:2 monitor on c001n08
 * Resource action: ocf_msdummy:2 monitor on c001n03
 * Resource action: ocf_msdummy:2 monitor on c001n02
 * Resource action: ocf_msdummy:3 monitor on c001n03
 * Resource action: ocf_msdummy:3 monitor on c001n02
 * Resource action: ocf_msdummy:3 monitor on c001n01
 * Resource action: ocf_msdummy:4 monitor on c001n08
 * Resource action: ocf_msdummy:4 monitor on c001n02
 * Resource action: ocf_msdummy:4 monitor on c001n01
 * Resource action: ocf_msdummy:5 monitor on c001n08
 * Resource action: ocf_msdummy:5 monitor on c001n03
 * Resource action: ocf_msdummy:5 monitor on c001n02
 * Resource action: ocf_msdummy:6 monitor on c001n08
 * Resource action: ocf_msdummy:6 monitor on c001n03
 * Resource action: ocf_msdummy:6 monitor on c001n01
 * Resource action: ocf_msdummy:7 monitor on c001n08
 * Resource action: ocf_msdummy:7 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n01
 * Pseudo action: master_rsc_1_promote_0
 * Resource action: ocf_msdummy:0 promote on c001n08
 * Pseudo action: master_rsc_1_promoted_0
 * Resource action: ocf_msdummy:0 monitor=6000 on c001n08

Revised cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_child (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_child (heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n01
    child_DoFencing:3 (stonith:ssh): Started c001n02
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02

diff --git a/pengine/test10/master-5.summary b/pengine/test10/master-5.summary
index 7ff0a0f434..771bef5ab8 100644
--- a/pengine/test10/master-5.summary
+++ b/pengine/test10/master-5.summary
@@ -1,86 +1,86 @@
Current cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_child (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_child (heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n01
    child_DoFencing:3 (stonith:ssh): Started c001n02
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02

Transition Summary:

Executing cluster transition:
 * Resource action: child_DoFencing:1 monitor on c001n08
 * Resource action: child_DoFencing:1 monitor on c001n02
 * Resource action: child_DoFencing:1 monitor on c001n01
 * Resource action: child_DoFencing:2 monitor on c001n08
 * Resource action: child_DoFencing:2 monitor on c001n03
 * Resource action: child_DoFencing:2 monitor on c001n02
 * Resource action: child_DoFencing:3 monitor on c001n08
 * Resource action: child_DoFencing:3 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n01
 * Resource action: ocf_msdummy:2 monitor on c001n08
 * Resource action: ocf_msdummy:2 monitor on c001n03
 * Resource action: ocf_msdummy:2 monitor on c001n02
 * Resource action: ocf_msdummy:3 monitor on c001n03
 * Resource action: ocf_msdummy:3 monitor on c001n02
 * Resource action: ocf_msdummy:3 monitor on c001n01
 * Resource action: ocf_msdummy:4 monitor on c001n08
 * Resource action: ocf_msdummy:4 monitor on c001n02
 * Resource action: ocf_msdummy:4 monitor on c001n01
 * Resource action: ocf_msdummy:5 monitor on c001n08
 * Resource action: ocf_msdummy:5 monitor on c001n03
 * Resource action: ocf_msdummy:5 monitor on c001n02
 * Resource action: ocf_msdummy:6 monitor on c001n08
 * Resource action: ocf_msdummy:6 monitor on c001n03
 * Resource action: ocf_msdummy:6 monitor on c001n01
 * Resource action: ocf_msdummy:7 monitor on c001n08
 * Resource action: ocf_msdummy:7 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n01

Revised cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_child (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_child (heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n01
    child_DoFencing:3 (stonith:ssh): Started c001n02
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02

diff --git a/pengine/test10/master-6.summary b/pengine/test10/master-6.summary
index 4df46f2074..5d49158360 100644
--- a/pengine/test10/master-6.summary
+++ b/pengine/test10/master-6.summary
@@ -1,85 +1,85 @@
Current cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n02
    child_DoFencing:2 (stonith:ssh): Started c001n03
    child_DoFencing:3 (stonith:ssh): Started c001n01
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01

Transition Summary:

Executing cluster transition:
 * Resource action: child_DoFencing:1 monitor on c001n08
 * Resource action: child_DoFencing:1 monitor on c001n03
 * Resource action: child_DoFencing:1 monitor on c001n01
 * Resource action: child_DoFencing:2 monitor on c001n08
 * Resource action: child_DoFencing:2 monitor on c001n01
 * Resource action: child_DoFencing:3 monitor on c001n08
 * Resource action: child_DoFencing:3 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n02
 * Resource action: ocf_msdummy:2 monitor on c001n08
 * Resource action: ocf_msdummy:2 monitor on c001n01
 * Resource action: ocf_msdummy:3 monitor on c001n03
 * Resource action: ocf_msdummy:3 monitor on c001n01
 * Resource action: ocf_msdummy:4 monitor on c001n08
 * Resource action: ocf_msdummy:4 monitor on c001n03
 * Resource action: ocf_msdummy:4 monitor on c001n01
 * Resource action: ocf_msdummy:5 monitor on c001n08
 * Resource action: ocf_msdummy:5 monitor on c001n02
 * Resource action: ocf_msdummy:5 monitor on c001n01
 * Resource action: ocf_msdummy:6 monitor on c001n08
 * Resource action: ocf_msdummy:6 monitor on c001n03
 * Resource action: ocf_msdummy:6 monitor on c001n02
 * Resource action: ocf_msdummy:7 monitor on c001n08
 * Resource action: ocf_msdummy:7 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n02

Revised cluster status:
Online: [ c001n01 c001n02 c001n03 c001n08 ]
 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n08
    child_DoFencing:1 (stonith:ssh): Started c001n02
    child_DoFencing:2 (stonith:ssh): Started c001n03
    child_DoFencing:3 (stonith:ssh): Started c001n01
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01

diff --git a/pengine/test10/master-7.summary b/pengine/test10/master-7.summary
index 708be8cb35..747254246b 100644
--- a/pengine/test10/master-7.summary
+++ b/pengine/test10/master-7.summary
@@ -1,120 +1,120 @@
Current cluster status:
Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
Online: [ c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n02
    child_DoFencing:3 (stonith:ssh): Started c001n08
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN)
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
-    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n01 (UNCLEAN)
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
+    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 ( UNCLEAN )
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08

Transition Summary:
 * Move DcIPaddr (Started c001n01 -> c001n03)
 * Move ocf_192.168.100.181 (Started c001n03 -> c001n02)
 * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02)
 * Move ocf_192.168.100.183 (Started c001n03 -> c001n02)
 * Move lsb_dummy (Started c001n02 -> c001n08)
 * Move rsc_c001n01 (Started c001n01 -> c001n03)
 * Stop child_DoFencing:0 (c001n01)
 * Demote ocf_msdummy:0 (Master -> Stopped c001n01)
 * Stop ocf_msdummy:4 (c001n01)

Executing cluster transition:
 * Pseudo action: group-1_stop_0
 * Resource action: ocf_192.168.100.183 stop on c001n03
 * Resource action: lsb_dummy stop on c001n02
 * Resource action: child_DoFencing:2 monitor on c001n08
 * Resource action: child_DoFencing:2 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n02
 * Resource action: ocf_msdummy:4 monitor on c001n08
 * Resource action: ocf_msdummy:4 monitor on c001n03
 * Resource action: ocf_msdummy:4 monitor on c001n02
 * Resource action: ocf_msdummy:5 monitor on c001n08
 * Resource action: ocf_msdummy:5 monitor on c001n02
 * Resource action: ocf_msdummy:6 monitor on c001n08
 * Resource action: ocf_msdummy:6 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n02
 * Pseudo action: master_rsc_1_demote_0
 * Fencing c001n01 (reboot)
 * Pseudo action: stonith_complete
 * Pseudo action: DcIPaddr_stop_0
 * Resource action: heartbeat_192.168.100.182 stop on c001n03
 * Resource action: lsb_dummy start on c001n08
 * Pseudo action: rsc_c001n01_stop_0
 * Pseudo action: DoFencing_stop_0
 * Pseudo action: ocf_msdummy:0_demote_0
 * Pseudo action: master_rsc_1_demoted_0
 * Pseudo action: master_rsc_1_stop_0
 * Resource action: DcIPaddr start on c001n03
 * Resource action: ocf_192.168.100.181 stop on c001n03
 * Resource action: lsb_dummy monitor=5000 on c001n08
 * Resource action: rsc_c001n01 start on c001n03
 * Pseudo action: child_DoFencing:0_stop_0
 * Pseudo action: DoFencing_stopped_0
 * Pseudo action: ocf_msdummy:0_stop_0
 * Pseudo action: ocf_msdummy:4_stop_0
 * Pseudo action: master_rsc_1_stopped_0
 * Pseudo action: all_stopped
 * Resource action: DcIPaddr monitor=5000 on c001n03
 * Pseudo action: group-1_stopped_0
 * Pseudo action: group-1_start_0
 * Resource action: ocf_192.168.100.181 start on c001n02
 * Resource action: heartbeat_192.168.100.182 start on c001n02
 * Resource action: ocf_192.168.100.183 start on c001n02
 * Resource action: rsc_c001n01 monitor=5000 on c001n03
 * Pseudo action: group-1_running_0
 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02

Revised cluster status:
Online: [ c001n02 c001n03 c001n08 ]
OFFLINE: [ c001n01 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Stopped
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n02
    child_DoFencing:3 (stonith:ssh): Started c001n08
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
-    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08

diff --git a/pengine/test10/master-8.summary b/pengine/test10/master-8.summary
index d9f793bb06..450737b50e 100644
--- a/pengine/test10/master-8.summary
+++ b/pengine/test10/master-8.summary
@@ -1,124 +1,124 @@
Current cluster status:
Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
Online: [ c001n02 c001n03 c001n08 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n02
    child_DoFencing:3 (stonith:ssh): Started c001n08
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN)
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08

Transition Summary:
 * Move DcIPaddr (Started c001n01 -> c001n03)
 * Move ocf_192.168.100.181 (Started c001n03 -> c001n02)
 * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02)
 * Move ocf_192.168.100.183 (Started c001n03 -> c001n02)
 * Move lsb_dummy (Started c001n02 -> c001n08)
 * Move rsc_c001n01 (Started c001n01 -> c001n03)
 * Stop child_DoFencing:0 (c001n01)
 * Demote ocf_msdummy:0 (Master -> Slave c001n01 - blocked)
 * Move ocf_msdummy:0 (Slave c001n01 -> c001n03)

Executing cluster transition:
 * Pseudo action: group-1_stop_0
 * Resource action: ocf_192.168.100.183 stop on c001n03
 * Resource action: lsb_dummy stop on c001n02
 * Resource action: child_DoFencing:2 monitor on c001n08
 * Resource action: child_DoFencing:2 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n03
 * Resource action: child_DoFencing:3 monitor on c001n02
 * Resource action: ocf_msdummy:4 monitor on c001n08
 * Resource action: ocf_msdummy:4 monitor on c001n03
 * Resource action: ocf_msdummy:4 monitor on c001n02
 * Resource action: ocf_msdummy:5 monitor on c001n08
 * Resource action: ocf_msdummy:5 monitor on c001n03
 * Resource action: ocf_msdummy:5 monitor on c001n02
 * Resource action: ocf_msdummy:6 monitor on c001n08
 * Resource action: ocf_msdummy:6 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n03
 * Resource action: ocf_msdummy:7 monitor on c001n02
 * Pseudo action: master_rsc_1_demote_0
 * Fencing c001n01 (reboot)
 * Pseudo action: stonith_complete
 * Pseudo action: DcIPaddr_stop_0
 * Resource action: heartbeat_192.168.100.182 stop on c001n03
 * Resource action: lsb_dummy start on c001n08
 * Pseudo action: rsc_c001n01_stop_0
 * Pseudo action: DoFencing_stop_0
 * Pseudo action: ocf_msdummy:0_demote_0
 * Pseudo action: master_rsc_1_demoted_0
 * Pseudo action: master_rsc_1_stop_0
 * Resource action: DcIPaddr start on c001n03
 * Resource action: ocf_192.168.100.181 stop on c001n03
 * Resource action: lsb_dummy monitor=5000 on c001n08
 * Resource action: rsc_c001n01 start on c001n03
 * Pseudo action: child_DoFencing:0_stop_0
 * Pseudo action: DoFencing_stopped_0
 * Pseudo action: ocf_msdummy:0_stop_0
 * Pseudo action: master_rsc_1_stopped_0
 * Pseudo action: master_rsc_1_start_0
 * Pseudo action: all_stopped
 * Resource action: DcIPaddr monitor=5000 on c001n03
 * Pseudo action: group-1_stopped_0
 * Pseudo action: group-1_start_0
 * Resource action: ocf_192.168.100.181 start on c001n02
 * Resource action: heartbeat_192.168.100.182 start on c001n02
 * Resource action: ocf_192.168.100.183 start on c001n02
 * Resource action: rsc_c001n01 monitor=5000 on c001n03
 * Resource action: ocf_msdummy:0 start on c001n03
 * Pseudo action: master_rsc_1_running_0
 * Pseudo action: group-1_running_0
 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 * Resource action: ocf_msdummy:0 monitor=5000 on c001n03

Revised cluster status:
Online: [ c001n02 c001n03 c001n08 ]
OFFLINE: [ c001n01 ]

 DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
 Resource Group: group-1
    ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
    heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
    ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03
 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
 Clone Set: DoFencing [child_DoFencing] (unique)
    child_DoFencing:0 (stonith:ssh): Stopped
    child_DoFencing:1 (stonith:ssh): Started c001n03
    child_DoFencing:2 (stonith:ssh): Started c001n02
    child_DoFencing:3 (stonith:ssh): Started c001n08
 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
-    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n03
-    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
+    ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
    ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
    ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
-    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02
-    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n08
+    ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
+    ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08

diff --git a/pengine/test10/master-colocation.summary b/pengine/test10/master-colocation.summary
index 324cf40f43..c5d708bc27 100644
--- a/pengine/test10/master-colocation.summary
+++ b/pengine/test10/master-colocation.summary
@@ -1,33 +1,32 @@
-2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures

Current cluster status:
Online: [ box1 box2 ]

 Master/Slave Set: ms-conntrackd [conntrackd-stateful]
-    Slaves (target-role): [ box1 box2 ]
+    Slaves: [ box1 box2 ]
 Resource Group: virtualips
    externalip (ocf::heartbeat:IPaddr2): Started box2
    internalip (ocf::heartbeat:IPaddr2): Started box2
    sship (ocf::heartbeat:IPaddr2): Started box2

Transition Summary:
 * Promote conntrackd-stateful:1 (Slave -> Master box2)

Executing cluster transition:
 * Resource action: conntrackd-stateful:0 monitor=29000 on box1
 * Pseudo action: ms-conntrackd_promote_0
 * Resource action: conntrackd-stateful:1 promote on box2
 * Pseudo action: ms-conntrackd_promoted_0
 * Resource action: conntrackd-stateful:1 monitor=30000 on box2

Revised cluster status:
Online: [ box1 box2 ]

 Master/Slave Set: ms-conntrackd [conntrackd-stateful]
    Masters: [ box2 ]
-    Slaves (target-role): [ box1 ]
+    Slaves: [ box1 ]
 Resource Group: virtualips
    externalip (ocf::heartbeat:IPaddr2): Started box2
    internalip (ocf::heartbeat:IPaddr2): Started box2
    sship (ocf::heartbeat:IPaddr2): Started box2

diff --git a/pengine/test10/master-depend.summary b/pengine/test10/master-depend.summary
index 51bb3ebfa8..e6f33cb7fd 100644
--- a/pengine/test10/master-depend.summary
+++ b/pengine/test10/master-depend.summary
@@ -1,59 +1,59 @@
-5 of 10 resources DISABLED and 0 BLOCKED from being started due to failures
+3 of 10 resources DISABLED and 0 BLOCKED from being started due to failures

Current cluster status:
Online: [ vbox4 ]
OFFLINE: [ vbox3 ]

 Master/Slave Set: drbd [drbd0]
    Stopped: [ vbox3 vbox4 ]
 Clone Set: cman_clone [cman]
    Stopped: [ vbox3 vbox4 ]
 Clone Set: clvmd_clone [clvmd]
    Stopped: [ vbox3 vbox4 ]
 vmnci36 (ocf::heartbeat:vm): Stopped
 vmnci37 (ocf::heartbeat:vm): Stopped ( disabled )
 vmnci38 (ocf::heartbeat:vm): Stopped ( disabled )
 vmnci55 (ocf::heartbeat:vm): Stopped ( disabled )

Transition Summary:
 * Start drbd0:0 (vbox4)
 * Start cman:0 (vbox4)

Executing cluster transition:
 * Resource action: drbd0:0 monitor on vbox4
 * Pseudo action: drbd_pre_notify_start_0
 * Resource action: cman:0 monitor on vbox4
 * Pseudo action: cman_clone_start_0
 * Resource action: clvmd:0 monitor on vbox4
 * Resource action: vmnci36 monitor on vbox4
 * Resource action: vmnci37 monitor on vbox4
 * Resource action: vmnci38 monitor on vbox4
 * Resource action: vmnci55 monitor on vbox4
 * Pseudo action: drbd_confirmed-pre_notify_start_0
 * Pseudo action: drbd_start_0
 * Resource action: cman:0 start on vbox4
 * Pseudo action: cman_clone_running_0
 * Resource action: drbd0:0 start on vbox4
 * Pseudo action: drbd_running_0
 * Pseudo action: drbd_post_notify_running_0
 * Resource action: drbd0:0 notify on vbox4
 * Pseudo action: drbd_confirmed-post_notify_running_0
 * Resource action: drbd0:0 monitor=60000 on vbox4

Revised cluster status:
Online: [ vbox4 ]
OFFLINE: [ vbox3 ]

 Master/Slave Set: drbd [drbd0]
-    Slaves (target-role): [ vbox4 ]
+    Slaves: [ vbox4 ]
    Stopped: [ vbox3 ]
 Clone Set: cman_clone [cman]
    Started: [ vbox4 ]
    Stopped: [ vbox3 ]
 Clone Set: clvmd_clone [clvmd]
    Stopped: [ vbox3 vbox4 ]
 vmnci36 (ocf::heartbeat:vm): Stopped
 vmnci37 (ocf::heartbeat:vm): Stopped ( disabled )
 vmnci38 (ocf::heartbeat:vm): Stopped ( disabled )
 vmnci55 (ocf::heartbeat:vm): Stopped ( disabled )

diff --git a/pengine/test10/master-failed-demote-2.summary b/pengine/test10/master-failed-demote-2.summary
index 6a754bcfc5..847e0a1536 100644
--- a/pengine/test10/master-failed-demote-2.summary
+++ b/pengine/test10/master-failed-demote-2.summary
@@ -1,46 +1,46 @@
Current cluster status:
Online: [ dl380g5a dl380g5b ]

 Master/Slave Set: ms-sf [group] (unique)
    Resource Group: group:0
        stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b
        stateful-2:0 (ocf::heartbeat:Stateful): Stopped
    Resource Group: group:1
-        stateful-1:1 (ocf::heartbeat:Stateful): Started dl380g5a
-        stateful-2:1 (ocf::heartbeat:Stateful): Started dl380g5a
+        stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a
+        stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a

Transition Summary:
 * Stop stateful-1:0 (dl380g5b)
 * Promote stateful-1:1 (Slave -> Master dl380g5a)
 * Promote stateful-2:1 (Slave -> Master dl380g5a)

Executing cluster transition:
 * Resource action: stateful-1:1 cancel=20000 on dl380g5a
 * Resource action: stateful-2:1 cancel=20000 on dl380g5a
 * Pseudo action: ms-sf_stop_0
 * Pseudo action: group:0_stop_0
 * Resource action: stateful-1:0 stop on dl380g5b
 * Pseudo action: all_stopped
 * Pseudo action: group:0_stopped_0
 * Pseudo action: ms-sf_stopped_0
 * Pseudo action: ms-sf_promote_0
 * Pseudo action: group:1_promote_0
 * Resource action: stateful-1:1 promote on dl380g5a
 * Resource action: stateful-2:1 promote on dl380g5a
 * Pseudo action: group:1_promoted_0
 * Resource action: stateful-1:1 monitor=10000 on dl380g5a
 * Resource action: stateful-2:1 monitor=10000 on dl380g5a
 * Pseudo action: ms-sf_promoted_0

Revised cluster status:
Online: [ dl380g5a dl380g5b ]

 Master/Slave Set: ms-sf [group] (unique)
    Resource Group: group:0
        stateful-1:0 (ocf::heartbeat:Stateful): Stopped
        stateful-2:0 (ocf::heartbeat:Stateful): Stopped
    Resource Group: group:1
        stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a
        stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a

diff --git a/pengine/test10/master-failed-demote.summary b/pengine/test10/master-failed-demote.summary
index 9288c0be11..cc3fbee717 100644
--- a/pengine/test10/master-failed-demote.summary
+++ b/pengine/test10/master-failed-demote.summary
@@ -1,63 +1,63 @@
Current cluster status:
Online: [ dl380g5a dl380g5b ]

 Master/Slave Set: ms-sf [group] (unique)
    Resource Group: group:0
        stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b
        stateful-2:0 (ocf::heartbeat:Stateful): Stopped
    Resource Group: group:1
-        stateful-1:1 (ocf::heartbeat:Stateful): Started dl380g5a
-        stateful-2:1 (ocf::heartbeat:Stateful): Started dl380g5a
+        stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a
+        stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a

Transition Summary:
 * Stop stateful-1:0 (dl380g5b)
 * Promote stateful-1:1 (Slave -> Master dl380g5a)
 * Promote stateful-2:1 (Slave -> Master dl380g5a)

Executing cluster transition:
 * Resource action: stateful-1:1 cancel=20000 on dl380g5a
 * Resource action: stateful-2:1 cancel=20000 on dl380g5a
 * Pseudo action: ms-sf_pre_notify_stop_0
 * Resource action: stateful-1:0 notify on dl380g5b
 * Resource action: stateful-1:1 notify on dl380g5a
 * Resource action: stateful-2:1 notify on dl380g5a
 * Pseudo action: ms-sf_confirmed-pre_notify_stop_0
 * Pseudo action: ms-sf_stop_0
 * Pseudo action: group:0_stop_0
 * Resource action: stateful-1:0 stop on dl380g5b
 * Pseudo action: group:0_stopped_0
 * Pseudo action: ms-sf_stopped_0
 * Pseudo action: ms-sf_post_notify_stopped_0
 * Resource action: stateful-1:1 notify on dl380g5a
 * Resource action: stateful-2:1 notify on dl380g5a
 * Pseudo action: ms-sf_confirmed-post_notify_stopped_0
 * Pseudo action: all_stopped
 * Pseudo action: ms-sf_pre_notify_promote_0
 * Resource action: stateful-1:1 notify on dl380g5a
 * Resource action: stateful-2:1 notify on dl380g5a
 * Pseudo action: ms-sf_confirmed-pre_notify_promote_0
 * Pseudo action: ms-sf_promote_0
 * Pseudo action: group:1_promote_0
 * Resource action: stateful-1:1 promote on dl380g5a
 * Resource action: stateful-2:1 promote on dl380g5a
 * Pseudo action: group:1_promoted_0
 * Pseudo action: ms-sf_promoted_0
 * Pseudo action: ms-sf_post_notify_promoted_0
 * Resource action: stateful-1:1 notify on dl380g5a
 * Resource action: stateful-2:1 notify on dl380g5a
 * Pseudo action: ms-sf_confirmed-post_notify_promoted_0
 * Resource action: stateful-1:1 monitor=10000 on dl380g5a
 * Resource action: stateful-2:1 monitor=10000 on dl380g5a

Revised cluster status:
Online: [ dl380g5a dl380g5b ]

 Master/Slave Set: ms-sf [group] (unique)
    Resource Group: group:0
        stateful-1:0 (ocf::heartbeat:Stateful): Stopped
        stateful-2:0 (ocf::heartbeat:Stateful): Stopped
    Resource Group: group:1
        stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a
        stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a

diff --git a/pengine/test10/master-group.summary b/pengine/test10/master-group.summary
index c1560cd080..397401083e 100644
--- a/pengine/test10/master-group.summary
+++ b/pengine/test10/master-group.summary
@@ -1,35 +1,35 @@
Current cluster status:
Online: [ rh44-1 rh44-2 ]

 Resource Group: test
    resource_1 (ocf::heartbeat:IPaddr): Started rh44-1
 Master/Slave Set: ms-sf [grp_ms_sf] (unique)
    Resource Group: grp_ms_sf:0
-        master_slave_Stateful:0 (ocf::heartbeat:Stateful): Started rh44-2
+        master_slave_Stateful:0 (ocf::heartbeat:Stateful): Slave rh44-2
    Resource Group: grp_ms_sf:1
-        master_slave_Stateful:1 (ocf::heartbeat:Stateful): Started rh44-1
+        master_slave_Stateful:1 (ocf::heartbeat:Stateful): Slave rh44-1

Transition Summary:
 * Promote master_slave_Stateful:1 (Slave -> Master rh44-1)

Executing cluster transition:
 * Resource action: master_slave_Stateful:1 cancel=5000 on rh44-1
 * Pseudo action: ms-sf_promote_0
 * Pseudo action: grp_ms_sf:1_promote_0
 * Resource action: master_slave_Stateful:1 promote on rh44-1
 * Pseudo action: grp_ms_sf:1_promoted_0
 * Resource action: master_slave_Stateful:1 monitor=6000 on rh44-1
 * Pseudo action: ms-sf_promoted_0

Revised cluster status:
Online: [ rh44-1 rh44-2 ]

 Resource Group: test
    resource_1 (ocf::heartbeat:IPaddr): Started rh44-1
 Master/Slave Set: ms-sf [grp_ms_sf] (unique)
    Resource Group: grp_ms_sf:0
-        master_slave_Stateful:0 (ocf::heartbeat:Stateful): Started rh44-2
+        master_slave_Stateful:0 (ocf::heartbeat:Stateful): Slave rh44-2
    Resource Group: grp_ms_sf:1
        master_slave_Stateful:1 (ocf::heartbeat:Stateful): Master rh44-1

diff --git a/pengine/test10/master-ordering.summary b/pengine/test10/master-ordering.summary
index 47db1e6244..c8e40943d1 100644
--- a/pengine/test10/master-ordering.summary
+++ b/pengine/test10/master-ordering.summary
@@ -1,95 +1,94 @@
-4 of 17 resources DISABLED and 0 BLOCKED from being started due to failures

Current cluster status:
Online: [ webcluster01 ]
OFFLINE: [ webcluster02 ]

 mysql-server (ocf::heartbeat:mysql): Stopped
 extip_1 (ocf::heartbeat:IPaddr2): Stopped
 extip_2 (ocf::heartbeat:IPaddr2): Stopped
 Resource Group: group_main
    intip_0_main (ocf::heartbeat:IPaddr2): Stopped
    intip_1_master (ocf::heartbeat:IPaddr2): Stopped
    intip_2_slave (ocf::heartbeat:IPaddr2): Stopped
 Master/Slave Set: ms_drbd_www [drbd_www]
    Stopped: [ webcluster01 webcluster02 ]
 Clone Set: clone_ocfs2_www [ocfs2_www] (unique)
    ocfs2_www:0 (ocf::heartbeat:Filesystem): Stopped
    ocfs2_www:1 (ocf::heartbeat:Filesystem): Stopped
 Clone Set: clone_webservice [group_webservice]
    Stopped: [ webcluster01 webcluster02 ]
 Master/Slave Set: ms_drbd_mysql [drbd_mysql]
    Stopped: [ webcluster01 webcluster02 ]
 fs_mysql (ocf::heartbeat:Filesystem): Stopped

Transition Summary:
 * Start extip_1 (webcluster01)
 * Start extip_2 (webcluster01)
 * Start intip_1_master (webcluster01)
 * Start intip_2_slave (webcluster01)
 * Start drbd_www:0 (webcluster01)
 * Start drbd_mysql:0 (webcluster01)

Executing cluster transition:
 * Resource action: mysql-server monitor on webcluster01
 * Resource action: extip_1 monitor on webcluster01
 * Resource action: extip_2 monitor on webcluster01
 * Resource action: intip_0_main monitor on webcluster01
 * Resource action: intip_1_master monitor on webcluster01
 * Resource action: intip_2_slave monitor on webcluster01
 * Resource action: drbd_www:0 monitor on webcluster01
 * Pseudo action: ms_drbd_www_pre_notify_start_0
 * Resource action: ocfs2_www:0 monitor on webcluster01
 * Resource action: ocfs2_www:1 monitor on webcluster01
 * Resource action: apache2:0 monitor on webcluster01
 * Resource action: mysql-proxy:0 monitor on webcluster01
 * Resource action: drbd_mysql:0 monitor on webcluster01
 * Pseudo action: ms_drbd_mysql_pre_notify_start_0
 * Resource action: fs_mysql monitor on webcluster01
 * Resource action: extip_1 start on webcluster01
 * Resource action: extip_2 start on webcluster01
 * Resource action: intip_1_master start on webcluster01
 * Resource action: intip_2_slave start on webcluster01
 * Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0
 * Pseudo action: ms_drbd_www_start_0
 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0
 * Pseudo action: ms_drbd_mysql_start_0
 * Resource action: extip_1 monitor=30000 on webcluster01
 * Resource action: extip_2 monitor=30000 on webcluster01
 * Resource action: intip_1_master monitor=30000 on webcluster01
 * Resource action: intip_2_slave monitor=30000 on webcluster01
 * Resource action: drbd_www:0 start on webcluster01
 * Pseudo action: ms_drbd_www_running_0
 * Resource action: drbd_mysql:0 start on webcluster01
 * Pseudo action: ms_drbd_mysql_running_0
 * Pseudo action: ms_drbd_www_post_notify_running_0
 * Pseudo action: ms_drbd_mysql_post_notify_running_0
 * Resource action: drbd_www:0 notify on webcluster01
 * Pseudo action: ms_drbd_www_confirmed-post_notify_running_0
 * Resource action: drbd_mysql:0 notify on webcluster01
 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_running_0

Revised cluster status:
Online: [ webcluster01 ]
OFFLINE: [ webcluster02 ]

 mysql-server (ocf::heartbeat:mysql): Stopped
 extip_1 (ocf::heartbeat:IPaddr2): Started webcluster01
 extip_2 (ocf::heartbeat:IPaddr2): Started webcluster01
 Resource Group: group_main
    intip_0_main (ocf::heartbeat:IPaddr2): Stopped
    intip_1_master (ocf::heartbeat:IPaddr2): Started webcluster01
    intip_2_slave (ocf::heartbeat:IPaddr2): Started webcluster01
 Master/Slave Set: ms_drbd_www [drbd_www]
-    Slaves (target-role): [ webcluster01 ]
+    Slaves: [ webcluster01 ]
    Stopped: [ webcluster02 ]
 Clone Set: clone_ocfs2_www [ocfs2_www] (unique)
    ocfs2_www:0 (ocf::heartbeat:Filesystem): Stopped
    ocfs2_www:1 (ocf::heartbeat:Filesystem): Stopped
 Clone Set: clone_webservice [group_webservice]
    Stopped: [ webcluster01 webcluster02 ]
 Master/Slave Set: ms_drbd_mysql [drbd_mysql]
-    Slaves (target-role): [ webcluster01 ]
+    Slaves: [ webcluster01 ]
    Stopped: [ webcluster02 ]
 fs_mysql (ocf::heartbeat:Filesystem): Stopped

diff --git a/pengine/test10/master-partially-demoted-group.summary b/pengine/test10/master-partially-demoted-group.summary
index 150f767293..0bda6050d0 100644
--- a/pengine/test10/master-partially-demoted-group.summary
+++ b/pengine/test10/master-partially-demoted-group.summary
@@ -1,118 +1,117 @@
-10 of 16 resources DISABLED and 0 BLOCKED from being started due to failures

Current cluster status:
Online: [ sd01-0 sd01-1 ]

 stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1
 stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0
 Resource Group: cdev-pool-0-iscsi-export
    cdev-pool-0-iscsi-target (ocf::vds-ok:iSCSITarget): Started sd01-1
    cdev-pool-0-iscsi-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started sd01-1
 Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd]
    Masters: [ sd01-1 ]
-    Slaves (target-role): [ sd01-0 ]
+    Slaves: [ sd01-0 ]
 Clone Set: cl-ietd [ietd]
    Started: [ sd01-0 sd01-1 ]
 Clone Set: cl-vlan1-net [vlan1-net]
    Started: [ sd01-0 sd01-1 ]
 Resource Group: cdev-pool-0-iscsi-vips
    vip-164 (ocf::heartbeat:IPaddr2): Started sd01-1
    vip-165 (ocf::heartbeat:IPaddr2): Started sd01-1
 Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw]
    Masters: [ sd01-1 ]
-    Slaves (target-role): [ sd01-0 ]
+    Slaves: [ sd01-0 ]

Transition Summary:
 * Move vip-164 (Started sd01-1 -> sd01-0)
 * Move vip-165 (Started sd01-1 -> sd01-0)
 * Move cdev-pool-0-iscsi-target (Started sd01-1 -> sd01-0)
 * Move cdev-pool-0-iscsi-lun-1 (Started sd01-1 -> sd01-0)
 * Demote vip-164-fw:0 (Master -> Slave sd01-1)
 * Promote vip-164-fw:1 (Slave -> Master sd01-0)
 * Promote vip-165-fw:1 (Slave -> Master sd01-0)
 * Demote cdev-pool-0-drbd:0 (Master -> Slave sd01-1)
 * Promote cdev-pool-0-drbd:1 (Slave -> Master sd01-0)

Executing cluster transition:
 * Resource action: vip-165-fw monitor=10000 on sd01-1
 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demote_0
 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_demote_0
 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demote_0
 * Resource action: vip-164-fw demote on sd01-1
 * Resource action: cdev-pool-0-drbd notify on sd01-1
 * Resource action: cdev-pool-0-drbd notify on sd01-0
 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_demote_0
 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demoted_0
 * Resource action: vip-164-fw monitor=10000 on sd01-1
 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demoted_0
 * Pseudo action: cdev-pool-0-iscsi-vips_stop_0
 * Resource action: vip-165 stop on sd01-1
 * Resource action: vip-164 stop on sd01-1
 * Pseudo action: cdev-pool-0-iscsi-vips_stopped_0
 * Pseudo action: cdev-pool-0-iscsi-export_stop_0
 * Resource action: cdev-pool-0-iscsi-lun-1 stop on sd01-1
 * Resource action: cdev-pool-0-iscsi-target stop on sd01-1
 * Pseudo action: all_stopped
 * Pseudo action: cdev-pool-0-iscsi-export_stopped_0
 * Pseudo action: ms-cdev-pool-0-drbd_demote_0
 * Resource action: cdev-pool-0-drbd demote on sd01-1
 * Pseudo action: ms-cdev-pool-0-drbd_demoted_0
 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_demoted_0
 * Resource action: cdev-pool-0-drbd notify on sd01-1
 * Resource action: cdev-pool-0-drbd notify on sd01-0
 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_demoted_0
 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_promote_0
 * Resource action: cdev-pool-0-drbd notify on sd01-1
 * Resource action: cdev-pool-0-drbd notify on sd01-0
 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_promote_0
 * Pseudo action: ms-cdev-pool-0-drbd_promote_0
 * Resource action: cdev-pool-0-drbd promote on sd01-0
 * Pseudo action: ms-cdev-pool-0-drbd_promoted_0
 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_promoted_0
 * Resource action: cdev-pool-0-drbd notify on sd01-1
 * Resource action: cdev-pool-0-drbd notify on sd01-0
 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_promoted_0
 * Pseudo action: cdev-pool-0-iscsi-export_start_0
 * Resource action: cdev-pool-0-iscsi-target start on sd01-0
 * Resource action: cdev-pool-0-iscsi-lun-1 start on sd01-0
 * Resource action: cdev-pool-0-drbd monitor=20000 on sd01-1
 * Resource action: cdev-pool-0-drbd monitor=10000 on sd01-0
 * Pseudo action: cdev-pool-0-iscsi-export_running_0
 * Resource action: cdev-pool-0-iscsi-target monitor=10000 on sd01-0
 * Resource action: cdev-pool-0-iscsi-lun-1 monitor=10000 on sd01-0
 * Pseudo action: cdev-pool-0-iscsi-vips_start_0
 * Resource action: vip-164 start on sd01-0
 * Resource action: vip-165 start on sd01-0
 * Pseudo action: cdev-pool-0-iscsi-vips_running_0
 * Resource action: vip-164 monitor=30000 on sd01-0
 * Resource action: vip-165 monitor=30000 on sd01-0
 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promote_0
 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_promote_0
action: cdev-pool-0-iscsi-vips-fw:1_promote_0 * Resource action: vip-164-fw promote on sd01-0 * Resource action: vip-165-fw promote on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promoted_0 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promoted_0 Revised cluster status: Online: [ sd01-0 sd01-1 ] stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 Resource Group: cdev-pool-0-iscsi-export cdev-pool-0-iscsi-target (ocf::vds-ok:iSCSITarget): Started sd01-0 cdev-pool-0-iscsi-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started sd01-0 Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] Masters: [ sd01-0 ] - Slaves (target-role): [ sd01-1 ] + Slaves: [ sd01-1 ] Clone Set: cl-ietd [ietd] Started: [ sd01-0 sd01-1 ] Clone Set: cl-vlan1-net [vlan1-net] Started: [ sd01-0 sd01-1 ] Resource Group: cdev-pool-0-iscsi-vips vip-164 (ocf::heartbeat:IPaddr2): Started sd01-0 vip-165 (ocf::heartbeat:IPaddr2): Started sd01-0 Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] Masters: [ sd01-0 ] - Slaves (target-role): [ sd01-1 ] + Slaves: [ sd01-1 ] diff --git a/pengine/test10/master-reattach.summary b/pengine/test10/master-reattach.summary index b2caa4303e..008a03b2bf 100644 --- a/pengine/test10/master-reattach.summary +++ b/pengine/test10/master-reattach.summary @@ -1,33 +1,32 @@ -2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ dktest1 dktest2 ] Master/Slave Set: ms-drbd1 [drbd1] (unmanaged) - drbd1 (ocf::heartbeat:drbd): Master dktest1 ( target-role:started, unmanaged ) - drbd1 (ocf::heartbeat:drbd): Slave dktest2 ( target-role:started, unmanaged ) + drbd1 (ocf::heartbeat:drbd): Master dktest1 ( unmanaged ) + drbd1 (ocf::heartbeat:drbd): Slave dktest2 ( unmanaged ) Resource Group: apache apache-vip (ocf::heartbeat:IPaddr2): Started dktest1 (unmanaged) mount (ocf::heartbeat:Filesystem): Started dktest1 (unmanaged) webserver (ocf::heartbeat:apache): Started dktest1 (unmanaged) Transition Summary: Executing cluster transition: * Resource action: drbd1:0 monitor=10000 on dktest1 * Resource action: drbd1:0 monitor=11000 on dktest2 * Resource action: apache-vip monitor=60000 on dktest1 * Resource action: mount monitor=10000 on dktest1 * Resource action: webserver monitor=30000 on dktest1 Revised cluster status: Online: [ dktest1 dktest2 ] Master/Slave Set: ms-drbd1 [drbd1] (unmanaged) - drbd1 (ocf::heartbeat:drbd): Master dktest1 ( target-role:started, unmanaged ) - drbd1 (ocf::heartbeat:drbd): Slave dktest2 ( target-role:started, unmanaged ) + drbd1 (ocf::heartbeat:drbd): Master dktest1 ( unmanaged ) + drbd1 (ocf::heartbeat:drbd): Slave dktest2 ( unmanaged ) Resource Group: apache apache-vip (ocf::heartbeat:IPaddr2): Started dktest1 (unmanaged) mount (ocf::heartbeat:Filesystem): Started dktest1 (unmanaged) webserver (ocf::heartbeat:apache): Started dktest1 (unmanaged) diff --git a/pengine/test10/migrate-partial-4.summary b/pengine/test10/migrate-partial-4.summary index 382d8c3b92..c3f7012212 100644 --- a/pengine/test10/migrate-partial-4.summary +++ b/pengine/test10/migrate-partial-4.summary @@ -1,126 +1,125 @@ -17 of 36 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] drbd-local (ocf::vds-ok:Ticketer): Started lustre01-left drbd-stacked (ocf::vds-ok:Ticketer): Stopped drbd-testfs-local (ocf::vds-ok:Ticketer): Stopped drbd-testfs-stacked 
(ocf::vds-ok:Ticketer): Stopped ip-testfs-mdt0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0001-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0002-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0003-left (ocf::heartbeat:IPaddr2): Stopped lustre (ocf::vds-ok:Ticketer): Started lustre03-left mgs (ocf::vds-ok:lustre-server): Stopped testfs (ocf::vds-ok:Ticketer): Started lustre02-left testfs-mdt0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0001 (ocf::vds-ok:lustre-server): Stopped testfs-ost0002 (ocf::vds-ok:lustre-server): Stopped testfs-ost0003 (ocf::vds-ok:lustre-server): Stopped Resource Group: booth ip-booth (ocf::heartbeat:IPaddr2): Started lustre02-left boothd (ocf::pacemaker:booth-site): Started lustre02-left Master/Slave Set: ms-drbd-mgs [drbd-mgs] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Transition Summary: * Start drbd-stacked (lustre02-left) * Start drbd-testfs-local (lustre03-left) * Migrate lustre (Started lustre03-left -> lustre04-left) * Move testfs (Started lustre02-left -> lustre03-left) * Start drbd-mgs:0 (lustre01-left) * Start drbd-mgs:1 (lustre02-left) Executing cluster transition: * Resource action: drbd-stacked start on lustre02-left * Resource action: drbd-testfs-local start on lustre03-left * Resource action: lustre migrate_to on lustre03-left * Resource action: testfs stop on lustre02-left * Resource action: testfs stop on lustre01-left * Pseudo action: ms-drbd-mgs_pre_notify_start_0 * Resource action: lustre migrate_from on lustre04-left * Resource action: lustre stop on lustre03-left * Resource action: testfs start on lustre03-left * Pseudo action: ms-drbd-mgs_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd-mgs_start_0 * Pseudo action: all_stopped * Pseudo action: lustre_start_0 * Resource action: drbd-mgs:0 start on lustre01-left * Resource action: drbd-mgs:1 start on lustre02-left * Pseudo action: ms-drbd-mgs_running_0 * Pseudo action: ms-drbd-mgs_post_notify_running_0 * Resource action: drbd-mgs:0 notify on lustre01-left * Resource action: drbd-mgs:1 
notify on lustre02-left * Pseudo action: ms-drbd-mgs_confirmed-post_notify_running_0 * Resource action: drbd-mgs:0 monitor=30000 on lustre01-left * Resource action: drbd-mgs:1 monitor=30000 on lustre02-left Revised cluster status: Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] drbd-local (ocf::vds-ok:Ticketer): Started lustre01-left drbd-stacked (ocf::vds-ok:Ticketer): Started lustre02-left drbd-testfs-local (ocf::vds-ok:Ticketer): Started lustre03-left drbd-testfs-stacked (ocf::vds-ok:Ticketer): Stopped ip-testfs-mdt0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0001-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0002-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0003-left (ocf::heartbeat:IPaddr2): Stopped lustre (ocf::vds-ok:Ticketer): Started lustre04-left mgs (ocf::vds-ok:lustre-server): Stopped testfs (ocf::vds-ok:Ticketer): Started lustre03-left testfs-mdt0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0001 (ocf::vds-ok:lustre-server): Stopped testfs-ost0002 (ocf::vds-ok:lustre-server): Stopped testfs-ost0003 (ocf::vds-ok:lustre-server): Stopped Resource Group: booth ip-booth (ocf::heartbeat:IPaddr2): Started lustre02-left boothd (ocf::pacemaker:booth-site): Started lustre02-left Master/Slave Set: ms-drbd-mgs [drbd-mgs] - Slaves (target-role): [ lustre01-left lustre02-left ] + Slaves: [ lustre01-left lustre02-left ] Master/Slave Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] diff --git a/pengine/test10/order_constraint_stops_master.summary b/pengine/test10/order_constraint_stops_master.summary index abf1e21687..f1125a12a5 100644 --- a/pengine/test10/order_constraint_stops_master.summary +++ b/pengine/test10/order_constraint_stops_master.summary @@ -1,42 +1,42 @@ -3 of 2 resources DISABLED and 0 BLOCKED from being started due to failures +1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Masters: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder2 ( disabled ) Transition Summary: * Stop 
NATIVE_RSC_A:0 (Master fc16-builder) * Stop NATIVE_RSC_B (fc16-builder2) Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder2 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/order_constraint_stops_slave.summary b/pengine/test10/order_constraint_stops_slave.summary index a84c8632da..843d3b61a2 100644 --- a/pengine/test10/order_constraint_stops_slave.summary +++ b/pengine/test10/order_constraint_stops_slave.summary @@ -1,34 +1,34 @@ -2 of 2 resources DISABLED and 0 BLOCKED from being started due to failures +1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] - Slaves (target-role): [ fc16-builder ] + Slaves: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: * Stop NATIVE_RSC_A:0 (Slave fc16-builder) * Stop NATIVE_RSC_B (fc16-builder) Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/probe-2.summary b/pengine/test10/probe-2.summary index 7bd44d2da4..e8a2269ed4 100644 --- a/pengine/test10/probe-2.summary +++ b/pengine/test10/probe-2.summary @@ -1,163 +1,162 @@ -4 of 22 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc02 ] - Slaves (target-role): [ wc01 ] + Slaves: 
[ wc01 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc02 intip_sql (ocf::heartbeat:IPaddr2): Started wc02 mysql-server (ocf::heartbeat:mysql): Started wc02 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] - Slaves (target-role): [ wc02 ] + Slaves: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Started wc02 Transition Summary: * Promote drbd_mysql:0 (Slave -> Master wc01) * Demote drbd_mysql:1 (Master -> Stopped wc02) * Move fs_mysql (Started wc02 -> wc01) * Move intip_sql (Started wc02 -> wc01) * Move mysql-server (Started wc02 -> wc01) * Stop drbd_www:1 (wc02) * Stop nfs-common:1 (wc02) * Stop mysql-proxy:1 (wc02) * Stop fs_www:1 (wc02) * Stop apache2:1 (wc02) * Restart stonith_rackpdu:0 (Started wc01) * Stop stonith_rackpdu:1 (wc02) Executing cluster transition: * Resource action: drbd_mysql:0 cancel=10000 on wc01 * Pseudo action: ms_drbd_mysql_pre_notify_demote_0 * Pseudo action: group_mysql_stop_0 * Resource action: mysql-server stop on wc02 * Pseudo action: ms_drbd_www_pre_notify_stop_0 * Pseudo action: clone_mysql-proxy_stop_0 * Pseudo action: clone_webservice_stop_0 * Pseudo action: DoFencing_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_demote_0 * Resource action: intip_sql stop on wc02 * Resource action: drbd_www:0 notify on wc01 * Resource action: drbd_www:1 notify on wc02 * Pseudo action: ms_drbd_www_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_www_stop_0 * Pseudo action: group_mysql-proxy:1_stop_0 * Resource action: mysql-proxy:1 stop on wc02 * Pseudo action: group_webservice:1_stop_0 * Resource action: apache2:1 stop on wc02 * Resource action: stonith_rackpdu:0 stop on wc01 * Resource action: stonith_rackpdu:1 stop on wc02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Resource action: fs_mysql stop on wc02 * Resource action: drbd_www:1 stop on wc02 * Pseudo action: ms_drbd_www_stopped_0 * Pseudo action: group_mysql-proxy:1_stopped_0 * Pseudo action: clone_mysql-proxy_stopped_0 * Resource action: fs_www:1 stop on wc02 * Resource action: stonith_rackpdu:0 start on wc01 * Pseudo action: DoFencing_running_0 * Pseudo action: group_mysql_stopped_0 * Pseudo action: ms_drbd_www_post_notify_stopped_0 * Pseudo action: group_webservice:1_stopped_0 * Pseudo action: clone_webservice_stopped_0 * Resource action: stonith_rackpdu:0 monitor=5000 on wc01 * Pseudo action: ms_drbd_mysql_demote_0 * Resource action: drbd_www:0 notify on wc01 * Pseudo action: ms_drbd_www_confirmed-post_notify_stopped_0 * Pseudo action: clone_nfs-common_stop_0 * Resource action: drbd_mysql:1 demote on wc02 * Pseudo action: ms_drbd_mysql_demoted_0 * Pseudo action: group_nfs-common:1_stop_0 * Resource action: nfs-common:1 stop on wc02 * Pseudo action: ms_drbd_mysql_post_notify_demoted_0 * Pseudo action: group_nfs-common:1_stopped_0 * Pseudo action: clone_nfs-common_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: 
ms_drbd_mysql_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_mysql_pre_notify_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_mysql_stop_0 * Resource action: drbd_mysql:1 stop on wc02 * Pseudo action: ms_drbd_mysql_stopped_0 * Pseudo action: ms_drbd_mysql_post_notify_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_mysql_pre_notify_promote_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_mysql_promote_0 * Resource action: drbd_mysql:0 promote on wc01 * Pseudo action: ms_drbd_mysql_promoted_0 * Pseudo action: ms_drbd_mysql_post_notify_promoted_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_promoted_0 * Pseudo action: group_mysql_start_0 * Resource action: fs_mysql start on wc01 * Resource action: intip_sql start on wc01 * Resource action: mysql-server start on wc01 * Resource action: drbd_mysql:0 monitor=5000 on wc01 * Pseudo action: group_mysql_running_0 * Resource action: fs_mysql monitor=30000 on wc01 * Resource action: intip_sql monitor=30000 on wc01 * Resource action: mysql-server monitor=30000 on wc01 Revised cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc01 intip_sql (ocf::heartbeat:IPaddr2): Started wc01 mysql-server (ocf::heartbeat:mysql): Started wc01 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Stopped diff --git a/pengine/test10/rec-node-13.summary b/pengine/test10/rec-node-13.summary index 82f210a929..7c6f52b0b1 100644 --- a/pengine/test10/rec-node-13.summary +++ b/pengine/test10/rec-node-13.summary @@ -1,79 +1,79 @@ Current cluster status: Node c001n04 (9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online) Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): 
Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02 + ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 - ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 + ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 Transition Summary: * Stop ocf_msdummy:6 (c001n04) Executing cluster transition: * Fencing c001n04 (reboot) * Pseudo action: stonith_complete * Pseudo action: master_rsc_1_stop_0 * Pseudo action: ocf_msdummy:6_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n04 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02 + ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - 
ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 - ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 + ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 diff --git a/pengine/test10/stonith-0.summary b/pengine/test10/stonith-0.summary index 4ec1ff74bd..0fcaf80cbe 100644 --- a/pengine/test10/stonith-0.summary +++ b/pengine/test10/stonith-0.summary @@ -1,109 +1,109 @@ Current cluster status: Node c001n03 (f5e1d2de-73da-432a-9d5c-37472253c2ee): UNCLEAN (online) Node c001n05 (52a5ea5e-86ee-442c-b251-0bc9825c517e): UNCLEAN (online) Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started [ c001n03 c001n05 ] heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): FAILED [ c001n03 c001n05 ] lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 - ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02 - ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 - ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 + ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n04 - ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n04 - ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 + ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 + ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 + ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:13 
(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 Transition Summary: * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Recover ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move rsc_c001n05 (Started c001n05 -> c001n07) * Move rsc_c001n07 (Started c001n03 -> c001n07) Executing cluster transition: * Resource action: child_DoFencing:4 monitor=20000 on c001n08 * Fencing c001n05 (reboot) * Fencing c001n03 (reboot) * Pseudo action: stonith_complete * Pseudo action: group-1_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: rsc_c001n05_stop_0 * Pseudo action: rsc_c001n07_stop_0 * Pseudo action: heartbeat_192.168.100.182_stop_0 * Resource action: rsc_c001n05 start on c001n07 * Resource action: rsc_c001n07 start on c001n07 * Pseudo action: ocf_192.168.100.181_stop_0 * Pseudo action: ocf_192.168.100.181_stop_0 * Resource action: rsc_c001n05 monitor=5000 on c001n07 * Resource action: rsc_c001n07 monitor=5000 on c001n07 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] OFFLINE: [ c001n03 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 - ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n02 - ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 - ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n07 + ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 
(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n04 - ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n04 - ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 - ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Started c001n06 + ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 + ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 + ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 diff --git a/pengine/test10/stonith-1.summary b/pengine/test10/stonith-1.summary index e99bb5ec5a..589e4023d0 100644 --- a/pengine/test10/stonith-1.summary +++ b/pengine/test10/stonith-1.summary @@ -1,112 +1,112 @@ Current cluster status: Node sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): UNCLEAN (offline) Online: [ sles-1 sles-2 sles-4 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 (UNCLEAN) rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 (UNCLEAN) rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN) Started: [ sles-1 sles-2 ] Stopped: [ sles-4 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped - ocf_msdummy:2 (ocf::heartbeat:Stateful): Started sles-3 (UNCLEAN) + ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped - ocf_msdummy:5 (ocf::heartbeat:Stateful): Started sles-3 (UNCLEAN) + ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped Transition Summary: * Start r192.168.100.183 (sles-1) * Move migrator (Started sles-3 -> sles-4) * Move rsc_sles-3 (Started sles-3 -> sles-4) * Move child_DoFencing:2 (Started sles-3 -> sles-4) * Start ocf_msdummy:0 (sles-4) * Start ocf_msdummy:1 (sles-1) * Move ocf_msdummy:2 (Slave sles-3 -> sles-2) * Start ocf_msdummy:3 (sles-4) * Start ocf_msdummy:4 (sles-1) * Move ocf_msdummy:5 (Slave sles-3 -> sles-2) Executing cluster transition: * Pseudo action: group-1_start_0 * Resource action: r192.168.100.182 monitor=5000 on sles-1 * Resource action: lsb_dummy monitor=5000 on sles-2 * Resource action: rsc_sles-2 monitor=5000 on sles-2 * Resource action: rsc_sles-4 monitor=5000 on sles-4 * Pseudo action: DoFencing_stop_0 * Fencing sles-3 (reboot) * Pseudo action: stonith_complete * Resource action: r192.168.100.183 start on sles-1 * Pseudo action: migrator_stop_0 * Pseudo action: rsc_sles-3_stop_0 * Pseudo action: child_DoFencing:2_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: group-1_running_0 * Resource action: r192.168.100.183 monitor=5000 on sles-1 * Resource action: migrator start on 
sles-4 * Resource action: rsc_sles-3 start on sles-4 * Resource action: child_DoFencing:2 start on sles-4 * Pseudo action: DoFencing_running_0 * Pseudo action: ocf_msdummy:2_stop_0 * Pseudo action: ocf_msdummy:5_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Resource action: migrator monitor=10000 on sles-4 * Resource action: rsc_sles-3 monitor=5000 on sles-4 * Resource action: child_DoFencing:2 monitor=60000 on sles-4 * Resource action: ocf_msdummy:0 start on sles-4 * Resource action: ocf_msdummy:1 start on sles-1 * Resource action: ocf_msdummy:2 start on sles-2 * Resource action: ocf_msdummy:3 start on sles-4 * Resource action: ocf_msdummy:4 start on sles-1 * Resource action: ocf_msdummy:5 start on sles-2 * Pseudo action: master_rsc_1_running_0 * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 * Resource action: ocf_msdummy:3 monitor=5000 on sles-4 * Resource action: ocf_msdummy:4 monitor=5000 on sles-1 * Resource action: ocf_msdummy:5 monitor=5000 on sles-2 Revised cluster status: Online: [ sles-1 sles-2 sles-4 ] OFFLINE: [ sles-3 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-4 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-4 ] Stopped: [ sles-3 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) - ocf_msdummy:0 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:1 (ocf::heartbeat:Stateful): Started sles-1 - ocf_msdummy:2 (ocf::heartbeat:Stateful): Started sles-2 - ocf_msdummy:3 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:4 (ocf::heartbeat:Stateful): Started sles-1 - ocf_msdummy:5 (ocf::heartbeat:Stateful): Started sles-2 + ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-1 + ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-2 + ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-1 + ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/stonith-2.summary b/pengine/test10/stonith-2.summary index 169f44f416..b02c9b4e75 100644 --- a/pengine/test10/stonith-2.summary +++ b/pengine/test10/stonith-2.summary @@ -1,77 +1,77 @@ Current cluster status: Node sles-5 (434915c6-7b40-4d30-95ff-dc0ff3dc005a): UNCLEAN (offline) Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-5 
(ocf::heartbeat:IPaddr): Stopped rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Stopped: [ sles-5 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) - ocf_msdummy:0 (ocf::heartbeat:Stateful): Started sles-3 - ocf_msdummy:1 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:2 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:3 (ocf::heartbeat:Stateful): Started sles-1 - ocf_msdummy:4 (ocf::heartbeat:Stateful): Started sles-2 - ocf_msdummy:5 (ocf::heartbeat:Stateful): Started sles-1 + ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3 + ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1 + ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2 + ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped - ocf_msdummy:8 (ocf::heartbeat:Stateful): Started sles-6 - ocf_msdummy:9 (ocf::heartbeat:Stateful): Started sles-6 - ocf_msdummy:10 (ocf::heartbeat:Stateful): Started sles-2 - ocf_msdummy:11 (ocf::heartbeat:Stateful): Started sles-3 + ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6 + ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6 + ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2 + ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3 Transition Summary: * Start rsc_sles-5 (sles-6) Executing cluster transition: * Fencing sles-5 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: rsc_sles-5 start on sles-6 * Resource action: rsc_sles-5 monitor=5000 on sles-6 Revised cluster status: Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] OFFLINE: [ sles-5 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-5 (ocf::heartbeat:IPaddr): Started sles-6 rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Stopped: [ sles-5 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) - ocf_msdummy:0 (ocf::heartbeat:Stateful): Started sles-3 - ocf_msdummy:1 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:2 (ocf::heartbeat:Stateful): Started sles-4 - ocf_msdummy:3 (ocf::heartbeat:Stateful): Started sles-1 - ocf_msdummy:4 (ocf::heartbeat:Stateful): Started sles-2 - ocf_msdummy:5 (ocf::heartbeat:Stateful): Started sles-1 + ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3 + ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4 + ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1 + ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2 + ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped - ocf_msdummy:8 (ocf::heartbeat:Stateful): Started sles-6 - ocf_msdummy:9 (ocf::heartbeat:Stateful): Started sles-6 - ocf_msdummy:10 
(ocf::heartbeat:Stateful): Started sles-2 - ocf_msdummy:11 (ocf::heartbeat:Stateful): Started sles-3 + ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6 + ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6 + ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2 + ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3 diff --git a/pengine/test10/unmanaged-master.summary b/pengine/test10/unmanaged-master.summary index 066f139fa8..024179a727 100644 --- a/pengine/test10/unmanaged-master.summary +++ b/pengine/test10/unmanaged-master.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 ] OFFLINE: [ pcmk-3 pcmk-4 ] Clone Set: Fencing [FencingChild] (unmanaged) FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged) FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged) Stopped: [ pcmk-3 pcmk-4 ] Resource Group: group-1 r192.168.122.126 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) r192.168.122.127 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) r192.168.122.128 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 (unmanaged) rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 (unmanaged) rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 (unmanaged) lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged) migrator (ocf::pacemaker:Dummy): Started pcmk-4 (unmanaged) Clone Set: Connectivity [ping-1] (unmanaged) ping-1 (ocf::pacemaker:ping): Started pcmk-2 (unmanaged) ping-1 (ocf::pacemaker:ping): Started pcmk-1 (unmanaged) Stopped: [ pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] (unmanaged) stateful-1 (ocf::pacemaker:Stateful): Master pcmk-2 (unmanaged) - stateful-1 (ocf::pacemaker:Stateful): Started pcmk-1 (unmanaged) + stateful-1 (ocf::pacemaker:Stateful): Slave pcmk-1 ( unmanaged ) Stopped: [ pcmk-3 pcmk-4 ] Transition Summary: Executing cluster transition: * Cluster action: do_shutdown on pcmk-2 * Cluster action: do_shutdown on pcmk-1 Revised cluster status: Online: [ pcmk-1 pcmk-2 ] OFFLINE: [ pcmk-3 pcmk-4 ] Clone Set: Fencing [FencingChild] (unmanaged) FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged) FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged) Stopped: [ pcmk-3 pcmk-4 ] Resource Group: group-1 r192.168.122.126 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) r192.168.122.127 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) r192.168.122.128 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 (unmanaged) rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged) rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 (unmanaged) rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 (unmanaged) lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged) migrator (ocf::pacemaker:Dummy): Started pcmk-4 (unmanaged) Clone Set: Connectivity [ping-1] (unmanaged) ping-1 (ocf::pacemaker:ping): Started pcmk-2 (unmanaged) ping-1 (ocf::pacemaker:ping): Started pcmk-1 (unmanaged) Stopped: [ pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] (unmanaged) stateful-1 (ocf::pacemaker:Stateful): Master pcmk-2 (unmanaged) - stateful-1 (ocf::pacemaker:Stateful): Started pcmk-1 (unmanaged) + stateful-1 (ocf::pacemaker:Stateful): Slave pcmk-1 ( unmanaged ) Stopped: [ pcmk-3 pcmk-4 ] diff --git a/pengine/test10/use-after-free-merge.summary b/pengine/test10/use-after-free-merge.summary index dea0ae40ed..c74af65c31 
100644 --- a/pengine/test10/use-after-free-merge.summary +++ b/pengine/test10/use-after-free-merge.summary @@ -1,42 +1,42 @@ -6 of 5 resources DISABLED and 0 BLOCKED from being started due to failures +4 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ hex-13 hex-14 ] fencing-sbd (stonith:external/sbd): Stopped Resource Group: g0 d0 (ocf::heartbeat:Dummy): Stopped ( disabled ) d1 (ocf::heartbeat:Dummy): Stopped ( disabled ) Master/Slave Set: ms0 [s0] Stopped: [ hex-13 hex-14 ] Transition Summary: * Start fencing-sbd (hex-14) * Start s0:0 (hex-13) * Start s0:1 (hex-14) Executing cluster transition: * Resource action: fencing-sbd monitor on hex-14 * Resource action: fencing-sbd monitor on hex-13 * Resource action: d0 monitor on hex-14 * Resource action: d0 monitor on hex-13 * Resource action: d1 monitor on hex-14 * Resource action: d1 monitor on hex-13 * Resource action: s0:0 monitor on hex-13 * Resource action: s0:1 monitor on hex-14 * Pseudo action: ms0_start_0 * Resource action: fencing-sbd start on hex-14 * Resource action: s0:0 start on hex-13 * Resource action: s0:1 start on hex-14 * Pseudo action: ms0_running_0 Revised cluster status: Online: [ hex-13 hex-14 ] fencing-sbd (stonith:external/sbd): Started hex-14 Resource Group: g0 d0 (ocf::heartbeat:Dummy): Stopped ( disabled ) d1 (ocf::heartbeat:Dummy): Stopped ( disabled ) Master/Slave Set: ms0 [s0] - Slaves (target-role): [ hex-13 hex-14 ] + Slaves: [ hex-13 hex-14 ]