diff --git a/pengine/allocate.c b/pengine/allocate.c
index 3a883ad321..8ea2ea1e2a 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1,2481 +1,2485 @@
/*
 * Copyright (C) 2004 Andrew Beekhof
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pengine.h>
#include <allocate.h>
#include <utils.h>

CRM_TRACE_INIT_DATA(pe_allocate);

void set_alloc_actions(pe_working_set_t * data_set);
void migrate_reload_madness(pe_working_set_t * data_set);
extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
static void apply_remote_node_ordering(pe_working_set_t *data_set);
static enum remote_connection_state get_remote_node_state(pe_node_t *node);

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4
};

resource_alloc_functions_t resource_class_alloc_functions[] = {
    {
     native_merge_weights,
     native_color,
     native_create_actions,
     native_create_probe,
     native_internal_constraints,
     native_rsc_colocation_lh,
     native_rsc_colocation_rh,
     native_rsc_location,
     native_action_flags,
     native_update_actions,
     native_expand,
     native_append_meta,
     },
    {
     group_merge_weights,
     group_color,
     group_create_actions,
     native_create_probe,
     group_internal_constraints,
     group_rsc_colocation_lh,
     group_rsc_colocation_rh,
     group_rsc_location,
     group_action_flags,
     group_update_actions,
     group_expand,
     group_append_meta,
     },
    {
     clone_merge_weights,
     clone_color,
     clone_create_actions,
     clone_create_probe,
     clone_internal_constraints,
     clone_rsc_colocation_lh,
     clone_rsc_colocation_rh,
     clone_rsc_location,
     clone_action_flags,
     container_update_actions,
     clone_expand,
     clone_append_meta,
     },
    {
     master_merge_weights,
     master_color,
     master_create_actions,
     clone_create_probe,
     master_internal_constraints,
     clone_rsc_colocation_lh,
     master_rsc_colocation_rh,
     clone_rsc_location,
     clone_action_flags,
     container_update_actions,
     clone_expand,
     master_append_meta,
     },
    {
     container_merge_weights,
     container_color,
     container_create_actions,
     container_create_probe,
     container_internal_constraints,
     container_rsc_colocation_lh,
     container_rsc_colocation_rh,
     container_rsc_location,
     container_action_flags,
     container_update_actions,
     container_expand,
     container_append_meta,
     }
};

gboolean
update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
{
    static unsigned long calls = 0;
    gboolean changed = FALSE;
    gboolean clear = is_set(flags, pe_action_clear);
    enum pe_action_flags last = action->flags;

    if (clear) {
        action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);

    } else {
        action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
    }

    if (last != action->flags) {
        calls++;
        changed = TRUE;
        /*
Useful for tracking down _who_ changed a specific flag */ /* CRM_ASSERT(calls != 534); */ clear_bit(flags, pe_action_clear); crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)", action->uuid, action->node ? action->node->details->uname : "[none]", clear ? "un-" : "", flags, last, action->flags, calls, source); } return changed; } static gboolean check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry, gboolean active_here, pe_working_set_t * data_set) { int attr_lpc = 0; gboolean force_restart = FALSE; gboolean delete_resource = FALSE; gboolean changed = FALSE; const char *value = NULL; const char *old_value = NULL; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for (; attr_lpc < DIMOF(attr_list); attr_lpc++) { value = crm_element_value(rsc->xml, attr_list[attr_lpc]); old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]); if (value == old_value /* ie. NULL */ || crm_str_eq(value, old_value, TRUE)) { continue; } changed = TRUE; trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set); if (active_here) { force_restart = TRUE; crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s", rsc->id, node->details->uname, attr_list[attr_lpc], crm_str(old_value), crm_str(value)); } } if (force_restart) { /* make sure the restart happens */ stop_action(rsc, node, FALSE); set_bit(rsc->flags, pe_rsc_start_pending); delete_resource = TRUE; } else if (changed) { delete_resource = TRUE; } return delete_resource; } static void CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node, const char *reason, pe_working_set_t * data_set) { int interval = 0; action_t *cancel = NULL; char *key = NULL; const char *task = NULL; const char *call_id = NULL; const char *interval_s = NULL; CRM_CHECK(xml_op != NULL, return); CRM_CHECK(active_node != NULL, return); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval); crm_info("Action %s on %s will be stopped: %s", key, active_node->details->uname, reason ? 
reason : "unknown"); /* TODO: This looks highly dangerous if we ever try to schedule 'key' too */ cancel = custom_action(rsc, strdup(key), RSC_CANCEL, active_node, FALSE, TRUE, data_set); free(cancel->task); free(cancel->cancel_task); cancel->task = strdup(RSC_CANCEL); cancel->cancel_task = strdup(task); add_hash_param(cancel->meta, XML_LRM_ATTR_TASK, task); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); add_hash_param(cancel->meta, XML_LRM_ATTR_INTERVAL, interval_s); custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set); free(key); key = NULL; } static gboolean check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op, pe_working_set_t * data_set) { char *key = NULL; int interval = 0; const char *interval_s = NULL; const op_digest_cache_t *digest_data = NULL; gboolean did_change = FALSE; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *op_version; const char *digest_secure = NULL; CRM_CHECK(active_node != NULL, return FALSE); if (safe_str_eq(task, RSC_STOP)) { return FALSE; } interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if (interval > 0) { xmlNode *op_match = NULL; /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval); pe_rsc_trace(rsc, "Checking parameters for %s", key); op_match = find_rsc_op_entry(rsc, key); if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) { CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set); free(key); return TRUE; } else if (op_match == NULL) { pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname); free(key); return TRUE; } free(key); key = NULL; } crm_trace("Testing %s_%s_%d on %s", rsc->id, task, interval, active_node->details->uname); if (interval == 0 && safe_str_eq(task, RSC_STATUS)) { /* Reload based on the start action not a probe */ task = RSC_START; } else if (interval == 0 && safe_str_eq(task, RSC_MIGRATED)) { /* Reload based on the start action not a migrate */ task = RSC_START; } else if (interval == 0 && safe_str_eq(task, RSC_PROMOTE)) { /* Reload based on the start action not a promote */ task = RSC_START; } op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set); if(is_set(data_set->flags, pe_flag_sanitized)) { digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); } if(digest_data->rc != RSC_DIGEST_MATCH && digest_secure && digest_data->digest_secure_calc && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) { fprintf(stdout, "Only 'private' parameters to %s_%s_%d on %s changed: %s\n", rsc->id, task, interval, active_node->details->uname, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } else if (digest_data->rc == RSC_DIGEST_RESTART) { /* Changes that force a restart */ const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); did_change = TRUE; key = generate_op_key(rsc->id, task, interval); crm_log_xml_info(digest_data->params_restart, "params:restart"); pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. 
now %s (restart:%s) %s", key, active_node->details->uname, crm_str(digest_restart), digest_data->digest_restart_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); trigger_unfencing(rsc, NULL, "Device parameters changed", NULL, data_set); } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) { /* Changes that can potentially be handled by a reload */ const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); const char *digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); did_change = TRUE; trigger_unfencing(rsc, NULL, "Device parameters changed (reload)", NULL, data_set); crm_log_xml_info(digest_data->params_all, "params:reload"); key = generate_op_key(rsc->id, task, interval); pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (reload:%s) %s", key, active_node->details->uname, crm_str(digest_all), digest_data->digest_all_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); if (interval > 0) { action_t *op = NULL; #if 0 /* Always reload/restart the entire resource */ ReloadRsc(rsc, active_node, data_set); #else /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */ op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_reschedule); #endif } else if (digest_restart && rsc->isolation_wrapper == NULL && (uber_parent(rsc))->isolation_wrapper == NULL) { pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id); /* Reload this resource */ ReloadRsc(rsc, active_node, data_set); free(key); } else { pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id); /* Re-send the start/demote/promote op * Recurring ops will be detected independently */ custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); } } return did_change; } static void check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set) { GListPtr gIter = NULL; int offset = -1; int interval = 0; int stop_index = 0; int start_index = 0; const char *task = NULL; const char *interval_s = NULL; xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; gboolean is_probe = FALSE; gboolean did_change = FALSE; CRM_CHECK(node != NULL, return); if (is_set(rsc->flags, pe_rsc_orphan)) { resource_t *parent = uber_parent(rsc); if(parent == NULL || pe_rsc_is_clone(parent) == FALSE || is_set(parent->flags, pe_rsc_unique)) { pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id); DeleteRsc(rsc, node, FALSE, data_set); } else { pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id); } return; } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s", rsc->id, node->details->uname); return; } pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname); if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); 
calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; offset++; if (start_index < stop_index) { /* stopped */ continue; } else if (offset < start_index) { /* action occurred prior to a start */ continue; } is_probe = FALSE; did_change = FALSE; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if (interval == 0 && safe_str_eq(task, RSC_STATUS)) { is_probe = TRUE; } if (interval > 0 && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if (is_probe || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || interval > 0 || safe_str_eq(task, RSC_MIGRATED)) { did_change = check_action_definition(rsc, node, rsc_op, data_set); } if (did_change && get_failcount(node, rsc, NULL, data_set)) { char *key = NULL; action_t *action_clear = NULL; key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); action_clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); set_bit(action_clear->flags, pe_action_runnable); crm_notice("Clearing failure of %s on %s " "because action definition changed " CRM_XS " %s", rsc->id, node->details->uname, action_clear->uuid); } } g_list_free(sorted_op_list); } static GListPtr find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones, gboolean partial, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean match = FALSE; if (id == NULL) { return NULL; } else if (rsc == NULL && data_set) { for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } return result; } else if (rsc == NULL) { return NULL; } if (partial) { if (strstr(rsc->id, id)) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) { match = TRUE; } } else { if (strcmp(rsc->id, id) == 0) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } } if (match) { result = g_list_prepend(result, rsc); } if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } } return result; } static void check_actions(pe_working_set_t * data_set) { const char *id = NULL; node_t *node = NULL; xmlNode *lrm_rscs = NULL; xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); xmlNode *node_state = NULL; for (node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { id = crm_element_value(node_state, XML_ATTR_ID); lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE); node = pe_find_node_id(data_set->nodes, id); if (node == NULL) { continue; /* Still need to check actions for a maintenance node to cancel existing monitor operations */ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) { crm_trace("Skipping param check for %s: can't run resources", node->details->uname); continue; } crm_trace("Processing node %s", 
node->details->uname); if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if (xml_has_children(rsc_entry)) { GListPtr gIter = NULL; GListPtr result = NULL; const char *rsc_id = ID(rsc_entry); CRM_CHECK(rsc_id != NULL, return); result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set); for (gIter = result; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->variant != pe_native) { continue; } check_actions_for(rsc_entry, rsc, node, data_set); } g_list_free(result); } } } } } } } static gboolean apply_placement_constraints(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying constraints..."); for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { rsc_to_node_t *cons = (rsc_to_node_t *) gIter->data; cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); } return TRUE; } static gboolean failcount_clear_action_exists(node_t * node, resource_t * rsc) { gboolean rc = FALSE; char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); GListPtr list = find_actions_exact(rsc->actions, key, node); if (list) { rc = TRUE; } g_list_free(list); free(key); return rc; } /*! * \internal * \brief Force resource away if failures hit migration threshold * * \param[in,out] rsc Resource to check for failures * \param[in,out] node Node to check for failures * \param[in,out] data_set Cluster working set to update */ static void check_migration_threshold(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; resource_t *failed; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return; } /* If there are no failures, there's no need to force away */ fail_count = get_failcount_all(node, rsc, NULL, data_set); if (fail_count <= 0) { return; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); /* If failed resource has a parent, we'll force the parent away */ failed = rsc; if (is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(rsc); } if (countdown == 0) { resource_location(failed, node, -INFINITY, "__fail_limit__", data_set); crm_warn("Forcing %s away from %s after %d failures (max=%d)", failed->id, node->details->uname, fail_count, rsc->migration_threshold); } else { crm_info("%s can fail %d more times on %s before being forced off", failed->id, countdown, node->details->uname); } } static void common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; common_apply_stickiness(child_rsc, node, data_set); } return; } if (is_set(rsc->flags, pe_rsc_managed) && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) { node_t *current = pe_find_node_id(rsc->running_on, node->details->id); node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (current == NULL) { } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) { resource_t *sticky_rsc = rsc; resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set); pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location" " 
(node=%s, weight=%d)", sticky_rsc->id, node->details->uname, rsc->stickiness); } else { GHashTableIter iter; node_t *nIter = NULL; pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric" " and node %s is not explicitly allowed", rsc->id, node->details->uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) { crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight); } } } /* Check the migration threshold only if a failcount clear action * has not already been placed for this resource on the node. * There is no sense in potentially forcing the resource from this * node if the failcount is being reset anyway. */ if (failcount_clear_action_exists(node, rsc) == FALSE) { check_migration_threshold(rsc, node, data_set); } } void complex_set_cmds(resource_t * rsc) { GListPtr gIter = rsc->children; rsc->cmds = &resource_class_alloc_functions[rsc->variant]; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; complex_set_cmds(child_rsc); } } void set_alloc_actions(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; complex_set_cmds(rsc); } } static void calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data) { const char *key = (const char *)gKey; const char *value = (const char *)gValue; int *system_health = (int *)user_data; if (!gKey || !gValue || !user_data) { return; } /* Does it start with #health? */ if (0 == strncmp(key, "#health", 7)) { int score; /* Convert the value into an integer */ score = char2score(value); /* Add it to the running total */ *system_health = merge_weights(score, *system_health); } } static gboolean apply_system_health(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy"); int base_health = 0; if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) { /* Prevent any accidental health -> score translation */ node_score_red = 0; node_score_yellow = 0; node_score_green = 0; return TRUE; } else if (safe_str_eq(health_strategy, "migrate-on-red")) { /* Resources on nodes which have health values of red are * weighted away from that node. */ node_score_red = -INFINITY; node_score_yellow = 0; node_score_green = 0; } else if (safe_str_eq(health_strategy, "only-green")) { /* Resources on nodes which have health values of red or yellow * are forced away from that node. 
         */
        node_score_red = -INFINITY;
        node_score_yellow = -INFINITY;
        node_score_green = 0;

    } else if (safe_str_eq(health_strategy, "progressive")) {
        /* Same as the above, but use the r/y/g scores provided by the user
         * Defaults are provided by the pe_prefs table
         * Also, custom health "base score" can be used
         */
        base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");

    } else if (safe_str_eq(health_strategy, "custom")) {

        /* Requires the admin to configure the rsc_location constraints for
         * processing the stored health scores
         */
        /* TODO: Check for the existence of appropriate node health constraints */
        return TRUE;

    } else {
        crm_err("Unknown node health strategy: %s", health_strategy);
        return FALSE;
    }

    crm_info("Applying automated node health strategy: %s", health_strategy);
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        int system_health = base_health;
        node_t *node = (node_t *) gIter->data;

        /* Search through the node hash table for system health entries. */
        g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);

        crm_info(" Node %s has a combined system health of %d",
                 node->details->uname, system_health);

        /* If the health is non-zero, then create a new rsc2node so that the
         * weight will be added later on.
         */
        if (system_health != 0) {

            GListPtr gIter2 = data_set->resources;

            for (; gIter2 != NULL; gIter2 = gIter2->next) {
                resource_t *rsc = (resource_t *) gIter2->data;

                rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
            }
        }
    }

    return TRUE;
}

gboolean
stage0(pe_working_set_t * data_set)
{
    xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);

    if (data_set->input == NULL) {
        return FALSE;
    }

    if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
        crm_trace("Calculating status");
        cluster_status(data_set);
    }

    set_alloc_actions(data_set);
    apply_system_health(data_set);
    unpack_constraints(cib_constraints, data_set);

    return TRUE;
}

/*
 * Check nodes for resources started outside of the LRM
 */
gboolean
probe_resources(pe_working_set_t * data_set)
{
    action_t *probe_node_complete = NULL;

    for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *probed = g_hash_table_lookup(node->details->attrs, CRM_OP_PROBED);

        if (is_container_remote_node(node)) {
            /* TODO enable guest node probes once ordered probing is implemented */
            continue;

        } else if (node->details->online == FALSE && node->details->remote_rsc) {
            enum remote_connection_state state = get_remote_node_state(node);

            if(state == remote_state_failed) {
                pe_fence_node(data_set, node, "the connection is unrecoverable");
            }
            continue;

        } else if(node->details->online == FALSE) {
            continue;

        } else if (node->details->unclean) {
            continue;

        } else if (node->details->rsc_discovery_enabled == FALSE) {
            /* resource discovery is disabled for this node */
            continue;
        }

        if (probed != NULL && crm_is_true(probed) == FALSE) {
            action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                               CRM_OP_REPROBE, node, FALSE, TRUE, data_set);

            add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
            continue;
        }

        for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
            resource_t *rsc = (resource_t *) gIter2->data;

            rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
        }
    }
    return TRUE;
}

static void
rsc_discover_filter(resource_t *rsc, node_t *node)
{
    GListPtr gIter = rsc->children;
    resource_t *top =
uber_parent(rsc); node_t *match; if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) { return; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_discover_filter(child_rsc, node); } match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match && match->rsc_discover_mode != discover_exclusive) { match->weight = -INFINITY; } } /* * Count how many valid nodes we have (so we know the maximum number of * colors we can resolve). * * Apply node constraints (ie. filter the "allowed_nodes" part of resources */ gboolean stage2(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying placement constraints"); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node == NULL) { /* error */ } else if (node->weight >= 0.0 /* global weight */ && node->details->online && node->details->type != node_ping) { data_set->max_valid_nodes++; } } apply_placement_constraints(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; node_t *node = (node_t *) gIter->data; gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; common_apply_stickiness(rsc, node, data_set); rsc_discover_filter(rsc, node); } } return TRUE; } /* * Create internal resource constraints before allocation */ gboolean stage3(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->internal_constraints(rsc, data_set); } return TRUE; } /* * Check for orphaned or redefined actions */ gboolean stage4(pe_working_set_t * data_set) { check_actions(data_set); return TRUE; } static gint sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data) { int rc = 0; int r1_weight = -INFINITY; int r2_weight = -INFINITY; const char *reason = "existence"; const GListPtr nodes = (GListPtr) data; resource_t *resource1 = (resource_t *) convert_const_pointer(a); resource_t *resource2 = (resource_t *) convert_const_pointer(b); node_t *r1_node = NULL; node_t *r2_node = NULL; GListPtr gIter = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; if (a == NULL && b == NULL) { goto done; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "no node list"; if (nodes == NULL) { goto done; } r1_nodes = rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes); r2_nodes = rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes); /* Current location score */ reason = "current location"; r1_weight = -INFINITY; r2_weight = -INFINITY; if (resource1->running_on) { r1_node = g_list_nth_data(resource1->running_on, 0); r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id); if (r1_node != NULL) { r1_weight = r1_node->weight; } } if (resource2->running_on) { r2_node = g_list_nth_data(resource2->running_on, 0); r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id); if (r2_node != NULL) { r2_weight = r2_node->weight; } } if (r1_weight > r2_weight) { rc = -1; 
goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "score"; for (gIter = nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; r1_node = NULL; r2_node = NULL; r1_weight = -INFINITY; if (r1_nodes) { r1_node = g_hash_table_lookup(r1_nodes, node->details->id); } if (r1_node) { r1_weight = r1_node->weight; } r2_weight = -INFINITY; if (r2_nodes) { r2_node = g_hash_table_lookup(r2_nodes, node->details->id); } if (r2_node) { r2_weight = r2_node->weight; } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } } done: crm_trace("%s (%d) on %s %c %s (%d) on %s: %s", resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a", rc < 0 ? '>' : rc > 0 ? '<' : '=', resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason); if (r1_nodes) { g_hash_table_destroy(r1_nodes); } if (r2_nodes) { g_hash_table_destroy(r2_nodes); } return rc; } static void allocate_resources(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_have_remote_nodes)) { /* Force remote connection resources to be allocated first. This * also forces any colocation dependencies to be allocated as well */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == FALSE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); /* For remote node connection resources, always prefer the partial * migration target during resource allocation, if the rsc is in the * middle of a migration. */ rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set); } } /* now do the rest of the resources */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == TRUE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); rsc->cmds->allocate(rsc, NULL, data_set); } } static void cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { return; } /* Don't recurse into ->children, those are just unallocated clone instances */ if(is_not_set(rsc->flags, pe_rsc_orphan)) { return; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node->details->online && get_failcount(node, rsc, NULL, data_set)) { char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); pe_rsc_info(rsc, "Clearing failure of %s on %s because it is orphaned " CRM_XS " %s", rsc->id, node->details->uname, clear_op->uuid); custom_action_order(rsc, NULL, clear_op, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional, data_set); } } } gboolean stage5(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr nodes = g_list_copy(data_set->nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); data_set->resources = g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes); g_list_free(nodes); } gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(show_utilization ? 
0 : utilization_log_level, "Original", node); } crm_trace("Allocating services"); /* Take (next) highest resource, assign it and create its actions */ allocate_resources(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node); } if (is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Calculating needed probes"); /* This code probably needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=100: With probes: ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints 36s ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph Without probes: ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph */ probe_resources(data_set); } crm_trace("Handle orphans"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; cleanup_orphans(rsc, data_set); } crm_trace("Creating actions"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->create_actions(rsc, data_set); } crm_trace("Creating done"); return TRUE; } static gboolean is_managed(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_managed)) { return TRUE; } for (; gIter != NULL; gIter = 
         gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        if (is_managed(child_rsc)) {
            return TRUE;
        }
    }

    return FALSE;
}

static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
    GListPtr gIter = data_set->resources;

    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;

        if (is_managed(rsc)) {
            return TRUE;
        }
    }
    return FALSE;
}

/*!
 * \internal
 * \brief Create pseudo-op for guest node fence, and order relative to it
 *
 * \param[in] node      Guest node to fence
 * \param[in] done      STONITH_DONE operation
 * \param[in] data_set  Working set of CIB state
 */
static void
fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set)
{
    resource_t *container = node->details->remote_rsc->container;
    pe_action_t *stop = NULL;
    pe_action_t *stonith_op = NULL;

    /* The fence action is just a label; we don't do anything differently for
     * off vs. reboot. We specify it explicitly, rather than let it default to
     * the cluster's default action, because we are not _initiating_ fencing -- we
     * are creating a pseudo-event to describe fencing that is already occurring
     * by other means (container recovery).
     */
    const char *fence_action = "off";

    /* Check whether the guest's container resource has any explicit stop or
     * start (the stop may be implied by fencing of the guest's host).
     */
    if (container) {
        stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);

        if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
            fence_action = "reboot";
        }
    }

    /* Create a fence pseudo-event, so we have an event to order actions
     * against, and crmd can always detect it.
     */
    stonith_op = pe_fence_op(node, fence_action, FALSE, data_set);
    update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                        __FUNCTION__, __LINE__);

    /* We want to imply stops/demotes after the guest is stopped, not wait until
     * it is restarted, so we always order pseudo-fencing after stop, not start
     * (even though start might be closer to what is done for a real reboot).
     */
    if (stop) {
        order_actions(stop, stonith_op,
                      pe_order_runnable_left|pe_order_implies_then);
        crm_info("Implying guest node %s is down (action %d) "
                 "after container %s is stopped (action %d)",
                 node->details->uname, stonith_op->id,
                 container->id, stop->id);
    } else {
        crm_info("Implying guest node %s is down (action %d) ",
                 node->details->uname, stonith_op->id);
    }

    /* @TODO: Order pseudo-fence after any (optional) fence of guest's host */

    /* Order/imply other actions relative to pseudo-fence as with real fence */
    stonith_constraints(node, stonith_op, data_set);
    order_actions(stonith_op, done, pe_order_implies_then);
}

/*
 * Create dependencies for stonith and shutdown operations
 */
gboolean
stage6(pe_working_set_t * data_set)
{
    action_t *dc_down = NULL;
    action_t *dc_fence = NULL;
    action_t *stonith_op = NULL;
    action_t *last_stonith = NULL;
    gboolean integrity_lost = FALSE;
    action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
    action_t *done = get_pseudo_op(STONITH_DONE, data_set);
    gboolean need_stonith = TRUE;
    GListPtr gIter;
    GListPtr stonith_ops = NULL;

    /* Remote ordering constraints need to happen prior to calculating
     * fencing, because it is one more place we will mark the node as
     * dirty.
* * A nice side-effect of doing it first is that we can remove a * bunch of special logic from apply_*_ordering() because its * already part of pe_fence_node() */ crm_trace("Creating remote ordering constraints"); apply_remote_node_ordering(data_set); crm_trace("Processing fencing and shutdown cases"); if (any_managed_resources(data_set) == FALSE) { crm_notice("Delaying fencing operations until there are resources to manage"); need_stonith = FALSE; } /* Check each node for stonith/shutdown */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* Guest nodes are "fenced" by recovering their container resource, * so handle them separately. */ if (is_container_remote_node(node)) { if (node->details->remote_requires_reset && need_stonith) { fence_guest(node, done, data_set); } continue; } stonith_op = NULL; if (node->details->unclean && need_stonith && pe_can_fence(data_set, node)) { pe_warn("Scheduling Node %s for STONITH", node->details->uname); stonith_op = pe_fence_op(node, NULL, FALSE, data_set); stonith_constraints(node, stonith_op, data_set); if (node->details->is_dc) { dc_down = stonith_op; dc_fence = stonith_op; } else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) { if (last_stonith) { order_actions(last_stonith, stonith_op, pe_order_optional); } last_stonith = stonith_op; } else { order_actions(stonith_op, done, pe_order_implies_then); stonith_ops = g_list_append(stonith_ops, stonith_op); } } else if (node->details->online && node->details->shutdown && /* TODO define what a shutdown op means for a remote node. * For now we do not send shutdown operations for remote nodes, but * if we can come up with a good use for this in the future, we will. */ is_remote_node(node) == FALSE) { action_t *down_op = NULL; crm_notice("Scheduling Node %s for shutdown", node->details->uname); down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname), CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set); shutdown_constraints(node, down_op, data_set); add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); if (node->details->is_dc) { dc_down = down_op; } } if (node->details->unclean && stonith_op == NULL) { integrity_lost = TRUE; pe_warn("Node %s is unclean!", node->details->uname); } } if (integrity_lost) { if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED"); pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE"); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) { crm_notice("Cannot fence unclean nodes until quorum is" " attained (or no-quorum-policy is set to ignore)"); } } if (dc_down != NULL) { GListPtr gIter = NULL; crm_trace("Ordering shutdowns before %s on %s (DC)", dc_down->task, dc_down->node->details->uname); add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *node_stop = (action_t *) gIter->data; if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) { continue; } else if (node_stop->node->details->is_dc) { continue; } crm_debug("Ordering shutdown on %s before %s on %s", node_stop->node->details->uname, dc_down->task, dc_down->node->details->uname); order_actions(node_stop, dc_down, pe_order_optional); } if (last_stonith) { if (dc_down != last_stonith) { order_actions(last_stonith, dc_down, pe_order_optional); } } else { GListPtr gIter2 = NULL; for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = 
gIter2->next) { stonith_op = (action_t *) gIter2->data; if (dc_down != stonith_op) { order_actions(stonith_op, dc_down, pe_order_optional); } } } } if (dc_fence) { order_actions(dc_down, done, pe_order_implies_then); } else if (last_stonith) { order_actions(last_stonith, done, pe_order_implies_then); } order_actions(done, all_stopped, pe_order_implies_then); g_list_free(stonith_ops); return TRUE; } /* * Determine the sets of independent actions and the correct order for the * actions in each set. * * Mark dependencies of un-runnable actions un-runnable * */ static GListPtr find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if (list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *tmp = NULL; char *task = NULL; int interval = 0; if (parse_op_key(original_key, &tmp, &task, &interval)) { key = generate_op_key(rsc->id, task, interval); /* crm_err("looking up %s instead of %s", key, original_key); */ /* slist_iter(action, action_t, actions, lpc, */ /* crm_err(" - %s", action->uuid)); */ list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } free(key); free(tmp); free(task); } return list; } static void rsc_order_then(action_t * lh_action, resource_t * rsc, order_constraint_t * order) { GListPtr gIter = NULL; GListPtr rh_actions = NULL; action_t *rh_action = NULL; enum pe_ordering type = order->type; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); rh_action = order->rh_action; crm_trace("Processing RH of ordering constraint %d", order->id); if (rh_action != NULL) { rh_actions = g_list_prepend(NULL, rh_action); } else if (rsc != NULL) { rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task); } if (rh_actions == NULL) { pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..." 
" ignoring", rsc->id, order->rh_action_task); if (lh_action) { pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid); } return; } if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) { pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid, order->rh_action_task); clear_bit(type, pe_order_implies_then); } gIter = rh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *rh_action_iter = (action_t *) gIter->data; if (lh_action) { order_actions(lh_action, rh_action_iter, type); } else if (type & pe_order_implies_then) { update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type); } } g_list_free(rh_actions); } static void rsc_order_first(resource_t * lh_rsc, order_constraint_t * order, pe_working_set_t * data_set) { GListPtr gIter = NULL; GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_trace("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if (lh_action != NULL) { lh_actions = g_list_prepend(NULL, lh_action); } else if (lh_action == NULL) { lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task); } if (lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; parse_op_key(order->lh_action_task, &rsc_id, &op_type, &interval); key = generate_op_key(lh_rsc->id, op_type, interval); if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else { pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); lh_actions = g_list_prepend(NULL, lh_action); } free(op_type); free(rsc_id); } gIter = lh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *lh_action_iter = (action_t *) gIter->data; if (rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if (rh_rsc) { rsc_order_then(lh_action_iter, rh_rsc, order); } else if (order->rh_action) { order_actions(lh_action_iter, order->rh_action, order->type); } } g_list_free(lh_actions); } extern gboolean update_action(action_t * action); extern void update_colo_start_chain(action_t * action); static int is_recurring_action(action_t *action) { const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); int interval = crm_parse_int(interval_s, "0"); if(interval > 0) { return TRUE; } return FALSE; } static void apply_container_ordering(action_t *action, pe_working_set_t *data_set) { /* VMs are also classified as containers for these purposes... in * that they both involve a 'thing' running on a real or remote * cluster node. 
* * This allows us to be smarter about the type and extent of * recovery actions required in various scenarios */ resource_t *remote_rsc = NULL; resource_t *container = NULL; enum action_tasks task = text2task(action->task); if (action->rsc == NULL) { return; } CRM_ASSERT(action->node); CRM_ASSERT(is_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); container = remote_rsc->container; CRM_ASSERT(container); if(is_set(container->flags, pe_rsc_failed)) { pe_fence_node(data_set, action->node, "container failed"); } crm_trace("Order %s action %s relative to %s%s for %s%s", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, is_set(container->flags, pe_rsc_failed)? "failed " : "", container->id); switch (task) { case start_rsc: case action_promote: /* Force resource recovery if the container is recovered */ custom_action_order(container, generate_op_key(container->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_implies_then | pe_order_runnable_left, data_set); /* Wait for the connection resource to be up too */ custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left, data_set); break; case stop_rsc: if(is_set(container->flags, pe_rsc_failed)) { /* When the container representing a guest node fails, * the stop action for all the resources living in * that container is implied by the container * stopping. This is similar to how fencing operations * work for cluster nodes. */ } else { /* Otherwise, ensure the operation happens before the connection is brought down */ custom_action_order(action->rsc, NULL, action, remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL, pe_order_preserve, data_set); } break; case action_demote: if(is_set(container->flags, pe_rsc_failed)) { /* Just like a stop, the demote is implied by the * container having failed/stopped * * If we really wanted to we would order the demote * after the stop, IFF the containers current role was * stopped (otherwise we re-introduce an ordering * loop) */ } else { /* Otherwise, ensure the operation happens before the connection is brought down */ custom_action_order(action->rsc, NULL, action, remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL, pe_order_preserve, data_set); } break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ if(task != no_action) { custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set); } } else { custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left, data_set); } break; } } static enum remote_connection_state get_remote_node_state(pe_node_t *node) { resource_t *remote_rsc = NULL; node_t *cluster_node = NULL; if(node == NULL) { return remote_state_unknown; } remote_rsc = node->details->remote_rsc; CRM_ASSERT(remote_rsc); if(remote_rsc->running_on) { cluster_node = remote_rsc->running_on->data; } /* If the cluster node the remote connection resource resides on * is unclean or went offline, we can't process any operations * on that remote node until 
after it starts elsewhere. */ if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) { /* There is nowhere left to run the connection resource, * and the resource is in a failed state (either directly * or because it is located on a failed node). * * If there are any resources known to be active on it (stop), * or if there are resources in an unknown state (probe), we * must assume the worst and fence it. */ if (is_set(remote_rsc->flags, pe_rsc_failed)) { return remote_state_failed; } else if(cluster_node && cluster_node->details->unclean) { return remote_state_failed; } else { return remote_state_stopped; } } else if (cluster_node == NULL) { /* Connection is recoverable but not currently running anywhere, see if we can recover it first */ return remote_state_unknown; } else if(cluster_node->details->unclean == TRUE || cluster_node->details->online == FALSE) { /* Connection is running on a dead node, see if we can recover it first */ return remote_state_resting; } else if (g_list_length(remote_rsc->running_on) > 1 && remote_rsc->partial_migration_source && remote_rsc->partial_migration_target) { /* We're in the middle of migrating a connection resource, * wait until after the resource migrates before performing * any actions. */ return remote_state_resting; } return remote_state_alive; } static void apply_remote_ordering(action_t *action, pe_working_set_t *data_set) { resource_t *remote_rsc = NULL; node_t *cluster_node = NULL; enum action_tasks task = text2task(action->task); enum remote_connection_state state = get_remote_node_state(action->node); enum pe_ordering order_opts = pe_order_none; if (action->rsc == NULL) { return; } CRM_ASSERT(action->node); CRM_ASSERT(is_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); if(remote_rsc->running_on) { cluster_node = remote_rsc->running_on->data; } crm_trace("Order %s action %s relative to %s%s (state %d)", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, state); switch (task) { case start_rsc: case action_promote: /* This as an internally generated constraint exempt from * user constraint prohibitions, and this action isn't runnable * if the connection start isn't runnable. */ order_opts = pe_order_preserve | pe_order_runnable_left; if (state == remote_state_failed) { /* Force recovery, by making this action required */ order_opts |= pe_order_implies_then; } /* Ensure connection is up before running this action */ custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, order_opts, data_set); break; case stop_rsc: /* Handle special case with remote node where stop actions need to be * ordered after the connection resource starts somewhere else. */ if(state == remote_state_resting) { /* Wait for the connection resource to be up and assume everything is as we left it */ custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left, data_set); } else { if(state == remote_state_failed) { /* We would only be here if the resource is * running on the remote node. Since we have no * way to stop it, it is necessary to fence the * node. 
*/ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable"); } custom_action_order(action->rsc, NULL, action, remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL, pe_order_preserve | pe_order_implies_first, data_set); } break; case action_demote: /* Only order this demote relative to the connection start if the * connection isn't being torn down. Otherwise, the demote would be * blocked because the connection start would not be allowed. */ if(state == remote_state_resting || state == remote_state_unknown) { custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve, data_set); } /* Otherwise we can rely on the stop ordering */ break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set); } else { if(task == monitor_rsc && state == remote_state_failed) { /* We would only be here if we do not know the * state of the resource on the remote node. * Since we have no way to find out, it is * necessary to fence the node. */ pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable"); } if(cluster_node && state == remote_state_stopped) { /* The connection is currently up, but is going * down permanently. * * Make sure we check services are actually * stopped _before_ we let the connection get * closed */ custom_action_order(action->rsc, NULL, action, remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL, pe_order_preserve | pe_order_runnable_left, data_set); } else { custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL, action->rsc, NULL, action, pe_order_preserve | pe_order_runnable_left, data_set); } } break; } } static void apply_remote_node_ordering(pe_working_set_t *data_set) { if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) { return; } for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->rsc == NULL) { continue; } /* Special case. */ if (action->rsc && action->rsc->is_remote_node && safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { /* If we are clearing the failcount of an actual remote node * connection resource, then make sure this happens before allowing * the connection to start if we are planning on starting the * connection during this transition. */ custom_action_order(action->rsc, NULL, action, action->rsc, generate_op_key(action->rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); continue; } /* If the action occurs on a Pacemaker Remote node, create * ordering constraints that guarantee the action occurs while the node * is active (after start, before stop ... things like that). 
*/ if (action->node == NULL || is_remote_node(action->node) == FALSE || action->node->details->remote_rsc == NULL || is_set(action->flags, pe_action_pseudo)) { crm_trace("Nothing required for %s on %s", action->uuid, action->node?action->node->details->uname:"NA"); } else if(action->node->details->remote_rsc->container) { crm_trace("Container ordering for %s", action->uuid); apply_container_ordering(action, data_set); } else { crm_trace("Remote ordering for %s", action->uuid); apply_remote_ordering(action, data_set); } } } static void order_probes(pe_working_set_t * data_set) { #if 0 GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; /* Given "A then B", we would prefer to wait for A to be * started before probing B. * * If A was a filesystem on which the binaries and data for B * lived, it would have been useful if the author of B's agent * could assume that A is running before B.monitor will be * called. * * However we can't _only_ probe once A is running, otherwise * we'd not detect the state of B if A could not be started * for some reason. * * In practice however, we cannot even do an opportunistic * version of this because B may be moving: * * B.probe -> B.start * B.probe -> B.stop * B.stop -> B.start * A.stop -> A.start * A.start -> B.probe * * So far so good, but if we add the result of this code: * * B.stop -> A.stop * * Then we get a loop: * * B.probe -> B.stop -> A.stop -> A.start -> B.probe * * We could kill the 'B.probe -> B.stop' dependency, but that * could mean stopping B "too" soon, because B.start must wait * for the probes to complete. * * Another option is to allow it only if A is a non-unique * clone with clone-max == node-max (since we'll never be * moving it). However, we could still be stopping one * instance at the same time as starting another. * The complexity of checking for allowed conditions combined * with the ever narrowing usecase suggests that this code * should remain disabled until someone gets smarter. 
*/ action_t *start = NULL; GListPtr actions = NULL; GListPtr probes = NULL; char *key = NULL; key = start_key(rsc); actions = find_actions(rsc->actions, key, NULL); free(key); if (actions) { start = actions->data; g_list_free(actions); } if(start == NULL) { crm_err("No start action for %s", rsc->id); continue; } key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0); probes = find_actions(rsc->actions, key, NULL); free(key); for (actions = start->actions_before; actions != NULL; actions = actions->next) { action_wrapper_t *before = (action_wrapper_t *) actions->data; GListPtr pIter = NULL; action_t *first = before->action; resource_t *first_rsc = first->rsc; if(first->required_runnable_before) { GListPtr clone_actions = NULL; for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) { before = (action_wrapper_t *) clone_actions->data; crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid); CRM_ASSERT(before->action->rsc); first_rsc = before->action->rsc; break; } } else if(safe_str_neq(first->task, RSC_START)) { crm_trace("Not a start op %s for %s", first->uuid, start->uuid); } if(first_rsc == NULL) { continue; } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) { crm_trace("Same parent %s for %s", first_rsc->id, start->uuid); continue; } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) { crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid); continue; } crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant); for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; crm_err("Ordering %s before %s", first->uuid, probe->uuid); order_actions(first, probe, pe_order_optional); } } } #endif } gboolean stage7(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying ordering constraints"); /* Don't ask me why, but apparently they need to be processed in * the order they were created in... 
go figure * * Also g_list_append() has horrendous performance characteristics * So we need to use g_list_prepend() and then reverse the list here */ data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints); for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { order_constraint_t *order = (order_constraint_t *) gIter->data; resource_t *rsc = order->lh_rsc; crm_trace("Applying ordering constraint: %d", order->id); if (rsc != NULL) { crm_trace("rsc_action-to-*"); rsc_order_first(rsc, order, data_set); continue; } rsc = order->rh_rsc; if (rsc != NULL) { crm_trace("action-to-rsc_action"); rsc_order_then(order->lh_action, rsc, order); } else { crm_trace("action-to-action"); order_actions(order->lh_action, order->rh_action, order->type); } } for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_colo_start_chain(action); } crm_trace("Ordering probes"); order_probes(data_set); crm_trace("Updating %d actions", g_list_length(data_set->actions)); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_action(action); } LogNodeActions(data_set, FALSE); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; LogActions(rsc, data_set, FALSE); } return TRUE; } int transition_id = -1; /* * Create a dependency graph to send to the transitioner (via the CRMd) */ gboolean stage8(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *value = NULL; transition_id++; crm_trace("Creating transition graph %d.", transition_id); data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); value = pe_pref(data_set->config_hash, "cluster-delay"); crm_xml_add(data_set->graph, "cluster-delay", value); value = pe_pref(data_set->config_hash, "stonith-timeout"); crm_xml_add(data_set->graph, "stonith-timeout", value); crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY"); if (is_set(data_set->flags, pe_flag_start_failure_fatal)) { crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(data_set->graph, "failed-start-offset", "1"); } value = pe_pref(data_set->config_hash, "batch-limit"); crm_xml_add(data_set->graph, "batch-limit", value); crm_xml_add_int(data_set->graph, "transition_id", transition_id); value = pe_pref(data_set->config_hash, "migration-limit"); if (crm_int_helper(value, NULL) > 0) { crm_xml_add(data_set->graph, "migration-limit", value); } /* errors... 
slist_iter(action, action_t, action_list, lpc, if(action->optional == FALSE && action->runnable == FALSE) { print_action("Ignoring", action, TRUE); } ); */ gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id); rsc->cmds->expand(rsc, data_set); } crm_log_xml_trace(data_set->graph, "created resource-driven action list"); /* pseudo action to distribute list of nodes with maintenance state update */ add_maintenance_update(data_set); /* catch any non-resource specific actions */ crm_trace("processing non-resource actions"); gIter = data_set->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->rsc && action->node && action->node->details->shutdown && is_not_set(action->rsc->flags, pe_rsc_maintenance) && is_not_set(action->flags, pe_action_optional) && is_not_set(action->flags, pe_action_runnable) && crm_str_eq(action->task, RSC_STOP, TRUE) ) { /* Eventually we should just ignore the 'fence' case * But for now it's the best way to detect (in CTS) when * CIB resource updates are being lost */ if (is_set(data_set->flags, pe_flag_have_quorum) || data_set->no_quorum_policy == no_quorum_ignore) { crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)", action->node->details->unclean ? "fence" : "shut down", action->node->details->uname, action->rsc->id, is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked", is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "", action->uuid); } } graph_element_from_action(action, data_set); } crm_log_xml_trace(data_set->graph, "created generic action list"); crm_trace("Created transition graph %d.", transition_id); return TRUE; } void LogNodeActions(pe_working_set_t * data_set, gboolean terminal) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { char *node_name = NULL; - const char *task = NULL; + char *task = NULL; action_t *action = (action_t *) gIter->data; if (action->rsc != NULL) { continue; + } else if (is_set(action->flags, pe_action_optional)) { + continue; } if (is_container_remote_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id); } else if(action->node) { node_name = crm_strdup_printf("%s", action->node->details->uname); } if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { - task = "Shutdown"; + task = strdup("Shutdown"); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { - task = "Fence"; + const char *op = g_hash_table_lookup(action->meta, "stonith_action"); + task = crm_strdup_printf("Fence (%s)", op); } if(task == NULL) { /* Nothing to report */ } else if(terminal) { printf(" * %s %s\n", task, node_name); } else { crm_notice(" * %s %s\n", task, node_name); } free(node_name); + free(task); } } void cleanup_alloc_calculations(pe_working_set_t * data_set) { if (data_set == NULL) { return; } crm_trace("deleting %d order cons: %p", g_list_length(data_set->ordering_constraints), data_set->ordering_constraints); pe_free_ordering(data_set->ordering_constraints); data_set->ordering_constraints = NULL; crm_trace("deleting %d node cons: %p", g_list_length(data_set->placement_constraints), data_set->placement_constraints); pe_free_rsc_to_node(data_set->placement_constraints); data_set->placement_constraints = NULL; crm_trace("deleting %d inter-resource cons: %p", g_list_length(data_set->colocation_constraints), 
data_set->colocation_constraints); g_list_free_full(data_set->colocation_constraints, free); data_set->colocation_constraints = NULL; crm_trace("deleting %d ticket deps: %p", g_list_length(data_set->ticket_constraints), data_set->ticket_constraints); g_list_free_full(data_set->ticket_constraints, free); data_set->ticket_constraints = NULL; cleanup_calculations(data_set); } diff --git a/pengine/test10/594.summary b/pengine/test10/594.summary index d9fe8c163f..ef2a02ca7f 100644 --- a/pengine/test10/594.summary +++ b/pengine/test10/594.summary @@ -1,56 +1,56 @@ Current cluster status: Node hadev3 (879e65f8-4b38-4c56-9552-4752ad436669): UNCLEAN (offline) Online: [ hadev1 hadev2 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Started hadev1 Transition Summary: - * Fence hadev3 + * Fence (reboot) hadev3 * Shutdown hadev2 * Move DcIPaddr (Started hadev2 -> hadev1) * Move rsc_hadev2 (Started hadev2 -> hadev1) * Stop child_DoFencing:0 (hadev2) * Stop child_DoFencing:2 (hadev1) Executing cluster transition: * Resource action: DcIPaddr monitor on hadev1 * Resource action: rsc_hadev3 monitor on hadev2 * Resource action: rsc_hadev2 monitor on hadev1 * Resource action: child_DoFencing:0 monitor on hadev1 * Resource action: child_DoFencing:2 monitor on hadev2 * Pseudo action: DoFencing_stop_0 * Fencing hadev3 (reboot) * Pseudo action: stonith_complete * Resource action: DcIPaddr stop on hadev2 * Resource action: rsc_hadev2 stop on hadev2 * Resource action: child_DoFencing:0 stop on hadev2 * Resource action: child_DoFencing:2 stop on hadev1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on hadev2 * Pseudo action: all_stopped * Resource action: DcIPaddr start on hadev1 * Resource action: rsc_hadev2 start on hadev1 * Resource action: DcIPaddr monitor=5000 on hadev1 * Resource action: rsc_hadev2 monitor=5000 on hadev1 Revised cluster status: Online: [ hadev1 hadev2 ] OFFLINE: [ hadev3 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Stopped diff --git a/pengine/test10/829.summary b/pengine/test10/829.summary index a9d25e01f6..556699f104 100644 --- a/pengine/test10/829.summary +++ b/pengine/test10/829.summary @@ -1,64 +1,64 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n01 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 (UNCLEAN) rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 Transition Summary: - * Fence c001n02 + * Fence (reboot) c001n02 * Move 
rsc_c001n02 (Started c001n02 -> c001n01) * Stop child_DoFencing:0 (c001n02) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n01 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Fencing c001n02 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc_c001n02_stop_0 * Pseudo action: DoFencing_stop_0 * Resource action: rsc_c001n02 start on c001n01 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: all_stopped * Resource action: rsc_c001n02 monitor=5000 on c001n01 Revised cluster status: Online: [ c001n01 c001n03 c001n08 ] OFFLINE: [ c001n02 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary index 63ed2d7565..a32e81da21 100644 --- a/pengine/test10/bug-5186-partial-migrate.summary +++ b/pengine/test10/bug-5186-partial-migrate.summary @@ -1,91 +1,91 @@ Current cluster status: Node bl460g1n7 (3232261593): UNCLEAN (offline) Online: [ bl460g1n6 bl460g1n8 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n7 (UNCLEAN) prmVM2 (ocf::heartbeat:VirtualDomain): Migrating bl460g1n7 (UNCLEAN) Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN) prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN) Clone Set: clnDiskd1 [prmDiskd1] prmDiskd1 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnDiskd2 [prmDiskd2] prmDiskd2 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnPing [prmPing] prmPing (ocf::pacemaker:ping): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Transition Summary: - * Fence bl460g1n7 + * Fence (reboot) bl460g1n7 * Move prmDummy (Started bl460g1n7 -> bl460g1n6) * Move prmVM2 (Started bl460g1n7 -> bl460g1n8) * Move prmStonith8-1 (Started bl460g1n7 -> bl460g1n6) * Move prmStonith8-2 (Started bl460g1n7 -> bl460g1n6) * Stop 
prmDiskd1:0 (bl460g1n7) * Stop prmDiskd2:0 (bl460g1n7) * Stop prmPing:0 (bl460g1n7) Executing cluster transition: * Resource action: prmVM2 stop on bl460g1n6 * Pseudo action: grpStonith8_stop_0 * Pseudo action: prmStonith8-2_stop_0 * Fencing bl460g1n7 (reboot) * Pseudo action: stonith_complete * Pseudo action: prmDummy_stop_0 * Pseudo action: prmVM2_stop_0 * Pseudo action: prmStonith8-1_stop_0 * Pseudo action: clnDiskd1_stop_0 * Pseudo action: clnDiskd2_stop_0 * Pseudo action: clnPing_stop_0 * Resource action: prmDummy start on bl460g1n6 * Resource action: prmVM2 start on bl460g1n8 * Pseudo action: grpStonith8_stopped_0 * Pseudo action: grpStonith8_start_0 * Resource action: prmStonith8-1 start on bl460g1n6 * Resource action: prmStonith8-2 start on bl460g1n6 * Pseudo action: prmDiskd1_stop_0 * Pseudo action: clnDiskd1_stopped_0 * Pseudo action: prmDiskd2_stop_0 * Pseudo action: clnDiskd2_stopped_0 * Pseudo action: prmPing_stop_0 * Pseudo action: clnPing_stopped_0 * Pseudo action: all_stopped * Resource action: prmVM2 monitor=10000 on bl460g1n8 * Pseudo action: grpStonith8_running_0 * Resource action: prmStonith8-1 monitor=10000 on bl460g1n6 * Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6 Revised cluster status: Online: [ bl460g1n6 bl460g1n8 ] OFFLINE: [ bl460g1n7 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n6 prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n8 Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6 Clone Set: clnDiskd1 [prmDiskd1] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnDiskd2 [prmDiskd2] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnPing [prmPing] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary index 8ea3ff015c..f70a9eaab0 100644 --- a/pengine/test10/bug-cl-5247.summary +++ b/pengine/test10/bug-cl-5247.summary @@ -1,101 +1,101 @@ Using the original execution date of: 2015-08-12 02:53:40Z Current cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): FAILED bl460g8n4 Resource Group: grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED pgsr02 vip-rep (ocf::heartbeat:Dummy): FAILED pgsr02 Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] Transition Summary: - * Fence pgsr02 (resource: prmDB2) + * Fence (off) pgsr02 (resource: prmDB2) * Stop prmDB2 (bl460g8n4) * Restart prmStonith1-2 (Started bl460g8n4) * Restart prmStonith2-2 (Started bl460g8n3) * Recover vip-master (Started pgsr02 -> pgsr01) * Recover vip-rep (Started pgsr02 -> pgsr01) * Demote pgsql:0 (Master -> Stopped pgsr02) * Stop pgsr02 (bl460g8n4) Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Resource action: prmStonith1-2 stop on bl460g8n4 * Pseudo action: grpStonith2_stop_0 * Resource action: prmStonith2-2 
stop on bl460g8n3 * Pseudo action: msPostgresql_pre_notify_demote_0 * Resource action: pgsr01 monitor on bl460g8n4 * Resource action: pgsr02 monitor on bl460g8n3 * Pseudo action: grpStonith1_stopped_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stopped_0 * Pseudo action: grpStonith2_start_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 * Pseudo action: msPostgresql_demote_0 * Resource action: pgsr02 stop on bl460g8n4 * Resource action: prmDB2 stop on bl460g8n4 * Pseudo action: stonith-pgsr02-off on pgsr02 * Pseudo action: stonith_complete * Pseudo action: pgsql_post_notify_stop_0 * Pseudo action: pgsql_demote_0 * Pseudo action: msPostgresql_demoted_0 * Pseudo action: msPostgresql_post_notify_demoted_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: master-group_stop_0 * Pseudo action: vip-rep_stop_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: vip-master_stop_0 * Pseudo action: pgsql_stop_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: master-group_stopped_0 * Pseudo action: master-group_start_0 * Resource action: vip-master start on pgsr01 * Resource action: vip-rep start on pgsr01 * Pseudo action: msPostgresql_post_notify_stopped_0 * Pseudo action: master-group_running_0 * Resource action: vip-master monitor=10000 on pgsr01 * Resource action: vip-rep monitor=10000 on pgsr01 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 * Pseudo action: pgsql_notified_0 * Resource action: pgsql monitor=9000 on pgsr01 * Pseudo action: all_stopped * Resource action: prmStonith1-2 start on bl460g8n4 * Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4 * Resource action: prmStonith2-2 start on bl460g8n3 * Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3 * Pseudo action: grpStonith1_running_0 * Pseudo action: grpStonith2_running_0 Using the original execution date of: 2015-08-12 02:53:40Z Revised cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): FAILED Resource Group: grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] vip-rep (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] diff --git a/pengine/test10/bug-lf-2508.summary b/pengine/test10/bug-lf-2508.summary index 7b436d4309..7931d19fe1 100644 --- a/pengine/test10/bug-lf-2508.summary +++ b/pengine/test10/bug-lf-2508.summary @@ -1,112 +1,112 @@ Current cluster status: Node srv02 (71085d5e-1c63-49e0-8c8c-400d610b4182): UNCLEAN (offline) Online: [ srv01 srv03 srv04 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Stopped Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv02 (UNCLEAN) Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Resource Group: grpStonith1:1 prmStonith1-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith1-3 (stonith:external/ssh): Started srv02 (UNCLEAN) 
Started: [ srv03 srv04 ] Stopped: [ srv01 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Resource Group: grpStonith3:0 prmStonith3-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith3-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith3:1 prmStonith3-1 (stonith:external/stonith-helper): Started srv01 prmStonith3-3 (stonith:external/ssh): Stopped Started: [ srv04 ] Stopped: [ srv03 ] Clone Set: clnStonith4 [grpStonith4] Resource Group: grpStonith4:1 prmStonith4-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith4-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Started: [ srv01 srv03 ] Stopped: [ srv04 ] Transition Summary: - * Fence srv02 + * Fence (reboot) srv02 * Start Dummy01 (srv01) * Move Dummy02 (Started srv02 -> srv04) * Stop prmStonith1-1:1 (srv02) * Stop prmStonith1-3:1 (srv02) * Stop prmStonith3-1:0 (srv02) * Stop prmStonith3-3:0 (srv02) * Start prmStonith3-3:1 (srv01) * Stop prmStonith4-1:1 (srv02) * Stop prmStonith4-3:1 (srv02) Executing cluster transition: * Pseudo action: Group01_start_0 * Resource action: prmStonith3-1:1 monitor=3600000 on srv01 * Fencing srv02 (reboot) * Pseudo action: stonith_complete * Resource action: Dummy01 start on srv01 * Pseudo action: Group02_stop_0 * Pseudo action: Dummy02_stop_0 * Pseudo action: clnStonith1_stop_0 * Pseudo action: clnStonith3_stop_0 * Pseudo action: clnStonith4_stop_0 * Pseudo action: Group01_running_0 * Resource action: Dummy01 monitor=10000 on srv01 * Pseudo action: Group02_stopped_0 * Pseudo action: Group02_start_0 * Resource action: Dummy02 start on srv04 * Pseudo action: grpStonith1:1_stop_0 * Pseudo action: prmStonith1-3:1_stop_0 * Pseudo action: grpStonith3:0_stop_0 * Pseudo action: prmStonith3-3:1_stop_0 * Pseudo action: grpStonith4:1_stop_0 * Pseudo action: prmStonith4-3:1_stop_0 * Pseudo action: Group02_running_0 * Resource action: Dummy02 monitor=10000 on srv04 * Pseudo action: prmStonith1-1:1_stop_0 * Pseudo action: prmStonith3-1:1_stop_0 * Pseudo action: prmStonith4-1:1_stop_0 * Pseudo action: all_stopped * Pseudo action: grpStonith1:1_stopped_0 * Pseudo action: clnStonith1_stopped_0 * Pseudo action: grpStonith3:0_stopped_0 * Pseudo action: clnStonith3_stopped_0 * Pseudo action: clnStonith3_start_0 * Pseudo action: grpStonith4:1_stopped_0 * Pseudo action: clnStonith4_stopped_0 * Pseudo action: grpStonith3:1_start_0 * Resource action: prmStonith3-3:1 start on srv01 * Pseudo action: grpStonith3:1_running_0 * Resource action: prmStonith3-3:1 monitor=3600000 on srv01 * Pseudo action: clnStonith3_running_0 Revised cluster status: Online: [ srv01 srv03 srv04 ] OFFLINE: [ srv02 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Started srv01 Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv04 Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Started: [ srv03 srv04 ] Stopped: [ srv01 srv02 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Started: [ srv01 srv04 ] Stopped: [ srv02 srv03 ] Clone Set: clnStonith4 [grpStonith4] Started: [ srv01 srv03 ] Stopped: [ srv02 srv04 ] diff --git a/pengine/test10/bug-lf-2551.summary b/pengine/test10/bug-lf-2551.summary index ffb7c6d933..ef2e54a457 100644 --- a/pengine/test10/bug-lf-2551.summary +++ b/pengine/test10/bug-lf-2551.summary @@ -1,226 +1,226 @@ Current cluster status: Node hex-9: 
UNCLEAN (offline) Online: [ hex-0 hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: base-clone [base-group] Resource Group: base-group:3 dlm (ocf::pacemaker:controld): Started hex-9 (UNCLEAN) o2cb (ocf::ocfs2:o2cb): Started hex-9 (UNCLEAN) clvm (ocf::lvm2:clvmd): Started hex-9 (UNCLEAN) cmirrord (ocf::lvm2:cmirrord): Started hex-9 (UNCLEAN) vg1 (ocf::heartbeat:LVM): Started hex-9 (UNCLEAN) ocfs2-1 (ocf::heartbeat:Filesystem): Started hex-9 (UNCLEAN) Started: [ hex-0 hex-7 hex-8 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) vm-06 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 (ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-9 (UNCLEAN) vm-33 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-34 (ocf::heartbeat:Xen): Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped Transition Summary: - * Fence hex-9 + * Fence (reboot) hex-9 * Move fencing-sbd (Started hex-9 -> hex-0) * 
Move dummy1 (Started hex-9 -> hex-0) * Stop dlm:3 (hex-9) * Stop o2cb:3 (hex-9) * Stop clvm:3 (hex-9) * Stop cmirrord:3 (hex-9) * Stop vg1:3 (hex-9) * Stop ocfs2-1:3 (hex-9) * Stop vm-03 (hex-9) * Stop vm-06 (hex-9) * Stop vm-09 (hex-9) * Stop vm-13 (hex-9) * Stop vm-17 (hex-9) * Stop vm-21 (hex-9) * Stop vm-25 (hex-9) * Stop vm-29 (hex-9) * Stop vm-33 (hex-9) * Stop vm-37 (hex-9) * Stop vm-41 (hex-9) * Stop vm-45 (hex-9) * Stop vm-49 (hex-9) * Stop vm-53 (hex-9) * Stop vm-57 (hex-9) * Stop vm-61 (hex-9) Executing cluster transition: * Pseudo action: fencing-sbd_stop_0 * Resource action: dummy1 monitor=300000 on hex-8 * Resource action: dummy1 monitor=300000 on hex-7 * Fencing hex-9 (reboot) * Pseudo action: stonith_complete * Pseudo action: load_stopped_hex-8 * Pseudo action: load_stopped_hex-7 * Pseudo action: load_stopped_hex-0 * Resource action: fencing-sbd start on hex-0 * Pseudo action: dummy1_stop_0 * Pseudo action: vm-03_stop_0 * Pseudo action: vm-06_stop_0 * Pseudo action: vm-09_stop_0 * Pseudo action: vm-13_stop_0 * Pseudo action: vm-17_stop_0 * Pseudo action: vm-21_stop_0 * Pseudo action: vm-25_stop_0 * Pseudo action: vm-29_stop_0 * Pseudo action: vm-33_stop_0 * Pseudo action: vm-37_stop_0 * Pseudo action: vm-41_stop_0 * Pseudo action: vm-45_stop_0 * Pseudo action: vm-49_stop_0 * Pseudo action: vm-53_stop_0 * Pseudo action: vm-57_stop_0 * Pseudo action: vm-61_stop_0 * Pseudo action: load_stopped_hex-9 * Resource action: dummy1 start on hex-0 * Pseudo action: base-clone_stop_0 * Resource action: dummy1 monitor=30000 on hex-0 * Pseudo action: base-group:3_stop_0 * Pseudo action: ocfs2-1:3_stop_0 * Pseudo action: vg1:3_stop_0 * Pseudo action: cmirrord:3_stop_0 * Pseudo action: clvm:3_stop_0 * Pseudo action: o2cb:3_stop_0 * Pseudo action: dlm:3_stop_0 * Pseudo action: all_stopped * Pseudo action: base-group:3_stopped_0 * Pseudo action: base-clone_stopped_0 Revised cluster status: Online: [ hex-0 hex-7 hex-8 ] OFFLINE: [ hex-9 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: base-clone [base-group] Started: [ hex-0 hex-7 hex-8 ] Stopped: [ hex-9 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Stopped vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-0 vm-06 (ocf::heartbeat:Xen): Stopped vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 (ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Stopped vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Stopped vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Stopped vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Stopped vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Stopped vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Stopped vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-0 vm-33 (ocf::heartbeat:Xen): Stopped vm-34 (ocf::heartbeat:Xen): 
Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Stopped vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Stopped vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Stopped vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Stopped vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Stopped vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Stopped vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Stopped vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped diff --git a/pengine/test10/bug-lf-2606.summary b/pengine/test10/bug-lf-2606.summary index ab93bb35bc..f30a053c77 100644 --- a/pengine/test10/bug-lf-2606.summary +++ b/pengine/test10/bug-lf-2606.summary @@ -1,45 +1,45 @@ 1 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node node2: UNCLEAN (online) Online: [ node1 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node2 ( disabled ) rsc2 (ocf::pacemaker:Dummy): Started node2 Master/Slave Set: ms3 [rsc3] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Stop rsc1 (node2) * Move rsc2 (Started node2 -> node1) * Demote rsc3:1 (Master -> Stopped node2) Executing cluster transition: * Pseudo action: ms3_demote_0 * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc3:1_demote_0 * Pseudo action: ms3_demoted_0 * Pseudo action: ms3_stop_0 * Resource action: rsc2 start on node1 * Pseudo action: rsc3:1_stop_0 * Pseudo action: ms3_stopped_0 * Pseudo action: all_stopped * Resource action: rsc2 monitor=10000 on node1 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped ( disabled ) rsc2 (ocf::pacemaker:Dummy): Started node1 Master/Slave Set: ms3 [rsc3] Slaves: [ node1 ] Stopped: [ node2 ] diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary index 8fe474a164..1c355c06d5 100644 --- a/pengine/test10/bug-rh-1097457.summary +++ b/pengine/test10/bug-rh-1097457.summary @@ -1,105 +1,105 @@ 2 of 26 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3 VM3 (ocf::heartbeat:VirtualDomain): Started lama3 FSlun3 (ocf::heartbeat:Filesystem): FAILED lamaVM2 FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3 FAKE5-IP 
(ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3 Resource Group: lamaVM1-G1 FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G2 FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G3 FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM2-G4 FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2 FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2 Clone Set: FAKE6-clone [FAKE6] Started: [ lamaVM1 lamaVM2 lamaVM3 ] Transition Summary: - * Fence lamaVM2 (resource: VM2) + * Fence (reboot) lamaVM2 (resource: VM2) * Recover VM2 (Started lama3) * Recover FSlun3 (Started lamaVM2 -> lama2) * Restart FAKE4 (Started lamaVM2) * Restart FAKE4-IP (Started lamaVM2) * Restart FAKE6:2 (Started lamaVM2) * Restart lamaVM2 (Started lama3) Executing cluster transition: * Resource action: lamaVM2 stop on lama3 * Resource action: VM2 stop on lama3 * Pseudo action: stonith-lamaVM2-reboot on lamaVM2 * Pseudo action: stonith_complete * Resource action: VM2 start on lama3 * Resource action: VM2 monitor=10000 on lama3 * Pseudo action: lamaVM2-G4_stop_0 * Pseudo action: FAKE4-IP_stop_0 * Pseudo action: FAKE6-clone_stop_0 * Resource action: lamaVM2 start on lama3 * Resource action: lamaVM2 monitor=30000 on lama3 * Resource action: FSlun3 monitor=10000 on lamaVM2 * Pseudo action: FAKE4_stop_0 * Pseudo action: FAKE6_stop_0 * Pseudo action: FAKE6-clone_stopped_0 * Pseudo action: FAKE6-clone_start_0 * Pseudo action: lamaVM2-G4_stopped_0 * Resource action: FAKE6 start on lamaVM2 * Resource action: FAKE6 monitor=30000 on lamaVM2 * Pseudo action: FAKE6-clone_running_0 * Pseudo action: FSlun3_stop_0 * Pseudo action: all_stopped * Resource action: FSlun3 start on lama2 * Pseudo action: lamaVM2-G4_start_0 * Resource action: FAKE4 start on lamaVM2 * Resource action: FAKE4 monitor=30000 on lamaVM2 * Resource action: FAKE4-IP start on lamaVM2 * Resource action: FAKE4-IP monitor=30000 on lamaVM2 * Resource action: FSlun3 monitor=10000 on lama2 * Pseudo action: lamaVM2-G4_running_0 Revised cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3 VM3 (ocf::heartbeat:VirtualDomain): Started lama3 FSlun3 (ocf::heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ] FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3 FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3 Resource Group: lamaVM1-G1 FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G2 FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G3 FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM2-G4 FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2 FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2 Clone Set: FAKE6-clone [FAKE6] Started: [ lamaVM1 
lamaVM2 lamaVM3 ] diff --git a/pengine/test10/concurrent-fencing.summary b/pengine/test10/concurrent-fencing.summary index a274c3b614..1e4a8177ba 100644 --- a/pengine/test10/concurrent-fencing.summary +++ b/pengine/test10/concurrent-fencing.summary @@ -1,27 +1,27 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Node node2 (uuid2): UNCLEAN (offline) Node node3 (uuid3): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped Transition Summary: - * Fence node3 - * Fence node2 - * Fence node1 + * Fence (reboot) node3 + * Fence (reboot) node2 + * Fence (reboot) node1 Executing cluster transition: * Fencing node3 (reboot) * Fencing node1 (reboot) * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 node3 ] stonith-1 (stonith:dummy): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped diff --git a/pengine/test10/guest-node-host-dies.summary b/pengine/test10/guest-node-host-dies.summary index 717c43d82b..9f856132da 100644 --- a/pengine/test10/guest-node-host-dies.summary +++ b/pengine/test10/guest-node-host-dies.summary @@ -1,82 +1,82 @@ Current cluster status: Node rhel7-1 (1): UNCLEAN (offline) Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Fencing (stonith:fence_xvm): Started rhel7-4 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 ( UNCLEAN ) container1 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) Master/Slave Set: lxc-ms-master [lxc-ms] Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Transition Summary: - * Fence rhel7-1 - * Fence lxc2 (resource: container2) - * Fence lxc1 (resource: container1) + * Fence (reboot) rhel7-1 + * Fence (reboot) lxc2 (resource: container2) + * Fence (reboot) lxc1 (resource: container1) * Restart Fencing (Started rhel7-4) * Move rsc_rhel7-1 (Started rhel7-1 -> rhel7-5) * Recover container1 (Started rhel7-1 -> rhel7-2) * Recover container2 (Started rhel7-1 -> rhel7-3) * Recover lxc-ms:0 (Master lxc1) * Recover lxc-ms:1 (Slave lxc2) * Move lxc1 (Started rhel7-1 -> rhel7-2) * Move lxc2 (Started rhel7-1 -> rhel7-3) Executing cluster transition: * Resource action: Fencing stop on rhel7-4 * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 monitor on rhel7-5 * Resource action: lxc1 monitor on rhel7-4 * Resource action: lxc1 monitor on rhel7-3 * Resource action: lxc2 monitor on rhel7-5 * Resource action: lxc2 monitor on rhel7-4 * Resource action: lxc2 monitor on rhel7-2 * Fencing rhel7-1 (reboot) * Pseudo action: rsc_rhel7-1_stop_0 * Pseudo action: lxc1_stop_0 * Pseudo action: lxc2_stop_0 * Pseudo action: container1_stop_0 * Pseudo action: container2_stop_0 * Pseudo action: stonith-lxc2-reboot on lxc2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: rsc_rhel7-1 start on rhel7-5 * Resource action: container1 start on rhel7-2 * Resource action: container2 start on rhel7-3 * Pseudo action: lxc-ms_demote_0 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc1 start on rhel7-2 * Resource action: lxc2 start on rhel7-3 * Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc1 monitor=30000 on rhel7-2 * Resource action: lxc2 monitor=30000 on rhel7-3 * Pseudo 
action: all_stopped * Resource action: Fencing start on rhel7-4 * Resource action: Fencing monitor=120000 on rhel7-4 * Resource action: lxc-ms start on lxc1 * Resource action: lxc-ms start on lxc2 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc-ms monitor=10000 on lxc2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised cluster status: Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] OFFLINE: [ rhel7-1 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-4 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-5 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-2 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] diff --git a/pengine/test10/interleave-pseudo-stop.summary b/pengine/test10/interleave-pseudo-stop.summary index cf30da07d7..12be95607c 100644 --- a/pengine/test10/interleave-pseudo-stop.summary +++ b/pengine/test10/interleave-pseudo-stop.summary @@ -1,83 +1,83 @@ Current cluster status: Node node1 (f6d93040-a9ad-4745-a647-57ed32444ca8): UNCLEAN (offline) Online: [ node2 ] Clone Set: stonithcloneset [stonithclone] stonithclone (stonith:external/ssh): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: evmscloneset [evmsclone] evmsclone (ocf::heartbeat:EvmsSCC): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: imagestorecloneset [imagestoreclone] imagestoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: configstorecloneset [configstoreclone] configstoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN) Started: [ node2 ] Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Stop stonithclone:1 (node1) * Stop evmsclone:1 (node1) * Stop imagestoreclone:1 (node1) * Stop configstoreclone:1 (node1) Executing cluster transition: * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: stonithcloneset_stop_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmsclone:0_post_notify_stop_0 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestoreclone:0_post_notify_stop_0 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstoreclone:0_post_notify_stop_0 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Pseudo action: stonithclone:0_stop_0 * Pseudo action: stonithcloneset_stopped_0 * Pseudo action: imagestoreclone:0_stop_0 * Pseudo action: imagestorecloneset_stopped_0 * Pseudo action: configstoreclone:0_stop_0 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestoreclone:0_notified_0 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstoreclone:0_notified_0 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_stop_0 * Pseudo action: 
evmsclone:0_stop_0 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmsclone:0_notified_0 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped (disabled): [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/master-7.summary b/pengine/test10/master-7.summary index 348b4eef39..2889efb6b1 100644 --- a/pengine/test10/master-7.summary +++ b/pengine/test10/master-7.summary @@ -1,121 +1,121 @@ Current cluster status: Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 ( UNCLEAN ) ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: - * Fence c001n01 + * Fence (reboot) c001n01 * Move DcIPaddr (Started c001n01 -> c001n03) * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move lsb_dummy (Started c001n02 -> c001n08) * Move rsc_c001n01 (Started c001n01 -> c001n03) * Stop child_DoFencing:0 (c001n01) * Demote ocf_msdummy:0 (Master -> Stopped c001n01) * Stop ocf_msdummy:4 (c001n01) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource 
action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: stonith_complete * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: ocf_msdummy:4_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/master-8.summary b/pengine/test10/master-8.summary index b77c88465e..5968a2531d 100644 --- a/pengine/test10/master-8.summary +++ b/pengine/test10/master-8.summary @@ -1,125 +1,125 @@ Current cluster status: 
Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: - * Fence c001n01 + * Fence (reboot) c001n01 * Move DcIPaddr (Started c001n01 -> c001n03) * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move lsb_dummy (Started c001n02 -> c001n08) * Move rsc_c001n01 (Started c001n01 -> c001n03) * Stop child_DoFencing:0 (c001n01) * Demote ocf_msdummy:0 (Master -> Slave c001n01 - blocked) * Move ocf_msdummy:0 (Slave c001n01 -> c001n03) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: stonith_complete * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on 
c001n03 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Resource action: ocf_msdummy:0 start on c001n03 * Pseudo action: master_rsc_1_running_0 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 * Resource action: ocf_msdummy:0 monitor=5000 on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/migrate-fencing.summary b/pengine/test10/migrate-fencing.summary index 831e49acb2..842c65df68 100644 --- a/pengine/test10/migrate-fencing.summary +++ b/pengine/test10/migrate-fencing.summary @@ -1,108 +1,108 @@ Current cluster status: Node pcmk-4: UNCLEAN (online) Online: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-4 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4 migrator (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: Connectivity [ping-1] Started: 
[ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-4 ] Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: - * Fence pcmk-4 + * Fence (reboot) pcmk-4 * Stop FencingChild:0 (pcmk-4) * Move r192.168.101.181 (Started pcmk-4 -> pcmk-1) * Move r192.168.101.182 (Started pcmk-4 -> pcmk-1) * Move r192.168.101.183 (Started pcmk-4 -> pcmk-1) * Move rsc_pcmk-4 (Started pcmk-4 -> pcmk-2) * Move lsb-dummy (Started pcmk-4 -> pcmk-1) * Migrate migrator (Started pcmk-1 -> pcmk-3) * Stop ping-1:0 (pcmk-4) * Demote stateful-1:0 (Master -> Stopped pcmk-4) * Promote stateful-1:1 (Slave -> Master pcmk-1) Executing cluster transition: * Resource action: stateful-1:3 monitor=15000 on pcmk-3 * Resource action: stateful-1:2 monitor=15000 on pcmk-2 * Fencing pcmk-4 (reboot) * Pseudo action: stonith_complete * Pseudo action: Fencing_stop_0 * Pseudo action: rsc_pcmk-4_stop_0 * Pseudo action: lsb-dummy_stop_0 * Resource action: migrator migrate_to on pcmk-1 * Pseudo action: Connectivity_stop_0 * Pseudo action: FencingChild:0_stop_0 * Pseudo action: Fencing_stopped_0 * Pseudo action: group-1_stop_0 * Pseudo action: r192.168.101.183_stop_0 * Resource action: rsc_pcmk-4 start on pcmk-2 * Resource action: migrator migrate_from on pcmk-3 * Resource action: migrator stop on pcmk-1 * Pseudo action: ping-1:0_stop_0 * Pseudo action: Connectivity_stopped_0 * Pseudo action: r192.168.101.182_stop_0 * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2 * Pseudo action: migrator_start_0 * Pseudo action: r192.168.101.181_stop_0 * Resource action: migrator monitor=10000 on pcmk-3 * Pseudo action: group-1_stopped_0 * Pseudo action: master-1_demote_0 * Pseudo action: stateful-1:0_demote_0 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Pseudo action: stateful-1:0_stop_0 * Pseudo action: master-1_stopped_0 * Pseudo action: all_stopped * Pseudo action: master-1_promote_0 * Resource action: stateful-1:1 promote on pcmk-1 * Pseudo action: master-1_promoted_0 * Pseudo action: group-1_start_0 * Resource action: r192.168.101.181 start on pcmk-1 * Resource action: r192.168.101.182 start on pcmk-1 * Resource action: r192.168.101.183 start on pcmk-1 * Resource action: stateful-1:1 monitor=16000 on pcmk-1 * Pseudo action: group-1_running_0 * Resource action: r192.168.101.181 monitor=5000 on pcmk-1 * Resource action: r192.168.101.182 monitor=5000 on pcmk-1 * Resource action: r192.168.101.183 monitor=5000 on pcmk-1 * Resource action: lsb-dummy start on pcmk-1 * Resource action: lsb-dummy monitor=5000 on pcmk-1 Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] OFFLINE: [ pcmk-4 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 migrator (ocf::pacemaker:Dummy): Started pcmk-3 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] diff --git a/pengine/test10/per-op-failcount.summary b/pengine/test10/per-op-failcount.summary index 
f050d59243..53b1625261 100644 --- a/pengine/test10/per-op-failcount.summary +++ b/pengine/test10/per-op-failcount.summary @@ -1,34 +1,34 @@ Using the original execution date of: 2017-04-06 09:04:22Z Current cluster status: Node rh73-01-snmp (3232238265): UNCLEAN (online) Online: [ rh73-02-snmp ] prmDummy (ocf::pacemaker:Dummy): FAILED rh73-01-snmp prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp prmStonith2-1 (stonith:external/ssh): Started rh73-01-snmp Transition Summary: - * Fence rh73-01-snmp + * Fence (reboot) rh73-01-snmp * Recover prmDummy (Started rh73-01-snmp -> rh73-02-snmp) * Move prmStonith2-1 (Started rh73-01-snmp -> rh73-02-snmp) Executing cluster transition: * Pseudo action: prmStonith2-1_stop_0 * Fencing rh73-01-snmp (reboot) * Pseudo action: stonith_complete * Pseudo action: prmDummy_stop_0 * Resource action: prmStonith2-1 start on rh73-02-snmp * Pseudo action: all_stopped * Resource action: prmDummy start on rh73-02-snmp * Resource action: prmDummy monitor=10000 on rh73-02-snmp Using the original execution date of: 2017-04-06 09:04:22Z Revised cluster status: Online: [ rh73-02-snmp ] OFFLINE: [ rh73-01-snmp ] prmDummy (ocf::pacemaker:Dummy): Started rh73-02-snmp prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp prmStonith2-1 (stonith:external/ssh): Started rh73-02-snmp diff --git a/pengine/test10/rec-node-11.summary b/pengine/test10/rec-node-11.summary index eb967ddedf..22f5af71b2 100644 --- a/pengine/test10/rec-node-11.summary +++ b/pengine/test10/rec-node-11.summary @@ -1,47 +1,47 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (online) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node2 Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Start stonith-1 (node2) * Move rsc1 (Started node1 -> node2) * Move rsc2 (Started node1 -> node2) * Restart rsc3 (Started node2) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc1_stop_0 * Pseudo action: group1_stopped_0 * Resource action: rsc3 stop on node2 * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc3 start on node2 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Pseudo action: group1_running_0 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 Resource Group: group1 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-12.summary b/pengine/test10/rec-node-12.summary index 26f9b1309d..6316fdbaa6 100644 --- a/pengine/test10/rec-node-12.summary +++ b/pengine/test10/rec-node-12.summary @@ -1,92 +1,92 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n01 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 
(stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: - * Fence c001n02 + * Fence (reboot) c001n02 * Start DcIPaddr (c001n08) * Start rsc_c001n08 (c001n08) * Start rsc_c001n02 (c001n01) * Start rsc_c001n03 (c001n03) * Start rsc_c001n01 (c001n01) * Start child_DoFencing:0 (c001n03) * Start child_DoFencing:1 (c001n01) * Start child_DoFencing:2 (c001n08) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n08 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n08 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n03 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n01 * Resource action: child_DoFencing:0 monitor on c001n08 * Resource action: child_DoFencing:0 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n01 * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 monitor on c001n08 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Pseudo action: DoFencing_start_0 * Fencing c001n02 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: DcIPaddr start on c001n08 * Resource action: rsc_c001n08 start on c001n08 * Resource action: rsc_c001n02 start on c001n01 * Resource action: rsc_c001n03 start on c001n03 * Resource action: rsc_c001n01 start on c001n01 * Resource action: child_DoFencing:0 start on c001n03 * Resource action: child_DoFencing:1 start on c001n01 * Resource action: child_DoFencing:2 start on c001n08 * Pseudo action: DoFencing_running_0 * Resource action: DcIPaddr monitor=5000 on c001n08 * Resource action: rsc_c001n08 monitor=5000 on c001n08 * Resource action: rsc_c001n02 monitor=5000 on c001n01 * Resource action: rsc_c001n03 monitor=5000 on c001n03 * Resource action: rsc_c001n01 monitor=5000 on c001n01 * Resource action: child_DoFencing:0 monitor=5000 on c001n03 * Resource action: child_DoFencing:1 monitor=5000 on c001n01 * Resource action: child_DoFencing:2 monitor=5000 on c001n08 Revised cluster status: Online: [ c001n01 c001n03 c001n08 ] OFFLINE: [ c001n02 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n03 child_DoFencing:1 (stonith:ssh): Started c001n01 child_DoFencing:2 (stonith:ssh): Started c001n08 child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/rec-node-13.summary b/pengine/test10/rec-node-13.summary index 
e273a79769..c222ad8dec 100644 --- a/pengine/test10/rec-node-13.summary +++ b/pengine/test10/rec-node-13.summary @@ -1,80 +1,80 @@ Current cluster status: Node c001n04 (9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online) Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 Transition Summary: - * Fence c001n04 + * Fence (reboot) c001n04 * Stop ocf_msdummy:6 (c001n04) Executing cluster transition: * Fencing c001n04 (reboot) * Pseudo action: stonith_complete * Pseudo action: master_rsc_1_stop_0 * Pseudo action: ocf_msdummy:6_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n04 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped 
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 diff --git a/pengine/test10/rec-node-14.summary b/pengine/test10/rec-node-14.summary index 532f3d9c30..f503a92391 100644 --- a/pengine/test10/rec-node-14.summary +++ b/pengine/test10/rec-node-14.summary @@ -1,27 +1,27 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Node node2 (uuid2): UNCLEAN (offline) Node node3 (uuid3): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped Transition Summary: - * Fence node3 - * Fence node2 - * Fence node1 + * Fence (reboot) node3 + * Fence (reboot) node2 + * Fence (reboot) node1 Executing cluster transition: * Fencing node1 (reboot) * Fencing node3 (reboot) * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 node3 ] stonith-1 (stonith:dummy): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped diff --git a/pengine/test10/rec-node-15.summary b/pengine/test10/rec-node-15.summary index 8d886805c7..2f706d2023 100644 --- a/pengine/test10/rec-node-15.summary +++ b/pengine/test10/rec-node-15.summary @@ -1,88 +1,88 @@ Current cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Node sapcl03 (0bfb78a2-fcd2-4f52-8a06-2d17437a6750): UNCLEAN (offline) Online: [ sapcl01 ] stonith-1 (stonith:dummy): Stopped Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl02 LVM_12 (ocf::heartbeat:LVM): Started sapcl02 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl02 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Stopped LVM_22 (ocf::heartbeat:LVM): Stopped Filesystem_23 (ocf::heartbeat:Filesystem): Stopped oracle_24 (ocf::heartbeat:oracle): Stopped oralsnr_25 (ocf::heartbeat:oralsnr): Stopped Transition Summary: - * Fence sapcl03 + * Fence (reboot) sapcl03 * Start stonith-1 (sapcl01) * Move IPaddr_192_168_1_102 (Started sapcl02 -> sapcl01) * Move LVM_12 (Started sapcl02 -> sapcl01) * Move Filesystem_13 (Started sapcl02 -> sapcl01) * Start IPaddr_192_168_1_104 (sapcl01) * Start LVM_22 (sapcl01) * Start Filesystem_23 (sapcl01) * Start oracle_24 (sapcl01) * Start oralsnr_25 (sapcl01) Executing cluster transition: * Resource action: stonith-1 monitor on sapcl02 * Resource action: stonith-1 monitor on sapcl01 * Pseudo action: app02_stop_0 * Resource action: Filesystem_13 stop on sapcl02 * Pseudo action: oracle_start_0 * Fencing sapcl03 (reboot) * Pseudo action: stonith_complete * Resource action: LVM_12 stop on sapcl02 * Resource action: IPaddr_192_168_1_104 start on sapcl01 * Resource action: LVM_22 start on sapcl01 * Resource action: Filesystem_23 start on sapcl01 * Resource action: oracle_24 start on sapcl01 * Resource action: oralsnr_25 
start on sapcl01 * Resource action: IPaddr_192_168_1_102 stop on sapcl02 * Pseudo action: oracle_running_0 * Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01 * Resource action: LVM_22 monitor=120000 on sapcl01 * Resource action: Filesystem_23 monitor=120000 on sapcl01 * Resource action: oracle_24 monitor=120000 on sapcl01 * Resource action: oralsnr_25 monitor=120000 on sapcl01 * Pseudo action: all_stopped * Resource action: stonith-1 start on sapcl01 * Pseudo action: app02_stopped_0 * Pseudo action: app02_start_0 * Resource action: IPaddr_192_168_1_102 start on sapcl01 * Resource action: LVM_12 start on sapcl01 * Resource action: Filesystem_13 start on sapcl01 * Pseudo action: app02_running_0 * Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01 * Resource action: LVM_12 monitor=120000 on sapcl01 * Resource action: Filesystem_13 monitor=120000 on sapcl01 Revised cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Online: [ sapcl01 ] OFFLINE: [ sapcl03 ] stonith-1 (stonith:dummy): Started sapcl01 Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_12 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_22 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_23 (ocf::heartbeat:Filesystem): Started sapcl01 oracle_24 (ocf::heartbeat:oracle): Started sapcl01 oralsnr_25 (ocf::heartbeat:oralsnr): Started sapcl01 diff --git a/pengine/test10/rec-node-2.summary b/pengine/test10/rec-node-2.summary index 5c8db02943..0e0183fce5 100644 --- a/pengine/test10/rec-node-2.summary +++ b/pengine/test10/rec-node-2.summary @@ -1,62 +1,62 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped Resource Group: group1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Stopped Resource Group: group2 rsc5 (heartbeat:apache): Stopped rsc6 (heartbeat:apache): Stopped Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Start stonith-1 (node2) * Start rsc1 (node2) * Start rsc2 (node2) * Start rsc3 (node2) * Start rsc4 (node2) * Start rsc5 (node2) * Start rsc6 (node2) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Pseudo action: group1_start_0 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Pseudo action: group2_start_0 * Resource action: rsc5 monitor on node2 * Resource action: rsc6 monitor on node2 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 * Resource action: rsc4 start on node2 * Resource action: rsc5 start on node2 * Resource action: rsc6 start on node2 * Pseudo action: group1_running_0 * Pseudo action: group2_running_0 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 Resource Group: group1 rsc3 
(heartbeat:apache): Started node2 rsc4 (heartbeat:apache): Started node2 Resource Group: group2 rsc5 (heartbeat:apache): Started node2 rsc6 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-4.summary b/pengine/test10/rec-node-4.summary index 761573f0cb..0a2606e4c6 100644 --- a/pengine/test10/rec-node-4.summary +++ b/pengine/test10/rec-node-4.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Start stonith-1 (node2) * Move rsc1 (Started node1 -> node2) * Move rsc2 (Started node1 -> node2) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-6.summary b/pengine/test10/rec-node-6.summary index fb294fbcff..530c7d20a6 100644 --- a/pengine/test10/rec-node-6.summary +++ b/pengine/test10/rec-node-6.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (online) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Start stonith-1 (node2) * Move rsc1 (Started node1 -> node2) * Move rsc2 (Started node1 -> node2) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-7.summary b/pengine/test10/rec-node-7.summary index 761573f0cb..0a2606e4c6 100644 --- a/pengine/test10/rec-node-7.summary +++ b/pengine/test10/rec-node-7.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Start stonith-1 (node2) * Move rsc1 (Started node1 -> node2) * Move rsc2 (Started node1 -> node2) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start 
on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-rsc-5.summary b/pengine/test10/rec-rsc-5.summary index 7bcb1a3f41..c3e658ac9b 100644 --- a/pengine/test10/rec-rsc-5.summary +++ b/pengine/test10/rec-rsc-5.summary @@ -1,36 +1,36 @@ Current cluster status: Node node2 (uuid2): UNCLEAN (online) Online: [ node1 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): FAILED node2 rsc2 (heartbeat:apache): Started node2 Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Start stonith-1 (node1) * Recover rsc1 (Started node2 -> node1) * Move rsc2 (Started node2 -> node1) Executing cluster transition: * Resource action: stonith-1 monitor on node1 * Resource action: rsc1 monitor on node1 * Resource action: rsc2 monitor on node1 * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: all_stopped * Resource action: stonith-1 start on node1 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] stonith-1 (stonith:dummy): Started node1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 diff --git a/pengine/test10/remote-fence-before-reconnect.summary b/pengine/test10/remote-fence-before-reconnect.summary index 520f5cfbb9..2ce3315401 100644 --- a/pengine/test10/remote-fence-before-reconnect.summary +++ b/pengine/test10/remote-fence-before-reconnect.summary @@ -1,39 +1,39 @@ Current cluster status: RemoteNode c7auto4: UNCLEAN (offline) Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto2 c7auto4 (ocf::pacemaker:remote): FAILED c7auto1 fake1 (ocf::heartbeat:Dummy): Started c7auto3 fake2 (ocf::heartbeat:Dummy): Started c7auto4 (UNCLEAN) fake3 (ocf::heartbeat:Dummy): Started c7auto1 fake4 (ocf::heartbeat:Dummy): Started c7auto2 fake5 (ocf::heartbeat:Dummy): Started c7auto3 Transition Summary: - * Fence c7auto4 + * Fence (reboot) c7auto4 * Stop c7auto4 (c7auto1) * Move fake2 (Started c7auto4 -> c7auto1) Executing cluster transition: * Fencing c7auto4 (reboot) * Pseudo action: stonith_complete * Pseudo action: fake2_stop_0 * Resource action: c7auto4 stop on c7auto1 * Resource action: fake2 start on c7auto1 * Pseudo action: all_stopped * Resource action: fake2 monitor=10000 on c7auto1 Revised cluster status: RemoteNode c7auto4: UNCLEAN (offline) Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto2 c7auto4 (ocf::pacemaker:remote): FAILED fake1 (ocf::heartbeat:Dummy): Started c7auto3 fake2 (ocf::heartbeat:Dummy): Started c7auto1 fake3 (ocf::heartbeat:Dummy): Started c7auto1 fake4 (ocf::heartbeat:Dummy): Started c7auto2 fake5 (ocf::heartbeat:Dummy): Started c7auto3 diff --git a/pengine/test10/remote-fence-unclean-3.summary b/pengine/test10/remote-fence-unclean-3.summary index ec24500635..9296add68d 100644 --- a/pengine/test10/remote-fence-unclean-3.summary +++ b/pengine/test10/remote-fence-unclean-3.summary @@ -1,85 +1,85 @@ Current cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] RemoteOFFLINE: [ overcloud-novacompute-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 
rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] fence1 (stonith:fence_xvm): Stopped overcloud-novacompute-0 (ocf::pacemaker:remote): FAILED overcloud-controller-0 Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-0 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-1 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-2 Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master overcloud-controller-0 galera-bundle-1 (ocf::heartbeat:galera): Master overcloud-controller-1 galera-bundle-2 (ocf::heartbeat:galera): Master overcloud-controller-2 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master overcloud-controller-0 redis-bundle-1 (ocf::heartbeat:redis): Slave overcloud-controller-1 redis-bundle-2 (ocf::heartbeat:redis): Slave overcloud-controller-2 ip-192.168.24.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 ip-10.0.0.7 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 ip-172.16.2.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2 ip-172.16.2.8 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 ip-172.16.1.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 ip-172.16.3.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-0 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started overcloud-controller-1 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started overcloud-controller-2 Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-0 Docker container: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest] openstack-cinder-backup-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-1 Transition Summary: - * Fence overcloud-novacompute-0 + * Fence (reboot) overcloud-novacompute-0 * Start fence1 (overcloud-controller-0) * Stop overcloud-novacompute-0 (overcloud-controller-0) Executing cluster transition: * Resource action: fence1 monitor on overcloud-controller-2 * Resource action: fence1 monitor on overcloud-controller-1 * Resource action: fence1 monitor on overcloud-controller-0 * Resource action: overcloud-novacompute-0 stop on overcloud-controller-0 * Fencing overcloud-novacompute-0 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: fence1 start on overcloud-controller-0 * Resource action: fence1 monitor=60000 on overcloud-controller-0 Revised cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] RemoteOFFLINE: [ overcloud-novacompute-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 
redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] fence1 (stonith:fence_xvm): Started overcloud-controller-0 overcloud-novacompute-0 (ocf::pacemaker:remote): Stopped Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-0 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-1 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-controller-2 Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master overcloud-controller-0 galera-bundle-1 (ocf::heartbeat:galera): Master overcloud-controller-1 galera-bundle-2 (ocf::heartbeat:galera): Master overcloud-controller-2 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master overcloud-controller-0 redis-bundle-1 (ocf::heartbeat:redis): Slave overcloud-controller-1 redis-bundle-2 (ocf::heartbeat:redis): Slave overcloud-controller-2 ip-192.168.24.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 ip-10.0.0.7 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 ip-172.16.2.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2 ip-172.16.2.8 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 ip-172.16.1.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 ip-172.16.3.9 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-0 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started overcloud-controller-1 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started overcloud-controller-2 Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-0 Docker container: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest] openstack-cinder-backup-docker-0 (ocf::heartbeat:docker): Started overcloud-controller-1 diff --git a/pengine/test10/remote-fence-unclean.summary b/pengine/test10/remote-fence-unclean.summary index 06940fc1d7..667549bb75 100644 --- a/pengine/test10/remote-fence-unclean.summary +++ b/pengine/test10/remote-fence-unclean.summary @@ -1,47 +1,47 @@ Current cluster status: RemoteNode remote1: UNCLEAN (offline) Online: [ 18builder 18node1 18node2 ] shooter (stonith:fence_xvm): Started 18builder remote1 (ocf::pacemaker:remote): FAILED 18node1 FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 (UNCLEAN) FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: - * Fence remote1 + * Fence (reboot) remote1 * Recover remote1 (Started 18node1) * Move FAKE2 (Started remote1 -> 18builder) * Move FAKE3 (Started 18builder -> 18node1) * Move FAKE4 (Started 18node1 -> 18node2) Executing cluster transition: * Resource action: FAKE3 stop on 18builder * Resource action: FAKE4 stop on 18node1 * Fencing remote1 (reboot) * Pseudo action: stonith_complete * Pseudo action: FAKE2_stop_0 * Resource action: FAKE3 start on 18node1 * Resource action: FAKE4 start on 18node2 * Resource 
action: remote1 stop on 18node1 * Resource action: FAKE2 start on 18builder * Resource action: FAKE3 monitor=60000 on 18node1 * Resource action: FAKE4 monitor=60000 on 18node2 * Pseudo action: all_stopped * Resource action: remote1 start on 18node1 * Resource action: remote1 monitor=60000 on 18node1 * Resource action: FAKE2 monitor=60000 on 18builder Revised cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18builder remote1 (ocf::pacemaker:remote): Started 18node1 FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started 18builder FAKE3 (ocf::heartbeat:Dummy): Started 18node1 FAKE4 (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/remote-fence-unclean2.summary b/pengine/test10/remote-fence-unclean2.summary index 96f5d69076..0844c295d8 100644 --- a/pengine/test10/remote-fence-unclean2.summary +++ b/pengine/test10/remote-fence-unclean2.summary @@ -1,31 +1,31 @@ Current cluster status: Node rhel7-alt1 (1): standby Node rhel7-alt2 (2): standby RemoteNode rhel7-alt4: UNCLEAN (offline) OFFLINE: [ rhel7-alt3 ] shooter (stonith:fence_xvm): Stopped rhel7-alt4 (ocf::pacemaker:remote): Stopped fake (ocf::heartbeat:Dummy): Started rhel7-alt4 (UNCLEAN) Transition Summary: - * Fence rhel7-alt4 + * Fence (reboot) rhel7-alt4 * Stop fake (rhel7-alt4) Executing cluster transition: * Fencing rhel7-alt4 (reboot) * Pseudo action: stonith_complete * Pseudo action: fake_stop_0 * Pseudo action: all_stopped Revised cluster status: Node rhel7-alt1 (1): standby Node rhel7-alt2 (2): standby OFFLINE: [ rhel7-alt3 ] RemoteOFFLINE: [ rhel7-alt4 ] shooter (stonith:fence_xvm): Stopped rhel7-alt4 (ocf::pacemaker:remote): Stopped fake (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/remote-partial-migrate2.summary b/pengine/test10/remote-partial-migrate2.summary index 197bd8c7fe..b8b9b4c3db 100644 --- a/pengine/test10/remote-partial-migrate2.summary +++ b/pengine/test10/remote-partial-migrate2.summary @@ -1,208 +1,208 @@ Current cluster status: Node pcmk4 (4): UNCLEAN (offline) Online: [ pcmk1 pcmk2 pcmk3 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote5 ] RemoteOFFLINE: [ pcmk_remote4 ] shooter (stonith:fence_docker_cts): Started pcmk3 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started [ pcmk1 pcmk3 ] pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote4 (ocf::pacemaker:remote): Stopped pcmk_remote5 (ocf::pacemaker:remote): Started pcmk1 FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE5 (ocf::heartbeat:Dummy): Started pcmk1 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE9 (ocf::heartbeat:Dummy): Started pcmk2 FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE11 (ocf::heartbeat:Dummy): Started pcmk1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk1 FAKE13 (ocf::heartbeat:Dummy): Started pcmk3 FAKE14 (ocf::heartbeat:Dummy): Started pcmk2 FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE19 (ocf::heartbeat:Dummy): Started pcmk3 FAKE20 (ocf::heartbeat:Dummy): Started pcmk2 FAKE21 (ocf::heartbeat:Dummy): 
Started pcmk1 FAKE22 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE27 (ocf::heartbeat:Dummy): Started pcmk3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk3 FAKE29 (ocf::heartbeat:Dummy): Started pcmk2 FAKE30 (ocf::heartbeat:Dummy): Started pcmk1 FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE35 (ocf::heartbeat:Dummy): Started pcmk1 FAKE36 (ocf::heartbeat:Dummy): Started pcmk3 FAKE37 (ocf::heartbeat:Dummy): Started pcmk2 FAKE38 (ocf::heartbeat:Dummy): Started pcmk2 FAKE39 (ocf::heartbeat:Dummy): Started pcmk1 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE44 (ocf::heartbeat:Dummy): Started pcmk2 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE48 (ocf::heartbeat:Dummy): Started pcmk1 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5 Transition Summary: - * Fence pcmk4 + * Fence (reboot) pcmk4 * Migrate pcmk_remote2 (Started pcmk3 -> pcmk1) * Start pcmk_remote4 (pcmk2) * Migrate pcmk_remote5 (Started pcmk1 -> pcmk2) * Move FAKE5 (Started pcmk1 -> pcmk_remote4) * Move FAKE9 (Started pcmk2 -> pcmk_remote4) * Move FAKE12 (Started pcmk1 -> pcmk2) * Move FAKE14 (Started pcmk2 -> pcmk_remote1) * Move FAKE17 (Started pcmk_remote1 -> pcmk_remote4) * Move FAKE25 (Started pcmk_remote1 -> pcmk_remote4) * Move FAKE28 (Started pcmk3 -> pcmk1) * Move FAKE30 (Started pcmk1 -> pcmk_remote1) * Move FAKE33 (Started pcmk_remote1 -> pcmk_remote4) * Move FAKE38 (Started pcmk2 -> pcmk_remote1) * Move FAKE39 (Started pcmk1 -> pcmk_remote2) * Move FAKE41 (Started pcmk_remote2 -> pcmk_remote4) * Move FAKE47 (Started pcmk_remote1 -> pcmk_remote2) * Move FAKE48 (Started pcmk1 -> pcmk_remote3) * Move FAKE49 (Started pcmk_remote3 -> pcmk_remote4) Executing cluster transition: * Resource action: FAKE5 stop on pcmk1 * Resource action: FAKE9 stop on pcmk2 * Resource action: FAKE12 stop on pcmk1 * Resource action: FAKE14 stop on pcmk2 * Resource action: FAKE17 stop on pcmk_remote1 * Resource action: FAKE25 stop on pcmk_remote1 * Resource action: FAKE28 stop on pcmk3 * Resource action: FAKE30 stop on pcmk1 * Resource action: FAKE33 stop on pcmk_remote1 * Resource action: FAKE38 stop on pcmk2 * Resource action: FAKE39 stop on pcmk1 * Resource action: FAKE47 stop on pcmk_remote1 * Resource action: FAKE48 stop on pcmk1 * Resource action: FAKE49 stop on pcmk_remote3 * Fencing pcmk4 (reboot) * Pseudo action: stonith_complete * Resource action: pcmk_remote2 migrate_from on pcmk1 * Resource action: pcmk_remote2 stop on pcmk3 * Resource action: pcmk_remote4 start on pcmk2 * Resource action: pcmk_remote5 migrate_to on pcmk1 * Resource action: FAKE5 start on pcmk_remote4 * Resource action: FAKE9 start on pcmk_remote4 * Resource action: FAKE12 start on pcmk2 * Resource action: FAKE14 start on pcmk_remote1 * Resource action: FAKE17 start on pcmk_remote4 * Resource action: FAKE25 start on pcmk_remote4 * Resource action: FAKE28 start on pcmk1 * Resource action: 
FAKE30 start on pcmk_remote1 * Resource action: FAKE33 start on pcmk_remote4 * Resource action: FAKE38 start on pcmk_remote1 * Resource action: FAKE48 start on pcmk_remote3 * Resource action: FAKE49 start on pcmk_remote4 * Pseudo action: pcmk_remote2_start_0 * Resource action: pcmk_remote4 monitor=60000 on pcmk2 * Resource action: pcmk_remote5 migrate_from on pcmk2 * Resource action: pcmk_remote5 stop on pcmk1 * Resource action: FAKE5 monitor=10000 on pcmk_remote4 * Resource action: FAKE9 monitor=10000 on pcmk_remote4 * Resource action: FAKE12 monitor=10000 on pcmk2 * Resource action: FAKE14 monitor=10000 on pcmk_remote1 * Resource action: FAKE17 monitor=10000 on pcmk_remote4 * Resource action: FAKE25 monitor=10000 on pcmk_remote4 * Resource action: FAKE28 monitor=10000 on pcmk1 * Resource action: FAKE30 monitor=10000 on pcmk_remote1 * Resource action: FAKE33 monitor=10000 on pcmk_remote4 * Resource action: FAKE38 monitor=10000 on pcmk_remote1 * Resource action: FAKE39 start on pcmk_remote2 * Resource action: FAKE41 stop on pcmk_remote2 * Resource action: FAKE47 start on pcmk_remote2 * Resource action: FAKE48 monitor=10000 on pcmk_remote3 * Resource action: FAKE49 monitor=10000 on pcmk_remote4 * Pseudo action: all_stopped * Resource action: pcmk_remote2 monitor=60000 on pcmk1 * Pseudo action: pcmk_remote5_start_0 * Resource action: FAKE39 monitor=10000 on pcmk_remote2 * Resource action: FAKE41 start on pcmk_remote4 * Resource action: FAKE47 monitor=10000 on pcmk_remote2 * Resource action: pcmk_remote5 monitor=60000 on pcmk2 * Resource action: FAKE41 monitor=10000 on pcmk_remote4 Revised cluster status: Online: [ pcmk1 pcmk2 pcmk3 ] OFFLINE: [ pcmk4 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 pcmk_remote5 ] shooter (stonith:fence_docker_cts): Started pcmk3 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote4 (ocf::pacemaker:remote): Started pcmk2 pcmk_remote5 (ocf::pacemaker:remote): Started pcmk2 FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE5 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE9 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE11 (ocf::heartbeat:Dummy): Started pcmk1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk2 FAKE13 (ocf::heartbeat:Dummy): Started pcmk3 FAKE14 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE19 (ocf::heartbeat:Dummy): Started pcmk3 FAKE20 (ocf::heartbeat:Dummy): Started pcmk2 FAKE21 (ocf::heartbeat:Dummy): Started pcmk1 FAKE22 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE27 (ocf::heartbeat:Dummy): Started pcmk3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk1 FAKE29 (ocf::heartbeat:Dummy): Started pcmk2 FAKE30 (ocf::heartbeat:Dummy): Started 
pcmk_remote1 FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE35 (ocf::heartbeat:Dummy): Started pcmk1 FAKE36 (ocf::heartbeat:Dummy): Started pcmk3 FAKE37 (ocf::heartbeat:Dummy): Started pcmk2 FAKE38 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE39 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE44 (ocf::heartbeat:Dummy): Started pcmk2 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE48 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5 diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary index 3f29f70dc6..5e4f51db3b 100644 --- a/pengine/test10/remote-recover-all.summary +++ b/pengine/test10/remote-recover-all.summary @@ -1,154 +1,154 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: - * Fence messaging-1 - * Fence galera-2 - * Fence controller-1 + * Fence (reboot) messaging-1 + * Fence (reboot) galera-2 + * Fence (reboot) controller-1 * 
Stop messaging-1 (controller-1) * Move galera-0 (Started controller-1 -> controller-2) * Stop galera-2 (controller-1) * Stop rabbitmq:2 (messaging-1) * Demote galera:1 (Master -> Stopped galera-2) * Stop redis:0 (controller-1) * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) * Stop haproxy:0 (controller-1) * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: galera-master_demote_0 * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Pseudo action: galera_demote_0 * Pseudo action: galera-master_demoted_0 * Pseudo action: galera-master_stop_0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Pseudo action: galera_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: galera-master_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: galera-0 monitor=20000 on controller-2 * Pseudo action: galera-2_stop_0 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: messaging-1_stop_0 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 
13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-connection.summary b/pengine/test10/remote-recover-connection.summary index 43507af6f5..79ea7da665 100644 --- a/pengine/test10/remote-recover-connection.summary +++ b/pengine/test10/remote-recover-connection.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 
(ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: - * Fence controller-1 + * Fence (reboot) controller-1 * Move messaging-1 (Started controller-1 -> controller-2) * Move galera-0 (Started controller-1 -> controller-2) * Move galera-2 (Started controller-1 -> controller-2) * Stop redis:0 (controller-1) * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) * Stop haproxy:0 (controller-1) * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: stonith_complete * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo 
action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-fail.summary b/pengine/test10/remote-recover-fail.summary index ec2d70117b..313c8ace6e 100644 --- a/pengine/test10/remote-recover-fail.summary +++ b/pengine/test10/remote-recover-fail.summary @@ -1,54 +1,54 @@ Current cluster status: RemoteNode rhel7-auto4: UNCLEAN (offline) Online: [ rhel7-auto2 rhel7-auto3 ] OFFLINE: [ rhel7-auto1 ] shooter (stonith:fence_xvm): Started rhel7-auto3 rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto2 FAKE1 (ocf::heartbeat:Dummy): Stopped FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN) FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN) Transition Summary: - * Fence rhel7-auto4 + * Fence (reboot) rhel7-auto4 * Recover rhel7-auto4 (Started rhel7-auto2) * Start FAKE1 (rhel7-auto2) * Move FAKE2 (Started rhel7-auto4 -> rhel7-auto3) * Move FAKE6 (Started rhel7-auto4 -> rhel7-auto2) Executing cluster transition: * Resource action: FAKE3 monitor=10000 on rhel7-auto2 * Resource action: 
FAKE4 monitor=10000 on rhel7-auto3 * Fencing rhel7-auto4 (reboot) * Pseudo action: stonith_complete * Resource action: FAKE1 start on rhel7-auto2 * Pseudo action: FAKE2_stop_0 * Pseudo action: FAKE6_stop_0 * Resource action: rhel7-auto4 stop on rhel7-auto2 * Resource action: FAKE1 monitor=10000 on rhel7-auto2 * Resource action: FAKE2 start on rhel7-auto3 * Resource action: FAKE6 start on rhel7-auto2 * Pseudo action: all_stopped * Resource action: rhel7-auto4 start on rhel7-auto2 * Resource action: FAKE2 monitor=10000 on rhel7-auto3 * Resource action: FAKE6 monitor=10000 on rhel7-auto2 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto2 Revised cluster status: Online: [ rhel7-auto2 rhel7-auto3 ] OFFLINE: [ rhel7-auto1 ] RemoteOnline: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto2 FAKE1 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto2 diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary index 48355a4c79..bd27773687 100644 --- a/pengine/test10/remote-recover-no-resources.summary +++ b/pengine/test10/remote-recover-no-resources.summary @@ -1,145 +1,145 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started 
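The recurring change across these summary files is in the Transition Summary block: fence entries now name the scheduled fence action, so "* Fence controller-1" becomes "* Fence (reboot) controller-1", and unfencing (seen further below) is reported as "(on)". A minimal sketch of producing such a line follows; summarize_fencing() and its arguments are illustrative names only, not the project's actual logging code.

/* Illustrative sketch (not Pacemaker's logging code): emit a
 * transition-summary line that names the scheduled fence action.
 * "reboot" and "off" are recovery fencing; "on" denotes unfencing. */
#include <stdio.h>

static void
summarize_fencing(const char *node, const char *fence_action)
{
    printf(" * Fence (%s) %s\n", fence_action ? fence_action : "reboot", node);
}

int
main(void)
{
    summarize_fencing("controller-1", "reboot");          /* recovery of an unclean node */
    summarize_fencing("rhel7-node1.example.com", "on");   /* unfencing */
    return 0;
}

Falling back to "reboot" in the sketch mirrors the documented default of the stonith-action cluster option.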
controller-1 (UNCLEAN) Transition Summary: - * Fence messaging-1 - * Fence controller-1 + * Fence (reboot) messaging-1 + * Fence (reboot) controller-1 * Stop messaging-1 (controller-1) * Move galera-0 (Started controller-1 -> controller-2) * Stop galera-2 (controller-1) * Stop rabbitmq:2 (messaging-1) * Stop redis:0 (controller-1) * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) * Stop haproxy:0 (controller-1) * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Pseudo action: messaging-1_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 
messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary index 212af4ed16..8a3d56048d 100644 --- a/pengine/test10/remote-recover-unknown.summary +++ b/pengine/test10/remote-recover-unknown.summary @@ -1,147 +1,147 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started 
controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: - * Fence messaging-1 - * Fence galera-2 - * Fence controller-1 + * Fence (reboot) messaging-1 + * Fence (reboot) galera-2 + * Fence (reboot) controller-1 * Stop messaging-1 (controller-1) * Move galera-0 (Started controller-1 -> controller-2) * Stop galera-2 (controller-1) * Stop rabbitmq:2 (messaging-1) * Stop redis:0 (controller-1) * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) * Stop haproxy:0 (controller-1) * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: messaging-1_stop_0 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on 
controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recovery.summary b/pengine/test10/remote-recovery.summary index 43507af6f5..79ea7da665 100644 --- a/pengine/test10/remote-recovery.summary +++ b/pengine/test10/remote-recovery.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: 
redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: - * Fence controller-1 + * Fence (reboot) controller-1 * Move messaging-1 (Started controller-1 -> controller-2) * Move galera-0 (Started controller-1 -> controller-2) * Move galera-2 (Started controller-1 -> controller-2) * Stop redis:0 (controller-1) * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) * Stop haproxy:0 (controller-1) * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: stonith_complete * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 
monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-unclean2.summary b/pengine/test10/remote-unclean2.summary index ad4af90c63..877eb4d43f 100644 --- a/pengine/test10/remote-unclean2.summary +++ b/pengine/test10/remote-unclean2.summary @@ -1,27 +1,27 @@ Current cluster status: RemoteNode rhel7-auto4: UNCLEAN (offline) Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto1 Transition Summary: - * Fence rhel7-auto4 + * Fence (reboot) rhel7-auto4 * Recover rhel7-auto4 (Started rhel7-auto1) Executing cluster transition: * Resource action: rhel7-auto4 stop on rhel7-auto1 * Fencing rhel7-auto4 (reboot) * Pseudo action: stonith_complete * 
Pseudo action: all_stopped * Resource action: rhel7-auto4 start on rhel7-auto1 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto1 Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] RemoteOnline: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto2 rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto1 diff --git a/pengine/test10/start-then-stop-with-unfence.summary b/pengine/test10/start-then-stop-with-unfence.summary index 4430cc09d7..ae54afc16e 100644 --- a/pengine/test10/start-then-stop-with-unfence.summary +++ b/pengine/test10/start-then-stop-with-unfence.summary @@ -1,44 +1,43 @@ Current cluster status: Online: [ rhel7-node1.example.com rhel7-node2.example.com ] mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com mpath-node1 (stonith:fence_mpath): Stopped ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com Clone Set: jrummy-clone [jrummy] Started: [ rhel7-node2.example.com ] Stopped: [ rhel7-node1.example.com ] Transition Summary: - * Fence rhel7-node2.example.com - * Fence rhel7-node1.example.com + * Fence (on) rhel7-node1.example.com * Start mpath-node1 (rhel7-node1.example.com) * Move ip1 (Started rhel7-node2.example.com -> rhel7-node1.example.com) * Start jrummy:1 (rhel7-node1.example.com) Executing cluster transition: * Pseudo action: jrummy-clone_start_0 * Fencing rhel7-node1.example.com (on) * Resource action: mpath-node2 monitor on rhel7-node1.example.com * Resource action: mpath-node1 monitor on rhel7-node1.example.com * Resource action: jrummy start on rhel7-node1.example.com * Pseudo action: jrummy-clone_running_0 * Resource action: mpath-node1 start on rhel7-node1.example.com * Resource action: ip1 stop on rhel7-node2.example.com * Resource action: jrummy monitor=10000 on rhel7-node1.example.com * Pseudo action: all_stopped * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com * Resource action: ip1 start on rhel7-node1.example.com * Resource action: ip1 monitor=10000 on rhel7-node1.example.com Revised cluster status: Online: [ rhel7-node1.example.com rhel7-node2.example.com ] mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com mpath-node1 (stonith:fence_mpath): Started rhel7-node1.example.com ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node1.example.com ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com Clone Set: jrummy-clone [jrummy] Started: [ rhel7-node1.example.com rhel7-node2.example.com ] diff --git a/pengine/test10/stonith-0.summary b/pengine/test10/stonith-0.summary index 24008a1ab4..ee8ae1565e 100644 --- a/pengine/test10/stonith-0.summary +++ b/pengine/test10/stonith-0.summary @@ -1,111 +1,111 @@ Current cluster status: Node c001n03 (f5e1d2de-73da-432a-9d5c-37472253c2ee): UNCLEAN (online) Node c001n05 (52a5ea5e-86ee-442c-b251-0bc9825c517e): UNCLEAN (online) Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started [ c001n03 c001n05 ] heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): FAILED [ c001n03 c001n05 ] lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): 
Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 Transition Summary: - * Fence c001n05 - * Fence c001n03 + * Fence (reboot) c001n05 + * Fence (reboot) c001n03 * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Recover ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move rsc_c001n05 (Started c001n05 -> c001n07) * Move rsc_c001n07 (Started c001n03 -> c001n07) Executing cluster transition: * Resource action: child_DoFencing:4 monitor=20000 on c001n08 * Fencing c001n05 (reboot) * Fencing c001n03 (reboot) * Pseudo action: stonith_complete * Pseudo action: group-1_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: rsc_c001n05_stop_0 * Pseudo action: rsc_c001n07_stop_0 * Pseudo action: heartbeat_192.168.100.182_stop_0 * Resource action: rsc_c001n05 start on c001n07 * Resource action: rsc_c001n07 start on c001n07 * Pseudo action: ocf_192.168.100.181_stop_0 * Pseudo action: ocf_192.168.100.181_stop_0 * Resource action: rsc_c001n05 monitor=5000 on c001n07 * Resource action: rsc_c001n07 monitor=5000 on c001n07 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] OFFLINE: [ c001n03 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 
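In the stonith-0 transition just above, stops for resources that were active on the fenced nodes (for example rsc_c001n05, and the group-1 members on c001n03) appear as pseudo actions: a real stop cannot be executed on a node that is being fenced, so the stop is only recorded, and the replacement start then runs on a surviving node. A rough sketch of that distinction is below, using made-up types rather than the scheduler's own.

/* Made-up helper, for illustration only: a stop on a node that is being
 * fenced cannot actually run there, so it is reported as a pseudo action;
 * stops on healthy nodes are real resource actions. */
#include <stdio.h>
#include <stdbool.h>

static void
log_stop(const char *rsc, const char *node, bool node_being_fenced)
{
    if (node_being_fenced) {
        printf(" * Pseudo action: %s_stop_0\n", rsc);
    } else {
        printf(" * Resource action: %s stop on %s\n", rsc, node);
    }
}

int
main(void)
{
    log_stop("rsc_c001n05", "c001n05", true);              /* c001n05 is fenced in stonith-0 */
    log_stop("ip1", "rhel7-node2.example.com", false);     /* ordinary stop on a live node */
    return 0;
}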
(ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 diff --git a/pengine/test10/stonith-1.summary b/pengine/test10/stonith-1.summary index 85560214e7..b68aca798c 100644 --- a/pengine/test10/stonith-1.summary +++ b/pengine/test10/stonith-1.summary @@ -1,113 +1,113 @@ Current cluster status: Node sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): UNCLEAN (offline) Online: [ sles-1 sles-2 sles-4 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 (UNCLEAN) rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 (UNCLEAN) rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN) Started: [ sles-1 sles-2 ] Stopped: [ sles-4 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped Transition Summary: - * Fence sles-3 + * Fence (reboot) sles-3 * Start r192.168.100.183 (sles-1) * Move migrator (Started sles-3 -> sles-4) * Move rsc_sles-3 (Started sles-3 -> sles-4) * Move child_DoFencing:2 (Started sles-3 -> sles-4) * Start ocf_msdummy:0 (sles-4) * Start ocf_msdummy:1 (sles-1) * Move ocf_msdummy:2 (Slave sles-3 -> sles-2) * Start ocf_msdummy:3 (sles-4) * Start ocf_msdummy:4 (sles-1) * Move ocf_msdummy:5 (Slave sles-3 -> sles-2) Executing cluster transition: * Pseudo action: group-1_start_0 * Resource 
action: r192.168.100.182 monitor=5000 on sles-1 * Resource action: lsb_dummy monitor=5000 on sles-2 * Resource action: rsc_sles-2 monitor=5000 on sles-2 * Resource action: rsc_sles-4 monitor=5000 on sles-4 * Pseudo action: DoFencing_stop_0 * Fencing sles-3 (reboot) * Pseudo action: stonith_complete * Resource action: r192.168.100.183 start on sles-1 * Pseudo action: migrator_stop_0 * Pseudo action: rsc_sles-3_stop_0 * Pseudo action: child_DoFencing:2_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: group-1_running_0 * Resource action: r192.168.100.183 monitor=5000 on sles-1 * Resource action: migrator start on sles-4 * Resource action: rsc_sles-3 start on sles-4 * Resource action: child_DoFencing:2 start on sles-4 * Pseudo action: DoFencing_running_0 * Pseudo action: ocf_msdummy:2_stop_0 * Pseudo action: ocf_msdummy:5_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Resource action: migrator monitor=10000 on sles-4 * Resource action: rsc_sles-3 monitor=5000 on sles-4 * Resource action: child_DoFencing:2 monitor=60000 on sles-4 * Resource action: ocf_msdummy:0 start on sles-4 * Resource action: ocf_msdummy:1 start on sles-1 * Resource action: ocf_msdummy:2 start on sles-2 * Resource action: ocf_msdummy:3 start on sles-4 * Resource action: ocf_msdummy:4 start on sles-1 * Resource action: ocf_msdummy:5 start on sles-2 * Pseudo action: master_rsc_1_running_0 * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 * Resource action: ocf_msdummy:3 monitor=5000 on sles-4 * Resource action: ocf_msdummy:4 monitor=5000 on sles-1 * Resource action: ocf_msdummy:5 monitor=5000 on sles-2 Revised cluster status: Online: [ sles-1 sles-2 sles-4 ] OFFLINE: [ sles-3 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-4 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-4 ] Stopped: [ sles-3 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/stonith-2.summary b/pengine/test10/stonith-2.summary index 0f7cb99ea8..eef35c4ad1 100644 --- a/pengine/test10/stonith-2.summary +++ b/pengine/test10/stonith-2.summary @@ -1,78 +1,78 @@ Current cluster status: Node sles-5 (434915c6-7b40-4d30-95ff-dc0ff3dc005a): UNCLEAN (offline) Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started 
sles-1 lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-5 (ocf::heartbeat:IPaddr): Stopped rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Stopped: [ sles-5 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3 ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6 ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6 ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3 Transition Summary: - * Fence sles-5 + * Fence (reboot) sles-5 * Start rsc_sles-5 (sles-6) Executing cluster transition: * Fencing sles-5 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: rsc_sles-5 start on sles-6 * Resource action: rsc_sles-5 monitor=5000 on sles-6 Revised cluster status: Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] OFFLINE: [ sles-5 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-5 (ocf::heartbeat:IPaddr): Started sles-6 rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] Stopped: [ sles-5 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3 ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6 ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6 ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3 diff --git a/pengine/test10/stonith-3.summary b/pengine/test10/stonith-3.summary index cb25cc3e18..398b6bc043 100644 --- a/pengine/test10/stonith-3.summary +++ b/pengine/test10/stonith-3.summary @@ -1,37 +1,37 @@ Current cluster status: Node rh5node1 (286f4fcb-519e-4a23-b39f-9ab0017d0442): UNCLEAN (offline) Online: [ rh5node2 ] prmIpPostgreSQLDB (ocf::heartbeat:IPaddr): Stopped Clone Set: clnStonith [grpStonith] Stopped: [ rh5node1 rh5node2 ] Transition Summary: - * Fence rh5node1 + * Fence (reboot) rh5node1 * Start 
prmIpPostgreSQLDB (rh5node2) * Start prmStonith:0 (rh5node2) Executing cluster transition: * Resource action: prmIpPostgreSQLDB monitor on rh5node2 * Resource action: prmStonith:0 monitor on rh5node2 * Pseudo action: clnStonith_start_0 * Fencing rh5node1 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: prmIpPostgreSQLDB start on rh5node2 * Pseudo action: grpStonith:0_start_0 * Resource action: prmStonith:0 start on rh5node2 * Resource action: prmIpPostgreSQLDB monitor=30000 on rh5node2 * Pseudo action: grpStonith:0_running_0 * Pseudo action: clnStonith_running_0 Revised cluster status: Online: [ rh5node2 ] OFFLINE: [ rh5node1 ] prmIpPostgreSQLDB (ocf::heartbeat:IPaddr): Started rh5node2 Clone Set: clnStonith [grpStonith] Started: [ rh5node2 ] Stopped: [ rh5node1 ] diff --git a/pengine/test10/stonith-4.summary b/pengine/test10/stonith-4.summary index dc7cd89566..6e3e396457 100644 --- a/pengine/test10/stonith-4.summary +++ b/pengine/test10/stonith-4.summary @@ -1,40 +1,40 @@ Current cluster status: Node pcmk-10 (110): UNCLEAN (online) Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-5 (105): UNCLEAN (offline) Node pcmk-7 (107): UNCLEAN (online) Node pcmk-8 (108): UNCLEAN (offline) Node pcmk-9 (109): pending Online: [ pcmk-1 ] OFFLINE: [ pcmk-4 pcmk-6 ] Fencing (stonith:fence_xvm): Stopped Transition Summary: - * Fence pcmk-8 - * Fence pcmk-7 - * Fence pcmk-5 - * Fence pcmk-10 + * Fence (reboot) pcmk-8 + * Fence (reboot) pcmk-7 + * Fence (reboot) pcmk-5 + * Fence (reboot) pcmk-10 * Start Fencing (pcmk-1 - blocked) Executing cluster transition: * Fencing pcmk-10 (reboot) * Fencing pcmk-5 (reboot) * Fencing pcmk-7 (reboot) * Fencing pcmk-8 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-9 (109): pending Online: [ pcmk-1 ] OFFLINE: [ pcmk-10 pcmk-4 pcmk-5 pcmk-6 pcmk-7 pcmk-8 ] Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stop-failure-no-quorum.summary b/pengine/test10/stop-failure-no-quorum.summary index d864f1a4b2..02481265e2 100644 --- a/pengine/test10/stop-failure-no-quorum.summary +++ b/pengine/test10/stop-failure-no-quorum.summary @@ -1,46 +1,46 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: - * Fence pcmk-2 + * Fence (reboot) pcmk-2 * Start dlm:0 (pcmk-1 - blocked) * Stop clvm:0 (pcmk-2) * Start clvm:2 (pcmk-1 - blocked) * Start ClusterIP (pcmk-1 - blocked) * Start Fencing (pcmk-1 - blocked) Executing cluster transition: * Fencing pcmk-2 (reboot) * Pseudo action: stonith_complete * Pseudo action: clvm-clone_stop_0 * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] 
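The stop-failure summaries in this stretch list starts such as "Start dlm:0 (pcmk-1 - blocked)": the planned action is still reported, but flagged as blocked because a prerequisite in the same transition cannot run. A small sketch of rendering that flag follows; the struct and its data are illustrative, not the scheduler's representation.

/* Illustrative only: planned actions whose prerequisites cannot be met
 * are still listed in the summary, but annotated as blocked. */
#include <stdio.h>
#include <stdbool.h>

struct planned_action {
    const char *description;    /* e.g. "Start dlm:0 (pcmk-1" */
    bool runnable;              /* can every prerequisite be satisfied? */
};

int
main(void)
{
    struct planned_action plan[] = {
        { "Stop clvm:0 (pcmk-2",   true  },
        { "Start dlm:0 (pcmk-1",   false },   /* prerequisites unmet */
        { "Start Fencing (pcmk-1", false },
    };

    for (size_t i = 0; i < sizeof(plan) / sizeof(plan[0]); i++) {
        printf(" * %s%s)\n", plan[i].description,
               plan[i].runnable ? "" : " - blocked");
    }
    return 0;
}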
ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stop-failure-with-fencing.summary b/pengine/test10/stop-failure-with-fencing.summary index e01b6c49c9..e6c296b70c 100644 --- a/pengine/test10/stop-failure-with-fencing.summary +++ b/pengine/test10/stop-failure-with-fencing.summary @@ -1,45 +1,45 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: - * Fence pcmk-2 + * Fence (reboot) pcmk-2 * Start dlm:0 (pcmk-1 - blocked) * Stop clvm:0 (pcmk-2) * Start clvm:1 (pcmk-1 - blocked) * Start ClusterIP (pcmk-1 - blocked) * Start Fencing (pcmk-1 - blocked) Executing cluster transition: * Resource action: Fencing monitor on pcmk-1 * Fencing pcmk-2 (reboot) * Pseudo action: stonith_complete * Pseudo action: clvm-clone_stop_0 * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/systemhealth1.summary b/pengine/test10/systemhealth1.summary index 37b0b49a70..7301c6cbc8 100644 --- a/pengine/test10/systemhealth1.summary +++ b/pengine/test10/systemhealth1.summary @@ -1,26 +1,26 @@ Current cluster status: Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline) Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d - * Fence hs21c + * Fence (reboot) hs21d + * Fence (reboot) hs21c Executing cluster transition: * Fencing hs21d (reboot) * Fencing hs21c (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ hs21c hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealth2.summary b/pengine/test10/systemhealth2.summary index a37ce18034..c83a7e268a 100644 --- a/pengine/test10/systemhealth2.summary +++ b/pengine/test10/systemhealth2.summary @@ -1,36 +1,36 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start stonith-1 (hs21c) * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on hs21c * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * 
Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Started hs21c apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealth3.summary b/pengine/test10/systemhealth3.summary index a37ce18034..c83a7e268a 100644 --- a/pengine/test10/systemhealth3.summary +++ b/pengine/test10/systemhealth3.summary @@ -1,36 +1,36 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start stonith-1 (hs21c) * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on hs21c * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Started hs21c apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealthm1.summary b/pengine/test10/systemhealthm1.summary index 37b0b49a70..7301c6cbc8 100644 --- a/pengine/test10/systemhealthm1.summary +++ b/pengine/test10/systemhealthm1.summary @@ -1,26 +1,26 @@ Current cluster status: Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline) Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d - * Fence hs21c + * Fence (reboot) hs21d + * Fence (reboot) hs21c Executing cluster transition: * Fencing hs21d (reboot) * Fencing hs21c (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ hs21c hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealthm2.summary b/pengine/test10/systemhealthm2.summary index a37ce18034..c83a7e268a 100644 --- a/pengine/test10/systemhealthm2.summary +++ b/pengine/test10/systemhealthm2.summary @@ -1,36 +1,36 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start stonith-1 (hs21c) * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on hs21c * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ 
hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Started hs21c apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealthm3.summary b/pengine/test10/systemhealthm3.summary index b10d3f244c..fbcdec5b77 100644 --- a/pengine/test10/systemhealthm3.summary +++ b/pengine/test10/systemhealthm3.summary @@ -1,28 +1,28 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealthn1.summary b/pengine/test10/systemhealthn1.summary index 37b0b49a70..7301c6cbc8 100644 --- a/pengine/test10/systemhealthn1.summary +++ b/pengine/test10/systemhealthn1.summary @@ -1,26 +1,26 @@ Current cluster status: Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline) Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d - * Fence hs21c + * Fence (reboot) hs21d + * Fence (reboot) hs21c Executing cluster transition: * Fencing hs21d (reboot) * Fencing hs21c (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ hs21c hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealthn2.summary b/pengine/test10/systemhealthn2.summary index a37ce18034..c83a7e268a 100644 --- a/pengine/test10/systemhealthn2.summary +++ b/pengine/test10/systemhealthn2.summary @@ -1,36 +1,36 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start stonith-1 (hs21c) * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on hs21c * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Started hs21c apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealthn3.summary b/pengine/test10/systemhealthn3.summary index a37ce18034..c83a7e268a 100644 --- a/pengine/test10/systemhealthn3.summary +++ b/pengine/test10/systemhealthn3.summary @@ -1,36 +1,36 
@@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start stonith-1 (hs21c) * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on hs21c * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Started hs21c apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealtho1.summary b/pengine/test10/systemhealtho1.summary index 37b0b49a70..7301c6cbc8 100644 --- a/pengine/test10/systemhealtho1.summary +++ b/pengine/test10/systemhealtho1.summary @@ -1,26 +1,26 @@ Current cluster status: Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline) Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d - * Fence hs21c + * Fence (reboot) hs21d + * Fence (reboot) hs21c Executing cluster transition: * Fencing hs21d (reboot) * Fencing hs21c (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ hs21c hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealtho2.summary b/pengine/test10/systemhealtho2.summary index b10d3f244c..fbcdec5b77 100644 --- a/pengine/test10/systemhealtho2.summary +++ b/pengine/test10/systemhealtho2.summary @@ -1,28 +1,28 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealtho3.summary b/pengine/test10/systemhealtho3.summary index b10d3f244c..fbcdec5b77 100644 --- a/pengine/test10/systemhealtho3.summary +++ b/pengine/test10/systemhealtho3.summary @@ -1,28 +1,28 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: 
apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealthp1.summary b/pengine/test10/systemhealthp1.summary index 37b0b49a70..7301c6cbc8 100644 --- a/pengine/test10/systemhealthp1.summary +++ b/pengine/test10/systemhealthp1.summary @@ -1,26 +1,26 @@ Current cluster status: Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline) Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d - * Fence hs21c + * Fence (reboot) hs21d + * Fence (reboot) hs21c Executing cluster transition: * Fencing hs21d (reboot) * Fencing hs21c (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ hs21c hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/systemhealthp2.summary b/pengine/test10/systemhealthp2.summary index 6605e12edd..ea9465bb81 100644 --- a/pengine/test10/systemhealthp2.summary +++ b/pengine/test10/systemhealthp2.summary @@ -1,34 +1,34 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d * Start apache_1 (hs21c) * Start nfs_1 (hs21c) Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: apache_1 start on hs21c * Resource action: nfs_1 start on hs21c * Resource action: apache_1 monitor=10000 on hs21c * Resource action: nfs_1 monitor=20000 on hs21c Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Started hs21c nfs_1 (ocf::heartbeat:Filesystem): Started hs21c diff --git a/pengine/test10/systemhealthp3.summary b/pengine/test10/systemhealthp3.summary index b10d3f244c..fbcdec5b77 100644 --- a/pengine/test10/systemhealthp3.summary +++ b/pengine/test10/systemhealthp3.summary @@ -1,28 +1,28 @@ Current cluster status: Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline) Online: [ hs21c ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Fence hs21d + * Fence (reboot) hs21d Executing cluster transition: * Resource action: stonith-1 monitor on hs21c * Resource action: apache_1 monitor on hs21c * Resource action: nfs_1 monitor on hs21c * Fencing hs21d (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Online: [ hs21c ] OFFLINE: [ hs21d ] stonith-1 (stonith:dummy): Stopped apache_1 (ocf::heartbeat:apache): Stopped nfs_1 (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/ticket-clone-21.summary b/pengine/test10/ticket-clone-21.summary index 50df6280f4..573f8c1a34 100644 --- 
a/pengine/test10/ticket-clone-21.summary +++ b/pengine/test10/ticket-clone-21.summary @@ -1,33 +1,33 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Fence node2 - * Fence node1 + * Fence (reboot) node2 + * Fence (reboot) node1 * Stop rsc_stonith (node1) * Stop rsc1:0 (node1) * Stop rsc1:1 (node2) Executing cluster transition: * Fencing node1 (reboot) * Pseudo action: rsc_stonith_stop_0 * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: clone1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: rsc1:0_stop_0 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 ] rsc_stonith (stonith:null): Stopped Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-9.summary b/pengine/test10/ticket-clone-9.summary index 50df6280f4..573f8c1a34 100644 --- a/pengine/test10/ticket-clone-9.summary +++ b/pengine/test10/ticket-clone-9.summary @@ -1,33 +1,33 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Fence node2 - * Fence node1 + * Fence (reboot) node2 + * Fence (reboot) node1 * Stop rsc_stonith (node1) * Stop rsc1:0 (node1) * Stop rsc1:1 (node2) Executing cluster transition: * Fencing node1 (reboot) * Pseudo action: rsc_stonith_stop_0 * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: clone1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: rsc1:0_stop_0 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 ] rsc_stonith (stonith:null): Stopped Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-group-21.summary b/pengine/test10/ticket-group-21.summary index eab14a9b9a..ff809ae3c6 100644 --- a/pengine/test10/ticket-group-21.summary +++ b/pengine/test10/ticket-group-21.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Stop rsc1 (node2) * Stop rsc2 (node2) Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc1_stop_0 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-9.summary b/pengine/test10/ticket-group-9.summary index eab14a9b9a..ff809ae3c6 100644 --- a/pengine/test10/ticket-group-9.summary +++ b/pengine/test10/ticket-group-9.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Stop rsc1 (node2) * Stop rsc2 (node2) Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc1_stop_0 * Pseudo action: 
all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-master-21.summary b/pengine/test10/ticket-master-21.summary index a107a38b8d..c196c9ddf9 100644 --- a/pengine/test10/ticket-master-21.summary +++ b/pengine/test10/ticket-master-21.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Move rsc_stonith (Started node1 -> node2) * Demote rsc1:0 (Master -> Stopped node1) Executing cluster transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc_stonith (stonith:null): Started node2 Master/Slave Set: ms1 [rsc1] Slaves: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/ticket-master-9.summary b/pengine/test10/ticket-master-9.summary index a107a38b8d..c196c9ddf9 100644 --- a/pengine/test10/ticket-master-9.summary +++ b/pengine/test10/ticket-master-9.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: - * Fence node1 + * Fence (reboot) node1 * Move rsc_stonith (Started node1 -> node2) * Demote rsc1:0 (Master -> Stopped node1) Executing cluster transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Pseudo action: stonith_complete * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc_stonith (stonith:null): Started node2 Master/Slave Set: ms1 [rsc1] Slaves: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/ticket-primitive-21.summary b/pengine/test10/ticket-primitive-21.summary index 918b743f48..08a4860be3 100644 --- a/pengine/test10/ticket-primitive-21.summary +++ b/pengine/test10/ticket-primitive-21.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Stop rsc1 (node2) Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-primitive-9.summary b/pengine/test10/ticket-primitive-9.summary index 918b743f48..08a4860be3 100644 --- a/pengine/test10/ticket-primitive-9.summary +++ b/pengine/test10/ticket-primitive-9.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 
(ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Fence node2 + * Fence (reboot) node2 * Stop rsc1 (node2) Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary index 03506a349f..9477a02cc6 100644 --- a/pengine/test10/unfence-definition.summary +++ b/pengine/test10/unfence-definition.summary @@ -1,67 +1,65 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: - * Fence virt-4 - * Fence virt-4 - * Fence virt-3 - * Fence virt-2 - * Fence virt-1 + * Fence (reboot) virt-4 + * Fence (on) virt-3 + * Fence (on) virt-1 * Restart fencing (Started virt-1) * Restart dlm:0 (Started virt-1) * Start dlm:2 (virt-3) * Restart clvmd:0 (Started virt-1) * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: dlm monitor on virt-3 * Resource action: clvmd monitor on virt-2 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing monitor on virt-3 * Resource action: fencing stop on virt-1 * Resource action: clvmd stop on virt-1 * Pseudo action: clvmd-clone_stopped_0 * Resource action: fencing delete on virt-1 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary index 5cc4a4b6b6..a713cd5803 100644 --- a/pengine/test10/unfence-parameters.summary +++ b/pengine/test10/unfence-parameters.summary @@ -1,70 +1,69 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: - * Fence virt-4 - * Fence virt-4 - * Fence virt-3 - * Fence virt-2 - * Fence virt-1 + * Fence (reboot) virt-4 + * Fence (on) virt-3 + * Fence (on) virt-2 + * Fence (on) virt-1 * Restart fencing (Started virt-1) * Restart dlm:0 (Started virt-1) * Restart dlm:1 (Started virt-2) * Start dlm:2 (virt-3) * Restart clvmd:0 (Started virt-1) * Start 
clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: dlm monitor on virt-3 * Resource action: clvmd monitor on virt-2 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing monitor on virt-3 * Resource action: clvmd stop on virt-1 * Pseudo action: clvmd-clone_stopped_0 * Resource action: fencing stop on virt-1 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-2 * Fencing virt-2 (on) * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on virt-2 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unfence-startup.summary b/pengine/test10/unfence-startup.summary index 6cd38ad3bc..c3f1be4719 100644 --- a/pengine/test10/unfence-startup.summary +++ b/pengine/test10/unfence-startup.summary @@ -1,52 +1,49 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: - * Fence virt-4 - * Fence virt-4 - * Fence virt-3 - * Fence virt-2 - * Fence virt-1 + * Fence (reboot) virt-4 + * Fence (on) virt-3 * Start dlm:2 (virt-3) * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: dlm monitor on virt-3 * Pseudo action: dlm-clone_start_0 * Resource action: clvmd monitor on virt-2 * Resource action: clvmd monitor on virt-3 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Pseudo action: all_stopped * Resource action: fencing monitor on virt-3 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary index 5f70a66fbc..57d72ba215 100644 --- a/pengine/test10/whitebox-fail1.summary +++ b/pengine/test10/whitebox-fail1.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: 
M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): FAILED lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: - * Fence lxc1 (resource: container1) + * Fence (reboot) lxc1 (resource: container1) * Recover container1 (Started 18node2) * Recover M:4 (Started lxc1) * Recover B (Started lxc1) * Restart lxc1 (Started 18node2) Executing cluster transition: * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node2 * Pseudo action: M-clone_stop_0 * Pseudo action: B_stop_0 * Resource action: lxc1 start on 18node2 * Resource action: lxc1 monitor=30000 on 18node2 * Pseudo action: M_stop_0 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: B start on lxc1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: B monitor=10000 on lxc1 * Resource action: M monitor=10000 on lxc1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-fail2.summary b/pengine/test10/whitebox-fail2.summary index 2922f16ea7..bf1268359d 100644 --- a/pengine/test10/whitebox-fail2.summary +++ b/pengine/test10/whitebox-fail2.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): FAILED lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: - * Fence lxc1 (resource: container1) + * Fence (reboot) lxc1 (resource: container1) * Recover container1 (Started 18node2) * Recover M:4 (Started lxc1) * Recover B (Started lxc1) * Recover lxc1 (Started 18node2) Executing cluster transition: * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node2 * Pseudo action: M-clone_stop_0 * Pseudo action: B_stop_0 * Resource action: lxc1 start on 18node2 * Resource action: lxc1 monitor=30000 on 18node2 * Pseudo action: M_stop_0 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: B start on lxc1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: B monitor=10000 on lxc1 * Resource action: M monitor=10000 on lxc1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 
(stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary index 31cc4a51b3..2fb20a63a8 100644 --- a/pengine/test10/whitebox-imply-stop-on-fence.summary +++ b/pengine/test10/whitebox-imply-stop-on-fence.summary @@ -1,96 +1,96 @@ Current cluster status: Node kiff-01 (1): UNCLEAN (offline) Online: [ kiff-02 ] Containers: [ lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN) Clone Set: dlm-clone [dlm] dlm (ocf::pacemaker:controld): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] clvmd (ocf::heartbeat:clvm): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] shared0 (ocf::heartbeat:Filesystem): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN) R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN) R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): FAILED lxc-01_kiff-01 Transition Summary: - * Fence lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) - * Fence lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) - * Fence kiff-01 + * Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) + * Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) + * Fence (reboot) kiff-01 * Move fence-kiff-02 (Started kiff-01 -> kiff-02) * Stop dlm:0 (kiff-01) * Stop clvmd:0 (kiff-01) * Stop shared0:0 (kiff-01) * Recover R-lxc-01_kiff-01 (Started kiff-01 -> kiff-02) * Move R-lxc-02_kiff-01 (Started kiff-01 -> kiff-02) * Recover vm-fs (Started lxc-01_kiff-01) * Move lxc-01_kiff-01 (Started kiff-01 -> kiff-02) * Move lxc-02_kiff-01 (Started kiff-01 -> kiff-02) Executing cluster transition: * Pseudo action: fence-kiff-02_stop_0 * Fencing kiff-01 (reboot) * Pseudo action: lxc-01_kiff-01_stop_0 * Pseudo action: lxc-02_kiff-01_stop_0 * Pseudo action: R-lxc-01_kiff-01_stop_0 * Pseudo action: R-lxc-02_kiff-01_stop_0 * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01 * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01 * Pseudo action: stonith_complete * Pseudo action: shared0-clone_stop_0 * Resource action: R-lxc-01_kiff-01 start on kiff-02 * Resource action: R-lxc-02_kiff-01 start on kiff-02 * Pseudo action: vm-fs_stop_0 * Resource action: lxc-01_kiff-01 start on kiff-02 * Resource action: lxc-02_kiff-01 start on kiff-02 * Pseudo action: shared0_stop_0 * Pseudo action: shared0-clone_stopped_0 * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02 * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02 * Resource action: vm-fs start on lxc-01_kiff-01 * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 * Pseudo action: clvmd-clone_stop_0 * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 * 
Pseudo action: clvmd_stop_0 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Pseudo action: dlm_stop_0 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: all_stopped * Resource action: fence-kiff-02 start on kiff-02 * Resource action: fence-kiff-02 monitor=60000 on kiff-02 Revised cluster status: Online: [ kiff-02 ] OFFLINE: [ kiff-01 ] Containers: [ lxc-01_kiff-01:R-lxc-01_kiff-01 lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-01:R-lxc-02_kiff-01 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02 Clone Set: dlm-clone [dlm] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): Started lxc-01_kiff-01 diff --git a/pengine/test10/whitebox-ms-ordering.summary b/pengine/test10/whitebox-ms-ordering.summary index fcdef66e79..2230d0f827 100644 --- a/pengine/test10/whitebox-ms-ordering.summary +++ b/pengine/test10/whitebox-ms-ordering.summary @@ -1,73 +1,73 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] shooter (stonith:fence_xvm): Started 18node2 container1 (ocf::heartbeat:VirtualDomain): FAILED container2 (ocf::heartbeat:VirtualDomain): FAILED Master/Slave Set: lxc-ms-master [lxc-ms] Stopped: [ 18node1 18node2 18node3 ] Transition Summary: - * Fence lxc2 (resource: container2) - * Fence lxc1 (resource: container1) + * Fence (reboot) lxc2 (resource: container2) + * Fence (reboot) lxc1 (resource: container1) * Start container1 (18node1) * Start container2 (18node1) * Recover lxc-ms:0 (Master lxc1) * Recover lxc-ms:1 (Slave lxc2) * Start lxc1 (18node1) * Start lxc2 (18node1) Executing cluster transition: * Resource action: container1 monitor on 18node3 * Resource action: container1 monitor on 18node2 * Resource action: container1 monitor on 18node1 * Resource action: container2 monitor on 18node3 * Resource action: container2 monitor on 18node2 * Resource action: container2 monitor on 18node1 * Resource action: lxc-ms monitor on 18node3 * Resource action: lxc-ms monitor on 18node2 * Resource action: lxc-ms monitor on 18node1 * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 monitor on 18node3 * Resource action: lxc1 monitor on 18node2 * Resource action: lxc1 monitor on 18node1 * Resource action: lxc2 monitor on 18node3 * Resource action: lxc2 monitor on 18node2 * Resource action: lxc2 monitor on 18node1 * Pseudo action: stonith-lxc2-reboot on lxc2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node1 * Resource action: container2 start on 18node1 * Pseudo action: lxc-ms_demote_0 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc1 start on 18node1 * Resource action: lxc2 start on 18node1 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * 
Resource action: lxc1 monitor=30000 on 18node1 * Resource action: lxc2 monitor=30000 on 18node1 * Pseudo action: all_stopped * Resource action: lxc-ms start on lxc1 * Resource action: lxc-ms start on lxc2 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc-ms monitor=10000 on lxc2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] shooter (stonith:fence_xvm): Started 18node2 container1 (ocf::heartbeat:VirtualDomain): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): Started 18node1 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] diff --git a/pengine/test10/whitebox-unexpectedly-running.summary b/pengine/test10/whitebox-unexpectedly-running.summary index ed0a5bdd9e..eabeb4de92 100644 --- a/pengine/test10/whitebox-unexpectedly-running.summary +++ b/pengine/test10/whitebox-unexpectedly-running.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ 18builder ] FAKE (ocf::pacemaker:Dummy): FAILED 18builder Transition Summary: - * Fence remote1 (resource: FAKE) + * Fence (reboot) remote1 (resource: FAKE) * Recover FAKE (Started 18builder) * Start remote1 (18builder) Executing cluster transition: * Resource action: FAKE stop on 18builder * Resource action: remote1 monitor on 18builder * Pseudo action: stonith-remote1-reboot on remote1 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: FAKE start on 18builder * Resource action: remote1 start on 18builder * Resource action: FAKE monitor=60000 on 18builder * Resource action: remote1 monitor=30000 on 18builder Revised cluster status: Online: [ 18builder ] Containers: [ remote1:FAKE ] FAKE (ocf::pacemaker:Dummy): Started 18builder
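
Taken together, the fixture updates above all follow one pattern: the Transition Summary now names the fence operation next to the node ("Fence (reboot) pcmk-2", "Fence (on) virt-3"), guest nodes additionally name the container resource being recovered ("Fence (reboot) lxc1 (resource: container1)"), and the redundant per-device unfencing entries (the repeated "Fence virt-4" lines) collapse into a single line per node. The snippet below is only a minimal, self-contained sketch of that output format, not the scheduler's actual logging code; the helper name print_fence_summary() and its parameters are hypothetical.

/* Illustrative only: mirrors the summary lines the updated tests expect. */
#include <stdio.h>

static void
print_fence_summary(const char *node, const char *op, const char *container)
{
    if (container != NULL) {
        /* Guest node: fenced implicitly by recovering its container resource */
        printf(" * Fence (%s) %s (resource: %s)\n", op, node, container);
    } else {
        /* Cluster node: the fence operation (reboot/off/on) is shown explicitly */
        printf(" * Fence (%s) %s\n", op, node);
    }
}

int
main(void)
{
    print_fence_summary("pcmk-2", "reboot", NULL);        /* " * Fence (reboot) pcmk-2" */
    print_fence_summary("virt-3", "on", NULL);             /* unfencing is now reported too */
    print_fence_summary("lxc1", "reboot", "container1");   /* guest-node case */
    return 0;
}

Printing the operation makes the summaries self-explanatory when unfencing ("on") and reboot fencing appear in the same transition, which is why every .summary fixture in this series changes in the same mechanical way.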