diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index bc343232d2..222a3e2867 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2261 +1,2262 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include pe_working_set_t *pe_dataset = NULL; extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled); /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return TRUE if node can be fenced, FALSE otherwise * * \note This function should only be called for cluster nodes and baremetal * remote nodes; guest nodes are fenced by stopping their container * resource, so fence execution requirements do not apply to them. 
*/ bool pe_can_fence(pe_working_set_t * data_set, node_t *node) { if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return FALSE; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return FALSE; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return TRUE; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return TRUE; } else if(node == NULL) { return FALSE; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return TRUE; } crm_trace("Cannot fence %s", node->details->uname); return FALSE; } node_t * node_copy(const node_t *this_node) { node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { node_t *new_node = NULL; node_t *this_node = (node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { const node_t *node_a = a; const node_t *node_b = b; return strcmp(node_a->details->uname, node_b->details->uname); } void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * nodes) { GHashTable *hash = nodes; GHashTableIter iter; node_t *node = NULL; if (rsc) { hash = rsc->allowed_nodes; } if (rsc && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't show the allocation scores for orphans */ return; } if (level == 0) { char score[128]; int len = sizeof(score); /* For now we want this in sorted order to keep the regression tests happy */ GListPtr gIter = NULL; GListPtr list = 
g_hash_table_get_values(hash); list = g_list_sort(list, sort_node_uname); gIter = list; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } else if (hash) { char score[128]; int len = sizeof(score); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s allocation score on %s: %s", comment, rsc->id, node->details->uname, score); } else { do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment, node->details->uname, score); } } } if (rsc && rsc->children) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; dump_node_scores_worker(level, file, function, line, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; int len = 0; char *new_text = NULL; len = strlen(*dump_text) + strlen(" ") + strlen(key) + strlen("=") + strlen(value) + 1; new_text = calloc(1, len); sprintf(new_text, "%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(node->details->uname) + strlen(" capacity:") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(rsc->id) + strlen(" utilization on ") + strlen(node->details->uname) + strlen(":") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } action_t * 
custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (g_list_length(possible_matches) > 1) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s", action->id, task, rsc ? rsc->id : "", on_node ? on_node->details->uname : ""); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d", optional ? "" : " mandatory", data_set->action_id, key, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", optional); } action = calloc(1, sizeof(action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); pe_rsc_trace(rsc, "Unset optional on %s", action->uuid); } /* Implied by calloc()... 
action->actions_before = NULL; action->actions_after = NULL; action->pseudo = FALSE; action->dumped = FALSE; action->processed = FALSE; action->seen_count = 0; */ action->extra = crm_str_table_new(); action->meta = crm_str_table_new(); action->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS); action->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } if (save_action) { pe_rsc_trace(rsc, "Action %d created", action->id); } } if (optional == FALSE) { pe_rsc_trace(rsc, "Unset optional on %s", action->uuid); pe_clear_action_bit(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { pe_set_action_bit(action, pe_action_have_node_attrs); unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS, action->node->details->attrs, action->extra, NULL, FALSE, data_set->now); } if (is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid); pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "node availability", pe_action_runnable, TRUE); } else if (is_not_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) { crm_debug("Action %s (unmanaged)", action->uuid); pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); /* action->runnable = FALSE; */ } else if (action->node->details->online == FALSE && (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)", action->uuid, action->node->details->uname); if (is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable"); } } else if (action->node->details->pending) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid); free(action->reason); action->reason = NULL; pe_set_action_bit(action, pe_action_runnable); #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_stop) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); } else if (is_set(data_set->flags, pe_flag_have_quorum) 
== FALSE && data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)", action->node->details->uname, action->uuid); } } else { pe_rsc_trace(rsc, "Action %s is runnable", action->uuid); free(action->reason); action->reason = NULL; pe_set_action_bit(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: set_bit(rsc->flags, pe_rsc_stopping); break; case start_rsc: clear_bit(rsc->flags, pe_rsc_starting); if (is_set(action->flags, pe_action_runnable)) { set_bit(rsc->flags, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static const char * unpack_operation_on_fail(action_t * action) { const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id); return NULL; } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval = NULL; const char *enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = __xml_first_child(action->rsc->ops_xml); operation && !value; operation = __xml_next_element(operation)) { if (!crm_str_eq((const char *)operation->name, "op", TRUE)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) { continue; } else if (crm_get_interval(interval) <= 0) { continue; } value = on_fail; } } return value; } static xmlNode * find_min_interval_mon(resource_t * rsc, gboolean include_disabled) { int number = 0; int min_interval = -1; const char *name = NULL; const char *value = NULL; const char *interval = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (safe_str_neq(name, RSC_STATUS)) { continue; } number = crm_get_interval(interval); if (number < 0) { continue; } if (min_interval < 0 || number < min_interval) { min_interval = number; op = operation; } } } return op; } static int unpack_start_delay(const char *value, GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } static int 
unpack_interval_origin(const char *value, GHashTable *meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { int start_delay = 0; if (interval > 0 && value) { crm_time_t *origin = crm_time_new(value); if (origin && now) { crm_time_t *delay = NULL; int rc = crm_time_compare(origin, now); long long delay_s = 0; int interval_s = (interval / 1000); crm_trace("Origin: %s, interval: %d", value, interval_s); /* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */ while(rc > 0) { crm_time_add_seconds(origin, -interval_s); rc = crm_time_compare(origin, now); } /* Now find the first "multiple" that occurs after 'now' */ while (rc < 0) { crm_time_add_seconds(origin, interval_s); rc = crm_time_compare(origin, now); } delay = crm_time_calculate_duration(origin, now); crm_time_log(LOG_TRACE, "origin", origin, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "now", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration); delay_s = crm_time_get_seconds(delay); CRM_CHECK(delay_s >= 0, delay_s = 0); start_delay = delay_s * 1000; if (xml_obj) { crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj)); } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } crm_time_free(origin); crm_time_free(delay); } else if (!origin && xml_obj) { crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s", ID(xml_obj), value); } } return start_delay; } static int unpack_timeout(const char *value, action_t *action, xmlNode *xml_obj, unsigned long long interval, GHashTable *config_hash) { int timeout = 0; if (value == NULL && xml_obj == NULL && action && safe_str_eq(action->task, RSC_STATUS) && interval == 0) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); pe_rsc_trace(action->rsc, "\t%s uses the timeout value '%s' from the minimum interval monitor", action->uuid, value); } } if (value == NULL && config_hash) { value = pe_pref(config_hash, "default-action-timeout"); } timeout = crm_get_msec(value); if (timeout < 0) { timeout = 0; } return timeout; } static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { int start_delay = unpack_interval_origin(value, NULL, xml_obj, interval, now); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value, NULL, NULL, 0, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout); } } } } void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set) { unsigned long long interval = 
0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; CRM_CHECK(action->rsc != NULL, return); unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); if (xml_obj) { xmlAttrPtr xIter = NULL; for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->meta, NULL, FALSE, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->versioned_parameters, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->versioned_meta, data_set->now); g_hash_table_remove(action->meta, "id"); field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval = crm_get_interval(value); if (interval > 0) { value_ms = crm_itoa(interval); g_hash_table_replace(action->meta, strdup(field), value_ms); } else { g_hash_table_remove(action->meta, field); } } /* @COMPAT data sets < 1.1.10 ("requires" on start action not resource) */ value = g_hash_table_lookup(action->meta, "requires"); if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (safe_str_eq(value, "nothing")) { action->needs = rsc_req_nothing; } else if (safe_str_eq(value, "quorum")) { action->needs = rsc_req_quorum; } else if (safe_str_eq(value, "unfencing")) { action->needs = rsc_req_stonith; set_bit(action->rsc->flags, pe_rsc_needs_unfencing); if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires unfencing but fencing is disabled", action->rsc->id); } } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && safe_str_eq(value, "fencing")) { action->needs = rsc_req_stonith; if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires fencing but fencing is disabled", action->rsc->id); } /* @COMPAT end compatibility code */ } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense"); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || 
safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* for baremetal remote nodes, ensure that any failure that results in * dropping an active connection to a remote node results in fencing of * the remote node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (is_rsc_baremetal_remote_node(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged baremetal remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence baremetal remote node (default)"; } else { value = "recover baremetal remote node connection (default)"; } if (action->rsc->remote_reconnect_interval) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); field = XML_OP_ATTR_START_DELAY; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); unpack_interval_origin(value, action->meta, xml_obj, interval, data_set->now); } field = XML_ATTR_TIMEOUT; value = g_hash_table_lookup(action->meta, field); timeout = unpack_timeout(value, action, 
xml_obj, interval, data_set->config_hash); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); unpack_versioned_meta(action->versioned_meta, xml_obj, interval, data_set->now); } static xmlNode * find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled) { unsigned long long number = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } number = crm_get_interval(interval); match_key = generate_op_key(rsc->id, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = generate_op_key(rsc->clone_name, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = generate_op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = generate_op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? "" : ": ", (char *)key, (char *)value); } void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details) { long options = pe_print_log | pe_print_pending; if (rsc == NULL) { do_crm_log(log_level - 1, "%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? 
"" : ": "); return; } if (details) { options |= pe_print_details; } rsc->fns->print(rsc, pre_text, options, &log_level); } void pe_free_action(action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } if (action->versioned_parameters) { free_xml(action->versioned_parameters); } if (action->versioned_meta) { free_xml(action->versioned_meta); } free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, 
action->node->details->uname, on_node->details->uname); } } return result; } GListPtr find_actions_exact(GListPtr input, const char *key, node_t * on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("Matching %s against %s", key, action->uuid); if (safe_str_neq(key, action->uuid)) { crm_trace("Key mismatch: %s vs. %s", key, action->uuid); continue; } else if (on_node == NULL || action->node == NULL) { crm_trace("on_node=%p, action->node=%p", on_node, action->node); continue; } else if (safe_str_eq(on_node->details->id, action->node->details->id)) { result = g_list_prepend(result, action); } crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id); } return result; } static void resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag) { node_t *match = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = merge_weights(match->weight, score); } void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node_iter = (node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID); if (safe_str_eq(a_xml_id, b_xml_id)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unliklely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ int last_a = -1; int last_b = -1; crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %d vs %d", last_a, last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; int dummy = -1; const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic a"); } if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (ie. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (uber_parent(rsc)->variant == pe_master) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ CRM_ASSERT(lh_action != rh_action); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = lh_action->actions_after; for (; gIter != NULL; gIter = gIter->next) { action_wrapper_t *after = (action_wrapper_t *) gIter->data; if (after->action == rh_action && (after->type & order)) { return FALSE; } } wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = rh_action; wrapper->type = order; list = lh_action->actions_after; list = g_list_prepend(list, wrapper); lh_action->actions_after = list; wrapper = NULL; /* order |= pe_order_implies_then; */ /* order ^= pe_order_implies_then; */ wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = lh_action; wrapper->type = order; list = rh_action->actions_before; list = g_list_prepend(list, wrapper); rh_action->actions_before = list; return TRUE; } action_t * get_pseudo_op(const char *name, pe_working_set_t * data_set) { action_t *op = NULL; if(data_set->singletons) { op = g_hash_table_lookup(data_set->singletons, name); } if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_pseudo); set_bit(op->flags, pe_action_runnable); } return op; } void destroy_ticket(gpointer data) { ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } ticket_t * ticket_new(const char *ticket_id, pe_working_set_t * data_set) { ticket_t *ticket = NULL; if (ticket_id == NULL || strlen(ticket_id) == 0) { return NULL; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket); } ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(ticket_t)); if (ticket == NULL) { crm_err("Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creaing ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->granted = FALSE; ticket->last_granted = -1; ticket->standby = FALSE; ticket->state = crm_str_table_new(); g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); } return ticket; } static void filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) { int len = 0; char *name = NULL; char *match = NULL; if (param_set == NULL) { return; } if (param_set && param_string) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; name = NULL; len = strlen(prop_name) + 3; name = malloc(len); if(name) { sprintf(name, " %s ", prop_name); name[len - 1] = 0; match = strstr(param_string, name); } if (need_present && match == NULL) { crm_trace("%s not found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } else if (need_present == FALSE && match) { crm_trace("%s found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } free(name); } } } bool fix_remote_addr(resource_t * rsc) { const char *name; const char *value; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; const char *value_list[] = { "remote", "ocf", "pacemaker" }; if(rsc == NULL) { return FALSE; } name = "addr"; value = g_hash_table_lookup(rsc->parameters, name); if (safe_str_eq(value, "#uname") == FALSE) { return FALSE; } for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) { name = attr_list[lpc]; value = crm_element_value(rsc->xml, attr_list[lpc]); if (safe_str_eq(value, value_list[lpc]) == FALSE) 
{ return FALSE; } } return TRUE; } static void append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) { GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { crm_xml_add(params, key, value); } g_hash_table_destroy(hash); } static op_digest_cache_t * rsc_action_digest(resource_t * rsc, const char *task, const char *key, node_t * node, xmlNode * xml_op, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; data = g_hash_table_lookup(node->details->digest_cache, key); if (data == NULL) { GHashTable *local_rsc_params = crm_str_table_new(); action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); const char *op_version; const char *ra_version = NULL; const char *restart_list = NULL; const char *secure_list = " passwd password "; data = calloc(1, sizeof(op_digest_cache_t)); CRM_ASSERT(data != NULL); get_rsc_attributes(local_rsc_params, rsc, node, data_set); pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); if (fix_remote_addr(rsc)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside crm_xml_add(data->params_all, "addr", node->details->uname); crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); g_hash_table_foreach(action->extra, hash2field, data->params_all); g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); g_hash_table_foreach(action->meta, hash2metafield, data->params_all); if(xml_op) { secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); } else { op_version = CRM_FEATURE_SET; } append_versioned_params(local_versioned_params, ra_version, data->params_all); append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); append_versioned_params(action->versioned_parameters, ra_version, data->params_all); filter_action_parameters(data->params_all, op_version); g_hash_table_destroy(local_rsc_params); pe_free_action(action); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); if (is_set(data_set->flags, pe_flag_sanitized)) { data->params_secure = copy_xml(data->params_all); if(secure_list) { filter_parameters(data->params_secure, secure_list, FALSE); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { data->params_restart = copy_xml(data->params_all); if (restart_list) { filter_parameters(data->params_restart, restart_list, TRUE); } data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); } g_hash_table_insert(node->details->digest_cache, strdup(key), data); } return data; } op_digest_cache_t * rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; char *key = NULL; int interval = 0; const char *interval_s = crm_element_value(xml_op, 
XML_LRM_ATTR_INTERVAL); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_all; const char *digest_restart; CRM_ASSERT(node != NULL); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); interval = crm_parse_int(interval_s, "0"); key = generate_op_key(rsc->id, task, interval); data = rsc_action_digest(rsc, task, key, node, xml_op, data_set); data->rc = RSC_DIGEST_MATCH; if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { data->rc = RSC_DIGEST_RESTART; } else if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { data->rc = RSC_DIGEST_ALL; } free(key); return data; } #define STONITH_DIGEST_TASK "stonith-on" static op_digest_cache_t * fencing_action_digest_cmp(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0); op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, data_set); const char *digest_all = g_hash_table_lookup(node->details->attrs, "digests-all"); const char *digest_secure = g_hash_table_lookup(node->details->attrs, "digests-secure"); /* No 'reloads' for fencing device changes * * We use the resource id + agent + digest so that we can detect * changes to the agent and/or the parameters used */ char *search_all = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc); char *search_secure = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc); data->rc = RSC_DIGEST_ALL; if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strstr(digest_all, search_all)) { data->rc = RSC_DIGEST_MATCH; } else if(digest_secure && data->digest_secure_calc) { if(strstr(digest_secure, search_secure)) { fprintf(stdout, "Only 'private' parameters to %s for unfencing %s changed\n", rsc->id, node->details->uname); data->rc = RSC_DIGEST_MATCH; } } if (data->rc == RSC_DIGEST_ALL && is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) { fprintf(stdout, "Parameters to %s for unfencing %s changed, try '%s:%s:%s'\n", rsc->id, node->details->uname, rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc); } free(key); free(search_all); free(search_secure); return data; } const char *rsc_printable_id(resource_t *rsc) { if (is_not_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void clear_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; clear_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; clear_bit_recursive(child_rsc, flag); } } void set_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; set_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_bit_recursive(child_rsc, flag); } } static GListPtr find_unfencing_devices(GListPtr candidates, GListPtr matches) { for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) { resource_t *candidate = gIter->data; const char *provides = 
g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES); const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES); if(candidate->children) { matches = find_unfencing_devices(candidate->children, matches); } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) { continue; } else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) { matches = g_list_prepend(matches, candidate); } } return matches; } #define STONITH_DIGEST_TASK "stonith-on" action_t * pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set) { char *op_key = NULL; action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, op_key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if(is_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the check_action_definition() based stuff works fine. * * Use "stonith-on" to avoid creating cache entries for * operations check_action_definition() would look for. */ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = malloc(max); char *digests_secure = malloc(max); GListPtr matches = find_unfencing_devices(data_set->resources, NULL); for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) { resource_t *match = gIter->data; op_digest_cache_t *data = fencing_action_digest_cmp(match, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (is_set(data_set->flags, pe_flag_sanitized)) { /* Extra detail for those running from the commandline */ fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_secure_calc); } add_hash_param(stonith_op->meta, strdup("digests-all"), digests_all); add_hash_param(stonith_op->meta, strdup("digests-secure"), digests_secure); } } else { free(op_key); } if(optional == FALSE && pe_can_fence(data_set, node)) { pe_action_required(stonith_op, NULL, reason); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void trigger_unfencing( resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set) { if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && 
node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { action_t *unfence = pe_fence_op(node, "on", FALSE, reason, data_set); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { tag_t *tag = NULL; GListPtr gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag = calloc(1, sizeof(tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (crm_str_eq(existing_ref, obj_ref, TRUE)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite) { bool unset = FALSE; bool update = FALSE; const char *change = NULL; if(is_set(flags, pe_action_runnable)) { unset = TRUE; change = "unrunnable"; } else if(is_set(flags, pe_action_optional)) { unset = TRUE; change = "required"; } else if(is_set(flags, pe_action_failure_is_fatal)) { change = "fatally failed"; } else if(is_set(flags, pe_action_migrate_runnable)) { unset = TRUE; overwrite = TRUE; change = "unrunnable"; } else if(is_set(flags, pe_action_dangle)) { change = "dangling"; } else if(is_set(flags, pe_action_requires_any)) { change = "required"; } else { crm_err("Unknown flag change to %s by %s: 0x%.16x", flags, action->uuid, reason->uuid); } if(unset) { if(is_set(action->flags, flags)) { action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags); update = TRUE; } } else { if(is_not_set(action->flags, flags)) { action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags); update = TRUE; } } if((change && update) || text) { char *reason_text = NULL; if(reason == NULL) { pe_action_set_reason(action, text, overwrite); } else if(reason->rsc == NULL) { reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:""); } else { reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA"); } if(reason_text && action->rsc != reason->rsc) { pe_action_set_reason(action, reason_text, overwrite); } free(reason_text); } } void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) { if(action->reason == NULL || overwrite) { free(action->reason); if(reason) { + crm_trace("Set %s reason to '%s'", action->uuid, reason); action->reason = strdup(reason); } else { action->reason = NULL; } } } diff --git a/pengine/native.c b/pengine/native.c index 63666fdfad..5281f36507 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,3330 +1,3359 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms 
of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include /* #define DELETE_THEN_REFRESH 1 // The crmd will remove the resource from the CIB itself, making this redundant */ #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include gboolean update_action(action_t * then); void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set); gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); /* *INDENT-OFF* */ enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, /* Master */ { RoleError, 
DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, }; /* *INDENT-ON* */ static action_t * get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current); static gboolean native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest wieght) with the fewest resources 3. remove color.chosen_node from all other colors */ GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; gboolean result = FALSE; process_utilization(rsc, &prefer, data_set); length = g_hash_table_size(rsc->allowed_nodes); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? TRUE : FALSE; } if(rsc->allowed_nodes) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0)); } if (prefer) { node_t *best = g_list_nth_data(nodes, 0); chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen && chosen->weight >= 0 && chosen->weight >= best->weight /* Possible alternative: (chosen->weight >= INFINITY || best->weight < INFINITY) */ && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Using preferred node %s for %s instead of choosing from %d candidates", chosen->details->uname, rsc->id, length); } else if (chosen && chosen->weight < 0) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else if (chosen && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); } } if (chosen == NULL && rsc->allowed_nodes) { chosen = g_list_nth_data(nodes, 0); pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ? chosen->details->uname : "", rsc->id, length); if (chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if (running && can_run_resources(running) == FALSE) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); running = NULL; } for (lpc = 1; lpc < length && running; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if (tmp->weight == chosen->weight) { multiple++; if (tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if (multiple > 1) { int log_level = LOG_INFO; static char score[33]; score2char_stack(chosen->weight, score, sizeof(score)); if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. 
Chose %s.", multiple, score, rsc->id, chosen->details->uname); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable * list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { int weight = node->weight; if (can_run_resources(node) == FALSE) { weight = -INFINITY; } if (weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if (safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if (safe_str_neq(attr, "#" XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node ? best_node : "", best_score); } return best_score; } static void node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list1); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { CRM_LOG_ASSERT(node != NULL); if(node == NULL) { continue; }; score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); new_score = merge_weights(factor * score, node->weight); if (factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO - Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %f*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f*%d (node < 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight > 0) { node->weight = INFINITY_HACK; crm_trace("%s: Filtering %d + %f*%d (score > 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight == 0) { crm_trace("%s: Filtering %d + %f*%d (score == 0)", node->details->uname, node->weight, factor, score); } else { crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score); node->weight = new_score; } } } GHashTable * node_hash_dup(GHashTable * hash) { /* Hack! 
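 * The "hack" is that we duplicate the table by round-tripping through a
 * list: g_hash_table_get_values() flattens the entries, node_hash_from_list()
 * rebuilds a table of per-node copies, and the temporary list is freed.
 * Callers such as rsc_merge_weights() rely on getting an independent table
 * whose weights they can modify without disturbing the original.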
*/ GListPtr list = g_hash_table_get_values(hash); GHashTable *result = node_hash_from_list(list); g_list_free(list); return result; } GHashTable * native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } GHashTable * rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { GHashTable *work = NULL; int multiplier = 1; if (factor < 0) { multiplier = -1; } if (is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); if (is_set(flags, pe_weights_init)) { if (rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last); work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags); } else { work = node_hash_dup(rsc->allowed_nodes); } clear_bit(flags, pe_weights_init); } else if (rsc->variant == pe_group && rsc->children) { GListPtr iter = rsc->children; pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id); work = node_hash_dup(nodes); for(iter = rsc->children; iter->next != NULL; iter = iter->next) { work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags); } } else { pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id); work = node_hash_dup(nodes); node_hash_update(work, rsc->allowed_nodes, attr, factor, is_set(flags, pe_weights_positive)); } if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id); g_hash_table_destroy(work); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if (can_run_any(work)) { GListPtr gIter = NULL; if (is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; crm_trace("Checking %d additional colocation constraints", g_list_length(gIter)); } else if(rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } gIter = ((resource_t*)last->data)->rsc_cons_lhs; crm_trace("Checking %d additional optional group colocation constraints from %s", g_list_length(gIter), ((resource_t*)last->data)->id); } else { gIter = rsc->rsc_cons_lhs; crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id); } for (; gIter != NULL; gIter = gIter->next) { resource_t *other = NULL; rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else { other = constraint->rsc_lh; } pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id); work = rsc_merge_weights(other, rhs, work, constraint->node_attribute, multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback); dump_node_scores(LOG_TRACE, NULL, rhs, work); } } if (is_set(flags, pe_weights_positive)) { node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->weight == INFINITY_HACK) { node->weight = 1; } } } if (nodes) { g_hash_table_destroy(nodes); } clear_bit(rsc->flags, pe_rsc_merging); return work; } node_t * native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr gIter = NULL; int alloc_details 
= scores_log_level + 1; if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; GHashTable *archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)", rsc->id, constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if (constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_hash_dup(rsc->allowed_nodes); } rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); if (archive && can_run_any(rsc->allowed_nodes) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive) { g_hash_table_destroy(archive); } } dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_rollback); } print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id); /* make sure it doesn't come up again */ resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } else if(rsc->next_role > rsc->role && is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); rsc->next_role = rsc->role; } dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; rsc->next_role = rsc->role; if (rsc->running_on == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if (is_set(rsc->flags, pe_rsc_failed)) { assign_to = rsc->running_on->data; reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to ? 
assign_to->details->uname : "'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if (is_set(data_set->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if (is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set)) { pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if (rsc->allocated_to == NULL) { if (is_not_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); if (rsc->is_remote_node) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); CRM_ASSERT(remote_node != NULL); if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) { crm_trace("Setting remote node %s to ONLINE", remote_node->details->id); remote_node->details->online = TRUE; /* We shouldn't consider an unseen remote-node unclean if we are going * to try and connect to it. Otherwise we get an unnecessary fence */ if (remote_node->details->unseen == TRUE) { remote_node->details->unclean = FALSE; } } else { crm_trace("Setting remote node %s to SHUTDOWN. next role = %s, allocated=%s", remote_node->details->id, role2text(rsc->next_role), rsc->allocated_to ? "true" : "false"); remote_node->details->shutdown = TRUE; } } return rsc->allocated_to; } static gboolean is_op_dup(resource_t * rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; CRM_ASSERT(rsc); for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); if (safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (value == NULL) { value = "0"; } if (safe_str_neq(value, interval)) { continue; } if (id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err ("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; /* Only process for the operations without role="Stopped" */ value = crm_element_value(operation, "role"); if (value && text2role(value) == RSC_ROLE_STOPPED) { return; } CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node ? 
node->details->uname : "n/a"); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s mandatory: not active", key); } else { GListPtr gIter = NULL; for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_reschedule)) { is_optional = FALSE; break; } } g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if (is_optional) { char *local_key = strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* it's running : cancel it */ mon = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(mon->task); free(mon->cancel_task); mon->task = strdup(RSC_CANCEL); mon->cancel_task = strdup(name); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if (local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, value ? 
value : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (is_set(mon->flags, pe_action_optional) == FALSE) { pe_rsc_info(rsc, " Start recurring %s (%llus) for %s on %s", mon->task, interval_ms / 1000, rsc->id, crm_str(node_uname)); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp(rsc, start, node, operation, data_set); } } } } void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* TODO: Support of non-unique clone */ if (is_set(rsc->flags, pe_rsc_unique) == FALSE) { return; } /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring actions %s for %s in role %s on nodes where it'll not be running", ID(operation), rsc->id, role2text(rsc->next_role)); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", 
ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { action_t *cancel_op = NULL; char *local_key = strdup(key); g_list_free(possible_matches); cancel_op = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); free(cancel_op->cancel_task); cancel_op->task = strdup(RSC_CANCEL); cancel_op->cancel_task = strdup(name); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s", key, role, role2text(rsc->next_role), crm_str(node_uname)); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; char *stop_op_key = NULL; if (node_uname && safe_str_eq(stop_node_uname, node_uname)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); if (is_set(rsc->flags, pe_rsc_managed)) { char *probe_key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0); GListPtr probes = find_actions(rsc->actions, probe_key, stop_node); GListPtr pIter = NULL; for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; order_actions(probe, stopped_mon, pe_order_runnable_left); crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname); } g_list_free(probes); free(probe_key); } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_op_key = stop_key(rsc); stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t *) local_gIter->data; if (is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if (is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", 
crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, strdup(stop_op_key), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } free(stop_op_key); if (is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } free(key); } void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } static void handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set) { action_t *migrate_to = NULL; action_t *migrate_from = NULL; action_t *start = NULL; action_t *stop = NULL; gboolean partial = rsc->partial_migration_target ? TRUE : FALSE; pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s", rsc->id, current->details->id, chosen->details->id, partial ? 
"TRUE" : "FALSE"); start = start_action(rsc, chosen, TRUE); stop = stop_action(rsc, current, TRUE); if (partial == FALSE) { migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set); } migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set); if ((migrate_to && migrate_from) || (migrate_from && partial)) { set_bit(start->flags, pe_action_migrate_runnable); set_bit(stop->flags, pe_action_migrate_runnable); update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */ /* order probes before migrations */ if (partial) { set_bit(migrate_from->flags, pe_action_migrate_runnable); migrate_from->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set); } else { set_bit(migrate_from->flags, pe_action_migrate_runnable); set_bit(migrate_to->flags, pe_action_migrate_runnable); migrate_to->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); } custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set); } if (migrate_to) { add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); /* pcmk remote connections don't require pending to be recorded in cib. * We can optimize cib writes by only setting PENDING for non pcmk remote * connection resources */ if (rsc->is_remote_node == FALSE) { /* migrate_to takes place on the source node, but can * have an effect on the target node depending on how * the agent is written. Because of this, we have to maintain * a record that the migrate_to occurred incase the source node * loses membership while the migrate_to action is still in-flight. */ add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true"); } } if (migrate_from) { add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); } } void native_create_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *start = NULL; node_t *chosen = NULL; node_t *current = NULL; gboolean need_stop = FALSE; gboolean is_moving = FALSE; gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? 
TRUE : FALSE; GListPtr gIter = NULL; int num_active_nodes = 0; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; CRM_ASSERT(rsc); chosen = rsc->allocated_to; if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } else if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc, role2text(rsc->role), role2text(rsc->next_role)); if (rsc->running_on) { current = rsc->running_on->data; } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; if (rsc->partial_migration_source && (n->details == rsc->partial_migration_source->details)) { current = rsc->partial_migration_source; } num_active_nodes++; } for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop = stop_action(rsc, current, FALSE); set_bit(stop->flags, pe_action_dangle); pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, current->details->uname); if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, FALSE, data_set); } } if (num_active_nodes > 1) { if (num_active_nodes == 2 && chosen && rsc->partial_migration_target && rsc->partial_migration_source && (current->details == rsc->partial_migration_source->details) && (chosen->details == rsc->partial_migration_target->details)) { /* Here the chosen node is still the migration target from a partial * migration. Attempt to continue the migration instead of recovering * by stopping the resource everywhere and starting it on a single node. */ pe_rsc_trace(rsc, "Will attempt to continue with a partial migration to target %s from %s", rsc->partial_migration_target->details->id, rsc->partial_migration_source->details->id); } else { const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->partial_migration_target && rsc->partial_migration_source) { crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too", rsc->id, rsc->partial_migration_target->details->uname, rsc->partial_migration_source->details->uname); } else { pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s", rsc->id, class, type, num_active_nodes, recovery2text(rsc->recovery_type)); crm_warn("See %s for more information.", "http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active"); } if (rsc->recovery_type == recovery_stop_start) { need_stop = TRUE; } /* If by chance a partial migration is in process, * but the migration target is not chosen still, clear all * partial migration data. 
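 *
 * We only get here when the resource is active on multiple nodes and the
 * "continue the partial migration" case above did not apply, so any
 * half-finished migration can no longer be completed; dropping the
 * source/target pointers and clearing allow_migrate below means the
 * resource is recovered normally rather than via live migration.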
*/ rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = FALSE; } } if (is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, chosen, TRUE); set_bit(start->flags, pe_action_print_always); } if (current && chosen && current->details != chosen->details) { pe_rsc_trace(rsc, "Moving %s", rsc->id); is_moving = TRUE; need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Recovering %s", rsc->id); need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Block %s", rsc->id); need_stop = TRUE; } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) { /* Recovery of a promoted resource */ start = start_action(rsc, chosen, TRUE); if (is_set(start->flags, pe_action_optional) == FALSE) { pe_rsc_trace(rsc, "Forced start %s", rsc->id); need_stop = TRUE; } } pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); /* Create any additional actions required when bringing resource down and * back up to same level. */ role = rsc->role; while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) { break; } role = next_role; } while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { next_role = rsc_state_matrix[role][rsc->role]; pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA"); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if(is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "No monitor additional ops for blocked resource"); } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Monitor ops for active resource"); start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { pe_rsc_trace(rsc, "Monitor ops for in-active resource"); Recurring_Stopped(rsc, NULL, NULL, data_set); } /* if we are stuck in a partial migration, where the target * of the partial migration no longer matches the chosen target. * A full stop/start is required */ if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) { pe_rsc_trace(rsc, "Not allowing partial migration to continue. 
%s", rsc->id); allow_migrate = FALSE; } else if (is_moving == FALSE || is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || (current->details->unclean == TRUE) || rsc->next_role < RSC_ROLE_STARTED) { allow_migrate = FALSE; } if (allow_migrate) { handle_migration_actions(rsc, current, chosen, data_set); } } static void rsc_avoids_remote_nodes(resource_t *rsc) { GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc) { node->weight = -INFINITY; } } } void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ resource_t *top = uber_parent(rsc); int type = pe_order_optional | pe_order_implies_then | pe_order_restart; gboolean is_stonith = is_set(rsc->flags, pe_rsc_fence_device); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set); if (top->variant == pe_master || rsc->role > RSC_ROLE_SLAVE) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } if (is_stonith == FALSE && is_set(data_set->flags, pe_flag_enable_unfencing) && is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Check if the node needs to be unfenced first */ node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. 
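 * In that situation the unfencing must come after this resource's stop but
 * before the other resource's start, while the user constraint orders that
 * start before the stop, which closes the cycle.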
An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pe_order_optional|pe_order_same_node, data_set); custom_action_order(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pe_order_implies_then_on_node|pe_order_same_node, data_set); } } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(all_stopped->task), all_stopped, pe_order_implies_then | pe_order_runnable_left, data_set); } if (g_hash_table_size(rsc->utilization) > 0 && safe_str_neq(data_set->placement_strategy, "default")) { GHashTableIter iter; node_t *next = NULL; GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&next)) { char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_load, data_set); free(load_stopped_task); } } if (rsc->container) { resource_t *remote_rsc = NULL; /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else if (rsc->is_remote_node == FALSE) { remote_rsc = rsc_contains_remote_node(data_set, rsc->container); } if (remote_rsc) { /* The container represents a Pacemaker Remote node, so force the * resource on the Pacemaker Remote node instead of colocating the * resource with the container resource. 
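 *
 * Concretely, every allowed node whose remote connection resource is not
 * the one the container represents is banned with a -INFINITY weight
 * below, leaving only the matching remote or guest node as a candidate.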
*/ GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. */ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, data_set); if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } rsc_colocation_new("resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, data_set); } } if (rsc->is_remote_node || is_stonith) { /* don't allow remote nodes to run stonith devices * or remote connection resources.*/ rsc_avoids_remote_nodes(rsc); } /* If this is a guest node's implicit remote connection, do not allow the * guest resource to live on a Pacemaker Remote node, to avoid nesting * remotes. However, allow bundles to run on remote nodes. */ if (rsc->is_remote_node && rsc->container && is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } } void native_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } enum filter_colocation_res filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint, gboolean preview) { if (constraint->score == 0) { return influence_nothing; } /* rh side must be allocated before we can process constraint */ if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) { return influence_nothing; } if ((constraint->role_lh >= RSC_ROLE_SLAVE) && rsc_lh->parent && rsc_lh->parent->variant == pe_master && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* LH and RH resources have already been allocated, place the correct * priority oh LH rsc for the given multistate resource role */ return influence_rsc_priority; } if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if ((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return influence_nothing; } details_rh = rsc_rh->allocated_to ? rsc_rh->allocated_to->details : NULL; details_lh = rsc_lh->allocated_to ? rsc_lh->allocated_to->details : NULL; if (constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. %s", rsc_lh->id, rsc_rh->id, details_lh ? details_lh->uname : "n/a", details_rh ? 
details_rh->uname : "n/a"); } else if (constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh ? details_rh->uname : "n/a"); } return influence_nothing; } if (constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s", role2text(constraint->role_lh), role2text(rsc_lh->next_role)); return influence_nothing; } if (constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if (constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { crm_trace("LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_lh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { crm_trace("RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } return influence_rsc_location; } static void influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *rh_value = NULL; const char *lh_value = NULL; const char *attribute = "#id"; int score_multiplier = 1; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { return; } lh_value = g_hash_table_lookup(rsc_lh->allocated_to->details->attrs, attribute); rh_value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); if (!safe_str_eq(lh_value, rh_value)) { if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) { rsc_lh->priority = -INFINITY; } return; } if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) { return; } if (constraint->role_lh == RSC_ROLE_SLAVE) { score_multiplier = -1; } rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority); } static void colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if (constraint->score < 0) { /* nothing to do: * anti-colocation with something that is not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { tmp = g_hash_table_lookup(node->details->attrs, attribute); if (do_check && safe_str_eq(tmp, value)) { if (constraint->score < INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights(constraint->score, node->weight); } } else if (do_check == FALSE || constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check ? 
"failed" : "unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { static char score[33]; score2char_stack(constraint->score, score, sizeof(score)); pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { enum filter_colocation_res filter_results; CRM_ASSERT(rsc_lh); CRM_ASSERT(rsc_rh); filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE); pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: /*Promotion score will be set to -INFINITY in master_promotion_order() */ if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pe_fence_node(data_set, node, "deadman ticket was lost"); } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (g_list_length(rsc_lh->running_on) > 0) { clear_bit(rsc_lh->flags, pe_rsc_managed); set_bit(rsc_lh->flags, pe_rsc_block); } break; } } else if 
(rsc_ticket->ticket->granted == FALSE) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if (rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(action_t * action, node_t * node) { return action->flags; } enum pe_graph_flags native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, then->uuid, then->flags); if (type & pe_order_asymmetrical) { resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0; if (!then_rsc) { /* ignore */ } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) { /* ignore... if 'then' is supposed to be stopped after 'first', but * then is already stopped, there is nothing to be done when non-symmetrical. */ } else if ((then_rsc_role >= RSC_ROLE_STARTED) && safe_str_eq(then->task, RSC_START) && then->node && then_rsc->running_on && g_list_length(then_rsc->running_on) == 1 && then->node->details == ((node_t *) then_rsc->running_on->data)->details) { /* ignore... if 'then' is supposed to be started after 'first', but * then is already started, there is nothing to be done when non-symmetrical. */ } else if (!(first->flags & pe_action_runnable)) { /* prevent 'then' action from happening if 'first' is not runnable and * 'then' has not yet occurred. */ pe_action_implies(then, first, pe_action_optional); pe_action_implies(then, first, pe_action_runnable); pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid); } else { /* ignore... then is allowed to start/stop if it wants to. */ } } if (type & pe_order_implies_first) { if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) { // Needs is_set(first_flags, pe_action_optional) too? 
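/* pe_order_implies_first: the ordering makes 'first' mandatory here, so clear its optional flag, recording 'then' as the reason (matching the trace below). */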
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_optional); } if (is_set(flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE && is_set(then->flags, pe_action_optional) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_migrate_runnable); } } if (type & pe_order_implies_first_master) { if ((filter & pe_action_optional) && ((then->flags & pe_action_optional) == FALSE) && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) { pe_action_implies(first, then, pe_action_optional); if (is_set(first->flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_migrate_runnable); } pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); } } if ((type & pe_order_implies_first_migratable) && is_set(filter, pe_action_optional)) { if (((then->flags & pe_action_migrate_runnable) == FALSE) || ((then->flags & pe_action_runnable) == FALSE)) { pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_runnable); } if ((then->flags & pe_action_optional) == 0) { pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid); pe_action_implies(first, then, pe_action_optional); } } if ((type & pe_order_pseudo_left) && is_set(filter, pe_action_optional)) { if ((first->flags & pe_action_runnable) == FALSE) { pe_action_implies(then, first, pe_action_migrate_runnable); pe_clear_action_bit(then, pe_action_pseudo); pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid); } } if (is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(then->flags, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); pe_action_implies(then, first, pe_action_runnable); pe_action_implies(then, first, pe_action_migrate_runnable); } if (is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(then->flags, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { /* in this case, treat migrate_runnable as if first is optional */ if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); pe_action_implies(then, first, pe_action_optional); } } if (is_set(type, pe_order_restart)) { const char *reason = NULL; CRM_ASSERT(first->rsc && first->rsc->variant == pe_native); CRM_ASSERT(then->rsc && then->rsc->variant == pe_native); if ((filter & pe_action_runnable) && (then->flags & pe_action_runnable) == 0 && (then->rsc->flags & pe_rsc_managed)) { reason = "shutdown"; } if ((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) { reason = "recover"; } if (reason && is_set(first->flags, pe_action_optional)) { if (is_set(first->flags, pe_action_runnable) || is_not_set(then->flags, pe_action_optional)) { pe_rsc_trace(first->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_action_implies(first, then, pe_action_optional); } } if (reason && is_not_set(first->flags, 
pe_action_optional) && is_not_set(first->flags, pe_action_runnable)) { pe_rsc_trace(then->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_action_implies(then, first, pe_action_runnable); } if (reason && is_not_set(first->flags, pe_action_optional) && is_set(first->flags, pe_action_migrate_runnable) && is_not_set(then->flags, pe_action_migrate_runnable)) { pe_action_implies(first, then, pe_action_migrate_runnable); } } if (then_flags != then->flags) { changed |= pe_graph_updated_then; pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); if(then->rsc && then->rsc->parent) { /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */ update_action(then); } } if (first_flags != first->flags) { changed |= pe_graph_updated_first; pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; if (constraint == NULL) { pe_err("Constraint is NULL"); return; } else if (rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) { pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)", constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role)); return; } else if (is_active(constraint) == FALSE) { pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id); return; } for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *other_node = NULL; other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights(other_node->weight, node->weight); } else { other_node = node_copy(node); pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode); g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node); } if (other_node->rsc_discover_mode < constraint->discover_mode) { if (constraint->discover_mode == discover_exclusive) { rsc->exclusive_discover = TRUE; } /* exclusive > never > always... 
always is default */ other_node->rsc_discover_mode = constraint->discover_mode; } } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } #define log_change(a, fmt, args...) do { \ if(a && a->reason && terminal) { \ printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \ } else if(a && a->reason) { \ crm_notice(fmt" \tdue to %s", ##args, a->reason); \ } else if(terminal) { \ printf(" * "fmt"\n", ##args); \ } else { \ crm_notice(fmt, ##args); \ } \ } while(0) #define STOP_SANITY_ASSERT(lineno) do { \ if(current && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if(stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if(is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \ CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \ } \ } while(0) +static int rsc_width = 5; +static int detail_width = 5; +static void +LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) +{ + int len = 0; + char *reason = NULL; + char *details = NULL; + bool same_host = FALSE; + bool same_role = FALSE; + bool need_role = FALSE; + + CRM_ASSERT(action); + CRM_ASSERT(destination != NULL || origin != NULL); + + if(source == NULL) { + source = action; + } + + len = strlen(rsc->id); + if(len > rsc_width) { + rsc_width = len + 2; + } + + if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) { + need_role = TRUE; + } + + if(origin != NULL && destination != NULL && origin->details == destination->details) { + same_host = TRUE; + } + + if(rsc->role == rsc->next_role) { + same_role = TRUE; + } + + if(need_role && origin == NULL) { + /* Promoting from Stopped */ + details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); + + } else if(need_role && destination == NULL) { + /* Demoting a Master or Stopping a Slave */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + + } else if(origin == NULL || destination == NULL) { + /* Starting or stopping a resource */ + details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname); + + } else if(need_role && same_role && same_host) { + /* Recovering or Restarting a Master/Slave resource */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + + } else if(same_role && same_host) { + /* Recovering or Restarting a normal resource */ + details = crm_strdup_printf("%s", origin->details->uname); + + } else if(same_role && need_role) { + /* Moving a Master/Slave resource */ + details = crm_strdup_printf("%s -> %s %s", origin->details->uname, 
destination->details->uname, role2text(rsc->role)); + + } else if(same_role) { + /* Moving a normal resource */ + details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname); + + } else if(same_host) { + /* Promoting or Demoting a Master/Slave resource */ + details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); + + } else { + /* Moving and promoting/demoting */ + details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname); + } + + len = strlen(details); + if(len > detail_width) { + detail_width = len; + } + + if(source->reason && is_not_set(action->flags, pe_action_runnable)) { + reason = crm_strdup_printf(" due to %s (blocked)", source->reason); + + } else if(source->reason) { + reason = crm_strdup_printf(" due to %s", source->reason); + + } else if(is_not_set(action->flags, pe_action_runnable)) { + reason = strdup(" blocked"); + + } else { + reason = strdup(""); + } + + if(terminal) { + printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason); + } else { + crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason); + } + + free(details); + free(reason); +} + + void LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { node_t *next = NULL; node_t *current = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *demote = NULL; action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if(rsc->variant == pe_container) { container_LogActions(rsc, data_set, terminal); return; } if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { if (g_list_length(rsc->running_on) > 1 && rsc->partial_migration_source) { current = rsc->partial_migration_source; } else { current = rsc->running_on->data; } if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed) ? 
" unmanaged" : ""); return; } if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } key = start_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = stop_key(rsc); if(start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { possible_matches = find_actions(rsc->actions, key, NULL); } else { possible_matches = find_actions(rsc->actions, key, current); } - free(key); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } + free(key); key = promote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } key = demote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { action_t *migrate_to = NULL; key = generate_op_key(rsc->id, RSC_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { migrate_to = possible_matches->data; } CRM_CHECK(next != NULL,); if (next == NULL) { } else if (migrate_to && is_set(migrate_to->flags, pe_action_runnable) && current) { - log_change(start, "Migrate %s\t(%s %s -> %s)", - rsc->id, role2text(rsc->role), current->details->uname, - next->details->uname); + LogAction("Migrate", rsc, current, next, start, NULL, terminal); } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + LogAction("Reload", rsc, current, next, start, NULL, terminal); } else if (start == NULL || is_set(start->flags, pe_action_optional)) { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { - log_change(start, "Stop %s\t(%s %s%s)", rsc->id, role2text(rsc->role), current?current->details->uname:"N/A", - stop && is_not_set(stop->flags, pe_action_runnable) ? " - blocked" : ""); + LogAction("Stop", rsc, current, NULL, stop, start, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { - log_change(stop, "%s %s\t(%s %s -> %s)", - is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move ", - rsc->id, role2text(rsc->role), - current->details->uname, next->details->uname); + LogAction(is_set(rsc->flags, pe_rsc_failed) ? 
"Recover" : "Move", + rsc, current, next, stop, NULL, terminal); } else if (is_set(rsc->flags, pe_rsc_failed)) { - log_change(stop, "Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + LogAction("Recover", rsc, current, NULL, stop, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else { - log_change(start, "Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + LogAction("Restart", rsc, current, next, start, NULL, terminal); /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */ } g_list_free(possible_matches); return; } - if (rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { - CRM_CHECK(current != NULL,); - if (current != NULL) { - gboolean allowed = FALSE; - - if (demote != NULL && (demote->flags & pe_action_runnable)) { - allowed = TRUE; - } - - log_change(demote, "Demote %s\t(%s -> %s %s%s)", - rsc->id, - role2text(rsc->role), - role2text(rsc->next_role), - current->details->uname, allowed ? "" : " - blocked"); - - if (stop != NULL && is_not_set(stop->flags, pe_action_optional) - && rsc->next_role > RSC_ROLE_STOPPED && moving == FALSE) { - if (is_set(rsc->flags, pe_rsc_failed)) { - log_change(stop, "Recover %s\t(%s %s)", - rsc->id, role2text(rsc->role), next->details->uname); - STOP_SANITY_ASSERT(__LINE__); - - } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), - next->details->uname); - - } else { - log_change(start, "Restart %s\t(%s %s)", - rsc->id, role2text(rsc->next_role), next->details->uname); - STOP_SANITY_ASSERT(__LINE__); - } - } - } + if(stop + && (rsc->next_role == RSC_ROLE_STOPPED + || (start && is_not_set(start->flags, pe_action_runnable)))) { - } else if (rsc->next_role == RSC_ROLE_STOPPED) { GListPtr gIter = NULL; - CRM_CHECK(current != NULL,); - key = stop_key(rsc); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; action_t *stop_op = NULL; - gboolean allowed = FALSE; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op && (stop_op->flags & pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); - allowed = TRUE; } - log_change(start, "Stop %s\t(%s%s) %s", rsc->id, node->details->uname, - allowed ? "" : " - blocked", stop->reason?stop->reason:""); + LogAction("Stop", rsc, node, NULL, stop_op, start, terminal); } free(key); - } - if (moving) { - log_change(stop, "Move %s\t(%s %s -> %s)", - rsc->id, role2text(rsc->next_role), current->details->uname, - next->details->uname); + } else if (stop && is_set(rsc->flags, pe_rsc_failed)) { + /* 'stop' may be NULL if the failure was ignored */ + LogAction("Recover", rsc, current, next, stop, start, terminal); STOP_SANITY_ASSERT(__LINE__); - } - if (rsc->role == RSC_ROLE_STOPPED) { - gboolean allowed = FALSE; + } else if (moving) { + LogAction("Move", rsc, current, next, stop, NULL, terminal); + STOP_SANITY_ASSERT(__LINE__); - if (start && (start->flags & pe_action_runnable)) { - allowed = TRUE; - } + } else if (is_set(rsc->flags, pe_rsc_reload)) { + LogAction("Reload", rsc, current, next, start, NULL, terminal); - CRM_CHECK(next != NULL,); - if (next != NULL) { - log_change(start, "Start %s\t(%s%s)", rsc->id, next->details->uname, - allowed ? 
"" : " - blocked"); - } - if (allowed == FALSE) { - return; - } - } + } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) { + LogAction("Restart", rsc, current, next, start, NULL, terminal); + STOP_SANITY_ASSERT(__LINE__); - if (rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { - gboolean allowed = FALSE; + } else if (rsc->role == RSC_ROLE_MASTER) { + CRM_LOG_ASSERT(current != NULL); + LogAction("Demote", rsc, current, next, demote, NULL, terminal); + } else if(rsc->next_role == RSC_ROLE_MASTER) { CRM_LOG_ASSERT(next); - if (stop != NULL && is_not_set(stop->flags, pe_action_optional) - && rsc->role > RSC_ROLE_STOPPED) { - if (is_set(rsc->flags, pe_rsc_failed)) { - log_change(stop, "Recover %s\t(%s %s)", - rsc->id, role2text(rsc->role), next?next->details->uname:NULL); - STOP_SANITY_ASSERT(__LINE__); - - } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), - next?next->details->uname:NULL); - STOP_SANITY_ASSERT(__LINE__); - - } else { - log_change(start, "Restart %s\t(%s %s)", - rsc->id, role2text(rsc->role), next?next->details->uname:NULL); - STOP_SANITY_ASSERT(__LINE__); - } - } - - if (promote && (promote->flags & pe_action_runnable)) { - allowed = TRUE; - } + LogAction("Promote", rsc, current, next, promote, NULL, terminal); - log_change(promote, "Promote %s\t(%s -> %s %s%s)", - rsc->id, - role2text(rsc->role), - role2text(rsc->next_role), - next?next->details->uname:NULL, - allowed ? "" : " - blocked"); + } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) { + LogAction("Start", rsc, current, next, start, NULL, terminal); } } gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop; if (rsc->partial_migration_target) { if (rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if (is_not_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set); const char *unfenced = g_hash_table_lookup(current->details->attrs, XML_NODE_IS_UNFENCED); order_actions(stop, unfence, pe_order_implies_first); if (unfenced == NULL || safe_str_eq("0", unfenced)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname); } } } return TRUE; } gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { action_t *start = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s on %s %d", rsc->id, next ? 
next->details->uname : "N/A", optional); start = start_action(rsc, next, TRUE); if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(next, "on", TRUE, NULL, data_set); const char *unfenced = g_hash_table_lookup(next->details->attrs, XML_NODE_IS_UNFENCED); order_actions(unfence, start, pe_order_implies_then); if (unfenced == NULL || safe_str_eq("0", unfenced)) { char *reason = crm_strdup_printf("Required by %s", rsc->id); trigger_unfencing(NULL, next, reason, NULL, data_set); free(reason); } } if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } return TRUE; } gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; CRM_ASSERT(rsc); CRM_CHECK(next != NULL, return FALSE); pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t *) gIter->data; if (is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if (runnable) { promote_action(rsc, next, optional); return TRUE; } pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t *) gIter->data; update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A"); demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A"); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? 
pe_order_implies_then : pe_order_optional, data_set); return TRUE; } #include <../lib/pengine/unpack.h> #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; static char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if (last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len - 1; while (complete == FALSE && lpc > 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; last_rsc_id = calloc(1, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len + 1] = 0; complete = TRUE; free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); return NULL; break; } } return last_rsc_id; } static node_t * probe_grouped_clone(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { node_t *running = NULL; resource_t *top = uber_parent(rsc); if (running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. * * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while (peer && running == NULL) { running = pe_hash_table_lookup(peer->known_on, node->details->id); if (running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active clone: %s", rsc->id); free(clone_id); return running; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } free(clone_id); } return running; } gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { enum pe_ordering flags = pe_order_optional; char *key = NULL; action_t *probe = NULL; node_t *running = NULL; node_t *allowed = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } else if (force == FALSE && is_container_remote_node(node)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s on container %s", rsc->id, node->details->id); return FALSE; } if (is_remote_node(node)) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) 
{ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes do not run stonith agents.", rsc->id, node->details->id); return FALSE; } else if (rsc_contains_remote_node(data_set, rsc)) { pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run resources that contain connection resources.", rsc->id, node->details->id); return FALSE; } else if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run connection resources", rsc->id, node->details->id); return FALSE; } } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return any_created; } else if ((rsc->container) && (!rsc->is_remote_node)) { pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id); return FALSE; } if (is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } running = g_hash_table_lookup(rsc->known_on, node->details->id); if (running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) { /* Anonymous clones */ if (rsc->parent == top) { running = g_hash_table_lookup(rsc->parent->known_on, node->details->id); } else { /* Grouped anonymous clones need extra special handling */ running = probe_grouped_clone(rsc, node, data_set); } } if (force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname); return FALSE; } allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (rsc->exclusive_discover || top->exclusive_discover) { if (allowed == NULL) { /* exclusive discover is enabled and this node is not in the allowed list. */ return FALSE; } else if (allowed->rsc_discover_mode != discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on */ return FALSE; } } if (allowed && allowed->rsc_discover_mode == discover_never) { /* this resource is marked as not needing to be discovered on this node */ return FALSE; } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); /* If enabled, require unfencing before probing any fence devices * but ensure it happens after any resources that require * unfencing have been probed. * * Doing it the other way (requiring unfencing after probing * resources that need it) would result in the node being * unfenced, and all its resources being stopped, whenever a new * resource is added. Which would be highly suboptimal. * * So essentially, at the point the fencing device(s) have been * probed, we know the state of all resources that require * unfencing and that unfencing occurred. */ if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); order_actions(unfence, probe, pe_order_optional); } /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. 
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role), is_set(probe->flags, pe_action_runnable), rsc->running_on); if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { top = rsc; } else if (pe_rsc_is_clone(top) == FALSE) { top = rsc; } else { crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id); } if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) { /* Prevent the start from occurring if rsc isn't active, but * don't cause it to stop if it was active already */ flags |= pe_order_runnable_left; } custom_action_order(rsc, NULL, probe, top, generate_op_key(top->id, RSC_START, 0), NULL, flags, data_set); /* Before any reloads, if they exist */ custom_action_order(rsc, NULL, probe, top, reload_key(rsc), NULL, pe_order_optional, data_set); if (node->details->shutdown == FALSE) { custom_action_order(rsc, NULL, probe, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional, data_set); } if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Normally rsc.start depends on probe complete which depends * on rsc.probe. But this can't be the case in this scenario as * it would create graph loops. * * So instead we explicitly order 'rsc.probe then rsc.start' */ } else { order_actions(probe, complete, pe_order_implies_then); } return TRUE; } static void native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { node_t *target; GListPtr gIter = NULL; action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if(action->needs == rsc_req_nothing) { /* Anything other than start or promote requires nothing */ } else if (action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_optional); } else if (safe_str_eq(action->task, RSC_START) && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { /* if known == NULL, then we don't know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * with the resource * * it's analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explanation is that the * DC died and took its status with it */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_optional | pe_order_runnable_left); } } } static void native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; GListPtr action_list = NULL; action_t *start = NULL; resource_t *top = uber_parent(rsc); node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Check whether the resource has a pending start action */ start = find_first_action(rsc->actions, NULL, 
CRMD_ACTION_START, NULL); /* Get a list of stop actions potentially implied by the fencing */ key = stop_key(rsc); action_list = find_actions(rsc->actions, key, target); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The stop would never complete and is now implied by the fencing, * so convert it into a pseudo-action. */ update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_implied_by_stonith, __FUNCTION__, __LINE__); if(start == NULL || start->needs > rsc_req_quorum) { enum pe_ordering flags = pe_order_optional; action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); if (target->details->remote_rsc) { /* User constraints must not order a resource in a guest node * relative to the guest node container resource. This flag * marks constraints as generated by the cluster and thus * immune to that check. */ flags |= pe_order_preserve; } order_actions(stonith_op, action, flags); order_actions(stonith_op, parent_stop, flags); } if (is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ create_secondary_notification(action, rsc, stonith_op, data_set); } /* From Bug #1601, successful fencing must be an input to a failed resource's stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). 
TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ key = demote_key(rsc); action_list = find_actions(rsc->actions, key, target); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. */ update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); if (start == NULL || start->needs > rsc_req_quorum) { order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); } else { native_start_constraints(rsc, stonith_op, data_set); native_stop_constraints(rsc, stonith_op, data_set); } } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static action_t * get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current) { action_t *a = NULL; GListPtr action_list = NULL; char *key = generate_op_key(rsc->id, action, 0); action_list = find_actions(rsc->actions, key, current); if (action_list == NULL || action_list->data == NULL) { crm_trace("%s: no %s action", rsc->id, action); free(key); return NULL; } a = action_list->data; g_list_free(action_list); if (only_valid && is_set(a->flags, pe_action_pseudo)) { crm_trace("%s: pseudo", key); a = NULL; } else if (only_valid && is_not_set(a->flags, pe_action_runnable)) { crm_trace("%s: runnable", key); a = NULL; } free(key); return a; } void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *other = NULL; action_t *reload = NULL; if (rsc->children) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; ReloadRsc(child_rsc, node, data_set); } return; } else if (rsc->variant > pe_native) { /* Complex resource with no children */ return; } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); return; } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); 
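/* A failed or still-starting resource cannot safely be reloaded in place, so fall back to scheduling a stop (and hence a full restart) instead. */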
stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */ return; } else if (node == NULL) { pe_rsc_trace(rsc, "%s: not active", rsc->id); return; } pe_rsc_trace(rsc, "Processing %s", rsc->id); set_bit(rsc->flags, pe_rsc_reload); reload = custom_action( rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set); /* stop = stop_action(rsc, node, optional); */ other = get_first_named_action(rsc, RSC_STOP, TRUE, node); if (other != NULL) { order_actions(reload, other, pe_order_optional); } other = get_first_named_action(rsc, RSC_DEMOTE, TRUE, node); if (other != NULL) { order_actions(reload, other, pe_order_optional); } } void native_append_meta(resource_t * rsc, xmlNode * xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); resource_t *iso_parent, *last_parent, *parent; if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container) { crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id); } } last_parent = iso_parent = rsc; while (iso_parent != NULL) { char *name = NULL; char *iso = NULL; if (iso_parent->isolation_wrapper == NULL) { last_parent = iso_parent; iso_parent = iso_parent->parent; continue; } /* name of wrapper script this resource is routed through. */ name = crm_meta_name(XML_RSC_ATTR_ISOLATION_WRAPPER); crm_xml_add(xml, name, iso_parent->isolation_wrapper); free(name); /* instance name for isolated environment */ name = crm_meta_name(XML_RSC_ATTR_ISOLATION_INSTANCE); if (pe_rsc_is_clone(iso_parent)) { /* if isolation is set at the clone/master level, we have to * give this resource the unique isolation instance associated * with the clone child (last_parent)*/ /* Example: cloned group. group is container * clone myclone - iso_parent * group mygroup - last_parent (this is the iso environment) * rsc myrsc1 - rsc * rsc myrsc2 * The group is what is isolated in example1. We have to make * sure myrsc1 and myrsc2 launch in the same isolated environment. * * Example: cloned primitives. 
rsc primitive is container * clone myclone iso_parent * rsc myrsc1 - last_parent == rsc (this is the iso environment) * The individual cloned primitive instances are isolated */ value = g_hash_table_lookup(last_parent->meta, XML_RSC_ATTR_INCARNATION); CRM_ASSERT(value != NULL); iso = crm_concat(crm_element_value(last_parent->xml, XML_ATTR_ID), value, '_'); crm_xml_add(xml, name, iso); free(iso); } else { /* * Example: cloned group of containers * clone myclone * group mygroup * rsc myrsc1 - iso_parent (this is the iso environment) * rsc myrsc2 * * Example: group of containers * group mygroup * rsc myrsc1 - iso_parent (this is the iso environment) * rsc myrsc2 * * Example: group is container * group mygroup - iso_parent ( this is iso environment) * rsc myrsc1 * rsc myrsc2 * * Example: single primitive * rsc myrsc1 - iso_parent (this is the iso environment) */ value = g_hash_table_lookup(iso_parent->meta, XML_RSC_ATTR_INCARNATION); if (value) { crm_xml_add(xml, name, iso_parent->id); iso = crm_concat(crm_element_value(iso_parent->xml, XML_ATTR_ID), value, '_'); crm_xml_add(xml, name, iso); free(iso); } else { crm_xml_add(xml, name, iso_parent->id); } } free(name); break; } } diff --git a/pengine/test10/1-a-then-bm-move-b.summary b/pengine/test10/1-a-then-bm-move-b.summary index 571feda600..f4d6207afc 100644 --- a/pengine/test10/1-a-then-bm-move-b.summary +++ b/pengine/test10/1-a-then-bm-move-b.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Migrate B (Started 18node2 -> 18node1) + * Migrate B ( 18node2 -> 18node1 ) Executing cluster transition: * Resource action: B migrate_to on 18node2 * Resource action: B migrate_from on 18node1 * Resource action: B stop on 18node2 * Pseudo action: all_stopped * Pseudo action: B_start_0 * Resource action: B monitor=60000 on 18node1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/10-a-then-bm-b-move-a-clone.summary b/pengine/test10/10-a-then-bm-b-move-a-clone.summary index b39963f5da..ee7697f89f 100644 --- a/pengine/test10/10-a-then-bm-b-move-a-clone.summary +++ b/pengine/test10/10-a-then-bm-b-move-a-clone.summary @@ -1,32 +1,32 @@ Current cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node1 f20node2 ] vm (ocf::heartbeat:Dummy): Started f20node1 Transition Summary: * Stop myclone:1 (f20node1) due to node availability - * Migrate vm (Started f20node1 -> f20node2) + * Migrate vm ( f20node1 -> f20node2 ) Executing cluster transition: * Resource action: vm migrate_to on f20node1 * Resource action: vm migrate_from on f20node2 * Resource action: vm stop on f20node1 * Pseudo action: myclone-clone_stop_0 * Pseudo action: vm_start_0 * Resource action: myclone stop on f20node1 * Pseudo action: myclone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node2 ] Stopped: [ f20node1 ] vm (ocf::heartbeat:Dummy): Started f20node2 diff --git a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary index 498cd2b055..9138c81e54 100644 --- a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary +++ b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary @@ 
-1,35 +1,35 @@ Current cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node1 ] Stopped: [ f20node2 ] vm (ocf::heartbeat:Dummy): Started f20node1 Transition Summary: - * Move myclone:0 (Started f20node1 -> f20node2) - * Move vm (Started f20node1 -> f20node2) due to unrunnable myclone-clone stop + * Move myclone:0 ( f20node1 -> f20node2 ) + * Move vm ( f20node1 -> f20node2 ) due to unrunnable myclone-clone stop Executing cluster transition: * Resource action: myclone monitor on f20node2 * Resource action: vm stop on f20node1 * Pseudo action: myclone-clone_stop_0 * Resource action: myclone stop on f20node1 * Pseudo action: myclone-clone_stopped_0 * Pseudo action: myclone-clone_start_0 * Pseudo action: all_stopped * Resource action: myclone start on f20node2 * Pseudo action: myclone-clone_running_0 * Resource action: vm start on f20node2 Revised cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node2 ] Stopped: [ f20node1 ] vm (ocf::heartbeat:Dummy): Started f20node2 diff --git a/pengine/test10/1360.summary b/pengine/test10/1360.summary index 04ec94194a..acff1b8f76 100644 --- a/pengine/test10/1360.summary +++ b/pengine/test10/1360.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ ssgtest1a ssgtest1b ] Resource Group: ClusterAlias VIP (ocf::testing:VIP-RIP.sh): Started ssgtest1a Clone Set: dolly [dollies] Started: [ ssgtest1a ] Transition Summary: - * Move dollies:0 (Started ssgtest1a -> ssgtest1b) + * Move dollies:0 ( ssgtest1a -> ssgtest1b ) Executing cluster transition: * Pseudo action: dolly_stop_0 * Resource action: dollies:0 stop on ssgtest1a * Pseudo action: dolly_stopped_0 * Pseudo action: dolly_start_0 * Pseudo action: all_stopped * Resource action: dollies:0 start on ssgtest1b * Pseudo action: dolly_running_0 Revised cluster status: Online: [ ssgtest1a ssgtest1b ] Resource Group: ClusterAlias VIP (ocf::testing:VIP-RIP.sh): Started ssgtest1a Clone Set: dolly [dollies] Started: [ ssgtest1b ] diff --git a/pengine/test10/2-am-then-b-move-a.summary b/pengine/test10/2-am-then-b-move-a.summary index 1d74081921..a238cf67b0 100644 --- a/pengine/test10/2-am-then-b-move-a.summary +++ b/pengine/test10/2-am-then-b-move-a.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Migrate A (Started 18node1 -> 18node2) + * Migrate A ( 18node1 -> 18node2 ) Executing cluster transition: * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: A_start_0 * Resource action: A monitor=60000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/3-am-then-bm-both-migrate.summary b/pengine/test10/3-am-then-bm-both-migrate.summary index 6e1e33ae90..d938449a10 100644 --- a/pengine/test10/3-am-then-bm-both-migrate.summary +++ b/pengine/test10/3-am-then-bm-both-migrate.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Migrate A (Started 18node1 -> 18node2) - * Migrate B (Started 18node2 -> 18node1) + * Migrate A ( 18node1 -> 18node2 ) + * Migrate B ( 18node2 -> 
18node1 ) Executing cluster transition: * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: B migrate_to on 18node2 * Resource action: B migrate_from on 18node1 * Resource action: B stop on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: A_start_0 * Pseudo action: B_start_0 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/4-am-then-bm-b-not-migratable.summary b/pengine/test10/4-am-then-bm-b-not-migratable.summary index 2283c8b0f7..16ce1498c9 100644 --- a/pengine/test10/4-am-then-bm-b-not-migratable.summary +++ b/pengine/test10/4-am-then-bm-b-not-migratable.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Migrate A (Started 18node1 -> 18node2) - * Move B (Started 18node2 -> 18node1) + * Migrate A ( 18node1 -> 18node2 ) + * Move B ( 18node2 -> 18node1 ) Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: A_start_0 * Resource action: B start on 18node1 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/5-am-then-bm-a-not-migratable.summary b/pengine/test10/5-am-then-bm-a-not-migratable.summary index 7e95dbb124..fa1dc33a70 100644 --- a/pengine/test10/5-am-then-bm-a-not-migratable.summary +++ b/pengine/test10/5-am-then-bm-a-not-migratable.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Move A (Started 18node1 -> 18node2) - * Move B (Started 18node2 -> 18node1) due to unrunnable A stop + * Move A ( 18node1 -> 18node2 ) + * Move B ( 18node2 -> 18node1 ) due to unrunnable A stop Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Resource action: A start on 18node2 * Resource action: B start on 18node1 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/594.summary b/pengine/test10/594.summary index de1d179137..4a36789222 100644 --- a/pengine/test10/594.summary +++ b/pengine/test10/594.summary @@ -1,56 +1,56 @@ Current cluster status: Node hadev3 (879e65f8-4b38-4c56-9552-4752ad436669): UNCLEAN (offline) Online: [ hadev1 hadev2 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Started hadev1 
Transition Summary: * Shutdown hadev2 * Fence (reboot) hadev3 'peer is no longer part of the cluster' - * Move DcIPaddr (Started hadev2 -> hadev1) - * Move rsc_hadev2 (Started hadev2 -> hadev1) + * Move DcIPaddr ( hadev2 -> hadev1 ) + * Move rsc_hadev2 ( hadev2 -> hadev1 ) * Stop child_DoFencing:0 (hadev2) due to node availability * Stop child_DoFencing:2 (hadev1) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on hadev1 * Resource action: rsc_hadev3 monitor on hadev2 * Resource action: rsc_hadev2 monitor on hadev1 * Resource action: child_DoFencing:0 monitor on hadev1 * Resource action: child_DoFencing:2 monitor on hadev2 * Pseudo action: DoFencing_stop_0 * Fencing hadev3 (reboot) * Resource action: DcIPaddr stop on hadev2 * Resource action: rsc_hadev2 stop on hadev2 * Resource action: child_DoFencing:0 stop on hadev2 * Resource action: child_DoFencing:2 stop on hadev1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on hadev2 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: DcIPaddr start on hadev1 * Resource action: rsc_hadev2 start on hadev1 * Resource action: DcIPaddr monitor=5000 on hadev1 * Resource action: rsc_hadev2 monitor=5000 on hadev1 Revised cluster status: Online: [ hadev1 hadev2 ] OFFLINE: [ hadev3 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Stopped diff --git a/pengine/test10/6-migrate-group.summary b/pengine/test10/6-migrate-group.summary index 3c4e7c6791..c7c9f6d19d 100644 --- a/pengine/test10/6-migrate-group.summary +++ b/pengine/test10/6-migrate-group.summary @@ -1,44 +1,44 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node1 C (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: - * Migrate A (Started 18node1 -> 18node2) - * Migrate B (Started 18node1 -> 18node2) - * Migrate C (Started 18node1 -> 18node2) + * Migrate A ( 18node1 -> 18node2 ) + * Migrate B ( 18node1 -> 18node2 ) + * Migrate C ( 18node1 -> 18node2 ) Executing cluster transition: * Pseudo action: thegroup_stop_0 * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: B migrate_to on 18node1 * Resource action: B migrate_from on 18node2 * Resource action: C migrate_to on 18node1 * Resource action: C migrate_from on 18node2 * Resource action: C stop on 18node1 * Resource action: B stop on 18node1 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: thegroup_stopped_0 * Pseudo action: thegroup_start_0 * Pseudo action: A_start_0 * Pseudo action: B_start_0 * Pseudo action: C_start_0 * Pseudo action: thegroup_running_0 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node2 * Resource action: C monitor=60000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node2 C (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/662.summary b/pengine/test10/662.summary index 4a9d911bc5..a19f71ea35 100644 --- 
a/pengine/test10/662.summary +++ b/pengine/test10/662.summary @@ -1,67 +1,67 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n09 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n02 child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n04 child_DoFencing:3 (stonith:ssh): Started c001n09 Transition Summary: * Shutdown c001n02 - * Move rsc_c001n02 (Started c001n02 -> c001n03) + * Move rsc_c001n02 ( c001n02 -> c001n03 ) * Stop child_DoFencing:0 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n04 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n02 * Resource action: rsc_c001n09 monitor on c001n04 * Resource action: rsc_c001n09 monitor on c001n03 * Resource action: rsc_c001n09 monitor on c001n02 * Resource action: rsc_c001n02 monitor on c001n09 * Resource action: rsc_c001n02 monitor on c001n04 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n03 monitor on c001n09 * Resource action: rsc_c001n03 monitor on c001n04 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n04 monitor on c001n09 * Resource action: rsc_c001n04 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n09 * Resource action: child_DoFencing:0 monitor on c001n04 * Resource action: child_DoFencing:1 monitor on c001n04 * Resource action: child_DoFencing:1 monitor on c001n02 * Resource action: child_DoFencing:2 monitor on c001n09 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n04 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Pseudo action: DoFencing_stop_0 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: child_DoFencing:0 stop on c001n02 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Resource action: rsc_c001n02 start on c001n03 * Resource action: rsc_c001n02 monitor=5000 on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n09 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n04 child_DoFencing:3 (stonith:ssh): Started c001n09 diff --git a/pengine/test10/696.summary b/pengine/test10/696.summary index af16c933f5..78e40d1729 100644 --- a/pengine/test10/696.summary +++ b/pengine/test10/696.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ hadev1 hadev2 hadev3 ] DcIPaddr (ocf::heartbeat:IPaddr): Starting hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev3 (Monitoring) rsc_hadev2 (ocf::heartbeat:IPaddr): Starting hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3 (Monitoring) Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 
(Monitoring) child_DoFencing:1 (stonith:ssh): Started hadev3 (Monitoring) child_DoFencing:2 (stonith:ssh): Stopped Transition Summary: - * Move rsc_hadev1 (Started hadev3 -> hadev1) + * Move rsc_hadev1 ( hadev3 -> hadev1 ) * Start child_DoFencing:2 (hadev1) Executing cluster transition: * Resource action: DcIPaddr monitor on hadev3 * Resource action: DcIPaddr monitor on hadev1 * Resource action: rsc_hadev1 monitor on hadev2 * Resource action: rsc_hadev1 monitor on hadev1 * Resource action: rsc_hadev2 monitor on hadev3 * Resource action: rsc_hadev2 monitor on hadev1 * Resource action: rsc_hadev3 monitor=5000 on hadev3 * Resource action: rsc_hadev3 monitor on hadev2 * Resource action: rsc_hadev3 monitor on hadev1 * Resource action: child_DoFencing:0 monitor=5000 on hadev2 * Resource action: child_DoFencing:0 monitor on hadev3 * Resource action: child_DoFencing:0 monitor on hadev1 * Resource action: child_DoFencing:1 monitor=5000 on hadev3 * Resource action: child_DoFencing:1 monitor on hadev2 * Resource action: child_DoFencing:1 monitor on hadev1 * Resource action: child_DoFencing:2 monitor on hadev3 * Resource action: child_DoFencing:2 monitor on hadev2 * Resource action: child_DoFencing:2 monitor on hadev1 * Pseudo action: DoFencing_start_0 * Resource action: DcIPaddr start on hadev2 * Resource action: rsc_hadev1 stop on hadev3 * Resource action: rsc_hadev2 start on hadev2 * Resource action: child_DoFencing:2 start on hadev1 * Pseudo action: DoFencing_running_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on hadev2 * Resource action: rsc_hadev1 start on hadev1 * Resource action: rsc_hadev2 monitor=5000 on hadev2 * Resource action: child_DoFencing:2 monitor=5000 on hadev1 * Resource action: rsc_hadev1 monitor=5000 on hadev1 Revised cluster status: Online: [ hadev1 hadev2 hadev3 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev3 child_DoFencing:2 (stonith:ssh): Started hadev1 diff --git a/pengine/test10/7-migrate-group-one-unmigratable.summary b/pengine/test10/7-migrate-group-one-unmigratable.summary index cf1b370c2f..6c6b127891 100644 --- a/pengine/test10/7-migrate-group-one-unmigratable.summary +++ b/pengine/test10/7-migrate-group-one-unmigratable.summary @@ -1,40 +1,40 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node1 C (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: - * Migrate A (Started 18node1 -> 18node2) - * Move B (Started 18node1 -> 18node2) - * Move C (Started 18node1 -> 18node2) due to unrunnable B stop + * Migrate A ( 18node1 -> 18node2 ) + * Move B ( 18node1 -> 18node2 ) + * Move C ( 18node1 -> 18node2 ) due to unrunnable B stop Executing cluster transition: * Pseudo action: thegroup_stop_0 * Resource action: C stop on 18node1 * Resource action: B stop on 18node1 * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: thegroup_stopped_0 * Pseudo action: thegroup_start_0 * Pseudo action: A_start_0 * Resource action: B start on 18node2 * Resource action: C start on 18node2 * Pseudo action: thegroup_running_0 * Resource 
action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node2 * Resource action: C monitor=60000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node2 C (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/726.summary b/pengine/test10/726.summary index 920aefee27..b3cd4e47e8 100644 --- a/pengine/test10/726.summary +++ b/pengine/test10/726.summary @@ -1,88 +1,88 @@ Current cluster status: Online: [ ibm1 sgi2 test02 test03 ] DcIPaddr (ocf::heartbeat:IPaddr): Started test03 (Monitoring) rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped rsc_ibm1 (ocf::heartbeat:IPaddr): Started test03 (Monitoring) rsc_test02 (ocf::heartbeat:IPaddr): Stopped rsc_test03 (ocf::heartbeat:IPaddr): Started test03 (Monitoring) Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Starting test02 child_DoFencing:1 (stonith:ssh): Starting test03 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: * Start rsc_sgi2 (sgi2) - * Move rsc_ibm1 (Started test03 -> ibm1) + * Move rsc_ibm1 ( test03 -> ibm1 ) * Start rsc_test02 (test02) * Start child_DoFencing:2 (ibm1) * Start child_DoFencing:3 (sgi2) Executing cluster transition: * Resource action: DcIPaddr monitor=5000 on test03 * Resource action: DcIPaddr monitor on test02 * Resource action: DcIPaddr monitor on sgi2 * Resource action: DcIPaddr monitor on ibm1 * Resource action: rsc_sgi2 monitor on test03 * Resource action: rsc_sgi2 monitor on test02 * Resource action: rsc_sgi2 monitor on sgi2 * Resource action: rsc_sgi2 monitor on ibm1 * Resource action: rsc_ibm1 monitor on test02 * Resource action: rsc_ibm1 monitor on sgi2 * Resource action: rsc_ibm1 monitor on ibm1 * Resource action: rsc_test02 monitor on test03 * Resource action: rsc_test02 monitor on test02 * Resource action: rsc_test02 monitor on sgi2 * Resource action: rsc_test02 monitor on ibm1 * Resource action: rsc_test03 monitor=5000 on test03 * Resource action: rsc_test03 monitor on test02 * Resource action: rsc_test03 monitor on sgi2 * Resource action: rsc_test03 monitor on ibm1 * Resource action: child_DoFencing:0 monitor on sgi2 * Resource action: child_DoFencing:0 monitor on ibm1 * Resource action: child_DoFencing:1 monitor on test02 * Resource action: child_DoFencing:1 monitor on sgi2 * Resource action: child_DoFencing:1 monitor on ibm1 * Resource action: child_DoFencing:2 monitor on test03 * Resource action: child_DoFencing:2 monitor on test02 * Resource action: child_DoFencing:2 monitor on sgi2 * Resource action: child_DoFencing:2 monitor on ibm1 * Resource action: child_DoFencing:3 monitor on test03 * Resource action: child_DoFencing:3 monitor on test02 * Resource action: child_DoFencing:3 monitor on sgi2 * Resource action: child_DoFencing:3 monitor on ibm1 * Pseudo action: DoFencing_start_0 * Resource action: rsc_sgi2 start on sgi2 * Resource action: rsc_ibm1 stop on test03 * Resource action: rsc_test02 start on test02 * Resource action: child_DoFencing:0 start on test02 * Resource action: child_DoFencing:1 start on test03 * Resource action: child_DoFencing:2 start on ibm1 * Resource action: child_DoFencing:3 start on sgi2 * Pseudo action: DoFencing_running_0 * Pseudo action: all_stopped * Resource action: rsc_sgi2 monitor=5000 on sgi2 * Resource action: rsc_ibm1 start on ibm1 * Resource action: rsc_test02 monitor=5000 on test02 * Resource action: child_DoFencing:0 monitor=5000 on 
test02 * Resource action: child_DoFencing:1 monitor=5000 on test03 * Resource action: child_DoFencing:2 monitor=5000 on ibm1 * Resource action: child_DoFencing:3 monitor=5000 on sgi2 * Resource action: rsc_ibm1 monitor=5000 on ibm1 Revised cluster status: Online: [ ibm1 sgi2 test02 test03 ] DcIPaddr (ocf::heartbeat:IPaddr): Started test03 rsc_sgi2 (ocf::heartbeat:IPaddr): Started sgi2 rsc_ibm1 (ocf::heartbeat:IPaddr): Started ibm1 rsc_test02 (ocf::heartbeat:IPaddr): Started test02 rsc_test03 (ocf::heartbeat:IPaddr): Started test03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started test02 child_DoFencing:1 (stonith:ssh): Started test03 child_DoFencing:2 (stonith:ssh): Started ibm1 child_DoFencing:3 (stonith:ssh): Started sgi2 diff --git a/pengine/test10/735.summary b/pengine/test10/735.summary index 69538de590..2db520ad39 100644 --- a/pengine/test10/735.summary +++ b/pengine/test10/735.summary @@ -1,51 +1,51 @@ Current cluster status: Online: [ hadev2 hadev3 ] OFFLINE: [ hadev1 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Starting hadev2 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Starting Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Starting child_DoFencing:1 (stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Stopped Transition Summary: - * Move rsc_hadev1 (Started hadev2 -> hadev3) + * Move rsc_hadev1 ( hadev2 -> hadev3 ) * Start rsc_hadev3 (hadev3) * Start child_DoFencing:0 (hadev2) * Start child_DoFencing:1 (hadev3) Executing cluster transition: * Resource action: DcIPaddr monitor on hadev3 * Resource action: rsc_hadev1 stop on hadev2 * Resource action: rsc_hadev1 start on hadev3 * Resource action: rsc_hadev2 monitor on hadev3 * Resource action: rsc_hadev3 start on hadev3 * Resource action: child_DoFencing:0 monitor on hadev3 * Resource action: child_DoFencing:2 monitor on hadev3 * Pseudo action: DoFencing_start_0 * Pseudo action: all_stopped * Resource action: rsc_hadev1 monitor=5000 on hadev3 * Resource action: rsc_hadev3 monitor=5000 on hadev3 * Resource action: child_DoFencing:0 start on hadev2 * Resource action: child_DoFencing:1 start on hadev3 * Pseudo action: DoFencing_running_0 * Resource action: child_DoFencing:0 monitor=5000 on hadev2 * Resource action: child_DoFencing:1 monitor=5000 on hadev3 Revised cluster status: Online: [ hadev2 hadev3 ] OFFLINE: [ hadev1 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev3 child_DoFencing:2 (stonith:ssh): Stopped diff --git a/pengine/test10/764.summary b/pengine/test10/764.summary index 0d5c612f5b..ea62931963 100644 --- a/pengine/test10/764.summary +++ b/pengine/test10/764.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ posic041 posic043 ] OFFLINE: [ posic042 posic044 ] DcIPaddr (ocf::heartbeat:IPaddr): Started posic043 rsc_posic041 (ocf::heartbeat:IPaddr): Started posic041 rsc_posic042 (ocf::heartbeat:IPaddr): Started posic041 rsc_posic043 (ocf::heartbeat:IPaddr): Started posic043 rsc_posic044 (ocf::heartbeat:IPaddr): Starting posic041 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started posic043 child_DoFencing:1 (stonith:ssh): Started 
posic041 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: - * Stop DcIPaddr (Started posic043) due to no quorum - * Stop rsc_posic041 (Started posic041) due to no quorum - * Stop rsc_posic042 (Started posic041) due to no quorum - * Stop rsc_posic043 (Started posic043) due to no quorum - * Stop rsc_posic044 (Started posic041) due to no quorum + * Stop DcIPaddr ( posic043 ) due to no quorum + * Stop rsc_posic041 ( posic041 ) due to no quorum + * Stop rsc_posic042 ( posic041 ) due to no quorum + * Stop rsc_posic043 ( posic043 ) due to no quorum + * Stop rsc_posic044 ( posic041 ) due to no quorum Executing cluster transition: * Resource action: DcIPaddr monitor on posic041 * Resource action: rsc_posic041 monitor on posic043 * Resource action: rsc_posic042 monitor on posic043 * Resource action: rsc_posic043 monitor on posic041 * Resource action: rsc_posic044 monitor on posic043 * Resource action: child_DoFencing:0 monitor=5000 on posic043 * Resource action: child_DoFencing:1 monitor=5000 on posic041 * Resource action: child_DoFencing:1 monitor on posic043 * Resource action: child_DoFencing:2 monitor on posic041 * Resource action: child_DoFencing:3 monitor on posic041 * Resource action: DcIPaddr stop on posic043 * Resource action: rsc_posic041 stop on posic041 * Resource action: rsc_posic042 stop on posic041 * Resource action: rsc_posic043 stop on posic043 * Resource action: rsc_posic044 stop on posic041 * Pseudo action: all_stopped Revised cluster status: Online: [ posic041 posic043 ] OFFLINE: [ posic042 posic044 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped rsc_posic041 (ocf::heartbeat:IPaddr): Stopped rsc_posic042 (ocf::heartbeat:IPaddr): Stopped rsc_posic043 (ocf::heartbeat:IPaddr): Stopped rsc_posic044 (ocf::heartbeat:IPaddr): Started posic041 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started posic043 child_DoFencing:1 (stonith:ssh): Started posic041 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/797.summary b/pengine/test10/797.summary index 9e9400359e..61afbf1410 100644 --- a/pengine/test10/797.summary +++ b/pengine/test10/797.summary @@ -1,73 +1,73 @@ Current cluster status: Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline) Online: [ c001n01 c001n02 c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started (Monitoring)[ c001n01 c001n03 ] child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: * Shutdown c001n02 - * Stop DcIPaddr (Started c001n03) due to no quorum - * Stop rsc_c001n08 (Started c001n02) due to no quorum - * Stop rsc_c001n02 (Started c001n02) due to no quorum - * Stop rsc_c001n03 (Started c001n03) due to no quorum - * Stop rsc_c001n01 (Started c001n01) due to no quorum - * Restart child_DoFencing:0 (Started c001n01) + * Stop DcIPaddr ( c001n03 ) due to no quorum + * Stop rsc_c001n08 ( c001n02 ) due to no quorum + * Stop rsc_c001n02 ( c001n02 ) due to no quorum + * Stop rsc_c001n03 ( c001n03 ) due to no quorum + * Stop rsc_c001n01 ( c001n01 ) due to no quorum + * Restart child_DoFencing:0 ( c001n01 ) * 
Stop child_DoFencing:1 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n02 * Resource action: DcIPaddr monitor on c001n01 * Resource action: DcIPaddr stop on c001n03 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n01 * Pseudo action: DoFencing_stop_0 * Resource action: DcIPaddr delete on c001n03 * Resource action: rsc_c001n08 stop on c001n02 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n03 stop on c001n03 * Resource action: rsc_c001n01 stop on c001n01 * Resource action: child_DoFencing:0 stop on c001n03 * Resource action: child_DoFencing:0 stop on c001n01 * Resource action: child_DoFencing:1 stop on c001n02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Resource action: child_DoFencing:0 start on c001n01 * Resource action: child_DoFencing:0 monitor=5000 on c001n01 * Pseudo action: DoFencing_running_0 Revised cluster status: Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline) Online: [ c001n01 c001n02 c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/8-am-then-bm-a-migrating-b-stopping.summary b/pengine/test10/8-am-then-bm-a-migrating-b-stopping.summary index 2aa3f07dee..06a1356914 100644 --- a/pengine/test10/8-am-then-bm-a-migrating-b-stopping.summary +++ b/pengine/test10/8-am-then-bm-a-migrating-b-stopping.summary @@ -1,27 +1,27 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 ( disabled ) Transition Summary: - * Migrate A (Started 18node1 -> 18node2) + * Migrate A ( 18node1 -> 18node2 ) * Stop B (18node2) Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: A_start_0 * Resource action: A monitor=60000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/829.summary b/pengine/test10/829.summary index feca908f08..8cc27dc667 100644 --- a/pengine/test10/829.summary +++ b/pengine/test10/829.summary @@ -1,64 +1,64 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n01 c001n03 c001n08 ] DcIPaddr 
(ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 (UNCLEAN) rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 Transition Summary: * Fence (reboot) c001n02 'peer is no longer part of the cluster' - * Move rsc_c001n02 (Started c001n02 -> c001n01) + * Move rsc_c001n02 ( c001n02 -> c001n01 ) * Stop child_DoFencing:0 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n01 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Fencing c001n02 (reboot) * Pseudo action: rsc_c001n02_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: stonith_complete * Resource action: rsc_c001n02 start on c001n01 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: all_stopped * Resource action: rsc_c001n02 monitor=5000 on c001n01 Revised cluster status: Online: [ c001n01 c001n03 c001n08 ] OFFLINE: [ c001n02 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 diff --git a/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary b/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary index 44fc1a2261..250d888d2b 100644 --- a/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary +++ b/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary @@ -1,23 +1,23 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 ( disabled ) B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: * Stop A (18node1) due to node availability - * Stop B (Started 18node2) due to unrunnable A start + * Stop B ( 18node2 ) due to unrunnable A start Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Stopped ( disabled ) B 
(ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/994-2.summary b/pengine/test10/994-2.summary index a1d477f5c4..0f7def7cc8 100644 --- a/pengine/test10/994-2.summary +++ b/pengine/test10/994-2.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): FAILED paul depends (lsb:postfix): Started paul Transition Summary: - * Recover postfix_9 (Started paul) - * Restart depends (Started paul) due to required group_1 running + * Recover postfix_9 ( paul ) + * Restart depends ( paul ) due to required group_1 running Executing cluster transition: * Resource action: depends stop on paul * Pseudo action: group_1_stop_0 * Resource action: postfix_9 stop on paul * Pseudo action: all_stopped * Pseudo action: group_1_stopped_0 * Pseudo action: group_1_start_0 * Resource action: postfix_9 start on paul * Resource action: postfix_9 monitor=120000 on paul * Pseudo action: group_1_running_0 * Resource action: depends start on paul Revised cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): Started paul depends (lsb:postfix): Started paul diff --git a/pengine/test10/994.summary b/pengine/test10/994.summary index 6e8e4a2c16..4f0c42f5c2 100644 --- a/pengine/test10/994.summary +++ b/pengine/test10/994.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): FAILED paul Transition Summary: - * Recover postfix_9 (Started paul) + * Recover postfix_9 ( paul ) Executing cluster transition: * Pseudo action: group_1_stop_0 * Resource action: postfix_9 stop on paul * Pseudo action: all_stopped * Pseudo action: group_1_stopped_0 * Pseudo action: group_1_start_0 * Resource action: postfix_9 start on paul * Resource action: postfix_9 monitor=120000 on paul * Pseudo action: group_1_running_0 Revised cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): Started paul diff --git a/pengine/test10/a-demote-then-b-migrate.summary b/pengine/test10/a-demote-then-b-migrate.summary index 84b250ef2a..9e461e8dfc 100644 --- a/pengine/test10/a-demote-then-b-migrate.summary +++ b/pengine/test10/a-demote-then-b-migrate.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Demote rsc1:0 (Master -> Slave node1) + * Demote rsc1:0 ( Master -> Slave node1 ) * Promote rsc1:1 (Slave -> Master node2) - * Migrate rsc2 (Started node1 -> node2) + * Migrate rsc2 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1:1 cancel=5000 on node1 * Resource action: rsc1:0 cancel=10000 on node2 * Pseudo action: ms1_pre_notify_demote_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_demote_0 * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote 
on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_post_notify_demoted_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-post_notify_demoted_0 * Pseudo action: ms1_pre_notify_promote_0 * Resource action: rsc2 migrate_to on node1 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_promote_0 * Resource action: rsc2 migrate_from on node2 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: rsc2_start_0 * Pseudo action: ms1_promote_0 * Resource action: rsc2 monitor=5000 on node2 * Resource action: rsc1:0 promote on node2 * Pseudo action: ms1_promoted_0 * Pseudo action: ms1_post_notify_promoted_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-post_notify_promoted_0 * Resource action: rsc1:1 monitor=10000 on node1 * Resource action: rsc1:0 monitor=5000 on node2 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node2 ] Slaves: [ node1 ] rsc2 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/a-promote-then-b-migrate.summary b/pengine/test10/a-promote-then-b-migrate.summary index c7d791b104..166b7b0b09 100644 --- a/pengine/test10/a-promote-then-b-migrate.summary +++ b/pengine/test10/a-promote-then-b-migrate.summary @@ -1,41 +1,41 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Promote rsc1:1 (Slave -> Master node2) - * Migrate rsc2 (Started node1 -> node2) + * Migrate rsc2 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1:1 cancel=10000 on node2 * Pseudo action: ms1_pre_notify_promote_0 * Resource action: rsc1:0 notify on node1 * Resource action: rsc1:1 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_promote_0 * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node2 * Pseudo action: ms1_promoted_0 * Pseudo action: ms1_post_notify_promoted_0 * Resource action: rsc1:0 notify on node1 * Resource action: rsc1:1 notify on node2 * Pseudo action: ms1_confirmed-post_notify_promoted_0 * Resource action: rsc2 migrate_to on node1 * Resource action: rsc1:1 monitor=5000 on node2 * Resource action: rsc2 migrate_from on node2 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: rsc2_start_0 * Resource action: rsc2 monitor=5000 on node2 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node1 node2 ] rsc2 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/anti-colocation-master.summary b/pengine/test10/anti-colocation-master.summary index 31fd635c46..df4c4ed991 100644 --- a/pengine/test10/anti-colocation-master.summary +++ b/pengine/test10/anti-colocation-master.summary @@ -1,37 +1,37 @@ Using the original execution date of: 2016-04-29 09:06:59Z Current cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Started sle12sp2-2 Master/Slave Set: ms1 [state1] Masters: [ sle12sp2-1 ] Slaves: [ sle12sp2-2 ] Transition Summary: - * Move dummy1 (Started sle12sp2-2 -> sle12sp2-1) + * Move dummy1 ( sle12sp2-2 -> sle12sp2-1 ) * Promote state1:0 (Slave -> Master sle12sp2-2) - * Demote state1:1 (Master -> Slave sle12sp2-1) + * Demote state1:1 ( Master -> Slave sle12sp2-1 ) Executing cluster 
transition: * Resource action: dummy1 stop on sle12sp2-2 * Pseudo action: ms1_demote_0 * Pseudo action: all_stopped * Resource action: state1 demote on sle12sp2-1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_promote_0 * Resource action: dummy1 start on sle12sp2-1 * Resource action: state1 promote on sle12sp2-2 * Pseudo action: ms1_promoted_0 Using the original execution date of: 2016-04-29 09:06:59Z Revised cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Started sle12sp2-1 Master/Slave Set: ms1 [state1] Masters: [ sle12sp2-2 ] Slaves: [ sle12sp2-1 ] diff --git a/pengine/test10/anti-colocation-order.summary b/pengine/test10/anti-colocation-order.summary index 052043a5fb..4f03a687ce 100644 --- a/pengine/test10/anti-colocation-order.summary +++ b/pengine/test10/anti-colocation-order.summary @@ -1,44 +1,44 @@ Current cluster status: Node node1: standby Online: [ node2 ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Resource Group: group2 rsc3 (ocf::pacemaker:Dummy): Started node2 rsc4 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) * Stop rsc3 (node2) due to node availability * Stop rsc4 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node1 * Pseudo action: group2_stop_0 * Resource action: rsc4 stop on node2 * Resource action: rsc1 stop on node1 * Resource action: rsc3 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 * Pseudo action: group2_stopped_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Pseudo action: group1_running_0 Revised cluster status: Node node1: standby Online: [ node2 ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Resource Group: group2 rsc3 (ocf::pacemaker:Dummy): Stopped rsc4 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/anti-colocation-slave.summary b/pengine/test10/anti-colocation-slave.summary index 9179e9faa0..0d77064db7 100644 --- a/pengine/test10/anti-colocation-slave.summary +++ b/pengine/test10/anti-colocation-slave.summary @@ -1,35 +1,35 @@ Current cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-1 Master/Slave Set: ms1 [state1] Masters: [ sle12sp2-1 ] Slaves: [ sle12sp2-2 ] dummy1 (ocf::pacemaker:Dummy): Started sle12sp2-1 Transition Summary: - * Demote state1:0 (Master -> Slave sle12sp2-1) + * Demote state1:0 ( Master -> Slave sle12sp2-1 ) * Promote state1:1 (Slave -> Master sle12sp2-2) - * Move dummy1 (Started sle12sp2-1 -> sle12sp2-2) + * Move dummy1 ( sle12sp2-1 -> sle12sp2-2 ) Executing cluster transition: * Resource action: dummy1 stop on sle12sp2-1 * Pseudo action: all_stopped * Pseudo action: ms1_demote_0 * Resource action: state1 demote on sle12sp2-1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_promote_0 * Resource action: state1 promote on sle12sp2-2 * Pseudo action: ms1_promoted_0 * Resource action: dummy1 start on sle12sp2-2 Revised cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-1 Master/Slave Set: ms1 [state1] Masters: [ sle12sp2-2 ] Slaves: [ sle12sp2-1 ] dummy1 (ocf::pacemaker:Dummy): Started 
sle12sp2-2 diff --git a/pengine/test10/asymmetrical-order-move.summary b/pengine/test10/asymmetrical-order-move.summary index 503813cb40..34869d595b 100644 --- a/pengine/test10/asymmetrical-order-move.summary +++ b/pengine/test10/asymmetrical-order-move.summary @@ -1,25 +1,25 @@ Using the original execution date of: 2016-04-28 11:50:29Z 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Stopped ( disabled ) dummy2 (ocf::pacemaker:Dummy): Started sle12sp2-1 Transition Summary: - * Stop dummy2 (Started sle12sp2-1) due to unrunnable dummy1 start + * Stop dummy2 ( sle12sp2-1 ) due to unrunnable dummy1 start Executing cluster transition: * Resource action: dummy2 stop on sle12sp2-1 * Pseudo action: all_stopped Using the original execution date of: 2016-04-28 11:50:29Z Revised cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Stopped ( disabled ) dummy2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/bug-1572-1.summary b/pengine/test10/bug-1572-1.summary index 6a2461412f..771598057b 100644 --- a/pengine/test10/bug-1572-1.summary +++ b/pengine/test10/bug-1572-1.summary @@ -1,85 +1,85 @@ Current cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Slaves: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: * Shutdown arc-dknightlx - * Stop rsc_drbd_7788:0 (arc-dknightlx) due to node availability + * Stop rsc_drbd_7788:0 ( Slave arc-dknightlx ) due to node availability * Restart rsc_drbd_7788:1 (Master arc-tkincaidlx.wsicorp.com) - * Restart fs_mirror (Started arc-tkincaidlx.wsicorp.com) due to required ms_drbd_7788 notified - * Restart pgsql_5555 (Started arc-tkincaidlx.wsicorp.com) due to required fs_mirror start - * Restart IPaddr_147_81_84_133 (Started arc-tkincaidlx.wsicorp.com) due to required pgsql_5555 start + * Restart fs_mirror ( arc-tkincaidlx.wsicorp.com ) due to required ms_drbd_7788 notified + * Restart pgsql_5555 ( arc-tkincaidlx.wsicorp.com ) due to required fs_mirror start + * Restart IPaddr_147_81_84_133 ( arc-tkincaidlx.wsicorp.com ) due to required pgsql_5555 start Executing cluster transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: 
ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_7788_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_7788_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_7788_start_0 * Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_running_0 * Pseudo action: ms_drbd_7788_post_notify_running_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_7788_pre_notify_promote_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_7788_promote_0 * Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_promoted_0 * Pseudo action: ms_drbd_7788_post_notify_promoted_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_promoted_0 * Pseudo action: grp_pgsql_mirror_start_0 * Resource action: fs_mirror start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 monitor=30000 on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_running_0 Revised cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Stopped: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com diff --git a/pengine/test10/bug-1572-2.summary b/pengine/test10/bug-1572-2.summary index 96574cff2e..9d2b8854d3 100644 --- a/pengine/test10/bug-1572-2.summary +++ b/pengine/test10/bug-1572-2.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Slaves: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: * Shutdown arc-dknightlx - * Stop rsc_drbd_7788:0 (arc-dknightlx) due to node availability + * Stop rsc_drbd_7788:0 ( Slave arc-dknightlx ) due to node availability * Demote rsc_drbd_7788:1 (Master -> Slave arc-tkincaidlx.wsicorp.com) * Stop fs_mirror (arc-tkincaidlx.wsicorp.com) due to node availability * Stop pgsql_5555 
(arc-tkincaidlx.wsicorp.com) due to node availability * Stop IPaddr_147_81_84_133 (arc-tkincaidlx.wsicorp.com) due to node availability Executing cluster transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Slaves: [ arc-tkincaidlx.wsicorp.com ] Stopped: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Stopped pgsql_5555 (ocf::heartbeat:pgsql): Stopped IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Stopped diff --git a/pengine/test10/bug-1718.summary b/pengine/test10/bug-1718.summary index b539e4e856..aec182ee78 100644 --- a/pengine/test10/bug-1718.summary +++ b/pengine/test10/bug-1718.summary @@ -1,42 +1,42 @@ 2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ] OFFLINE: [ defiant.ds9 warbird.ds9 ] Resource Group: Web_Group Apache_IP (ocf::heartbeat:IPaddr): Started heartbeat.ds9 resource_IP2 (ocf::heartbeat:IPaddr): Stopped ( disabled ) resource_dummyweb (ocf::heartbeat:Dummy): Stopped Resource Group: group_fUN resource_IP3 (ocf::heartbeat:IPaddr): Started ops.ds9 resource_dummy (ocf::heartbeat:Dummy): Started ops.ds9 Transition Summary: - * Stop resource_IP3 (Started ops.ds9) due to unrunnable Web_Group running - * Stop resource_dummy (Started ops.ds9) due to required resource_IP3 start + * Stop resource_IP3 ( ops.ds9 ) due to unrunnable Web_Group running + * Stop resource_dummy ( ops.ds9 ) due to required resource_IP3 start Executing cluster transition: * Pseudo action: group_fUN_stop_0 * Resource action: resource_dummy stop on ops.ds9 * Resource action: OpenVPN_IP delete on ops.ds9 * Resource action: OpenVPN_IP delete on heartbeat.ds9 * Resource action: Apache delete on biggame.ds9 * Resource action: Apache delete on ops.ds9 * Resource action: Apache delete on heartbeat.ds9 * Resource action: resource_IP3 stop on ops.ds9 * Pseudo action: 
all_stopped * Pseudo action: group_fUN_stopped_0 Revised cluster status: Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ] OFFLINE: [ defiant.ds9 warbird.ds9 ] Resource Group: Web_Group Apache_IP (ocf::heartbeat:IPaddr): Started heartbeat.ds9 resource_IP2 (ocf::heartbeat:IPaddr): Stopped ( disabled ) resource_dummyweb (ocf::heartbeat:Dummy): Stopped Resource Group: group_fUN resource_IP3 (ocf::heartbeat:IPaddr): Stopped resource_dummy (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/bug-1820-1.summary b/pengine/test10/bug-1820-1.summary index db41ac4e44..2b0673a4a1 100644 --- a/pengine/test10/bug-1820-1.summary +++ b/pengine/test10/bug-1820-1.summary @@ -1,44 +1,44 @@ Current cluster status: Online: [ star world ] p1 (ocf::heartbeat:Xen): Stopped Resource Group: gr1 test1 (ocf::heartbeat:Xen): Started star test2 (ocf::heartbeat:Xen): Started star Transition Summary: * Shutdown star * Start p1 (world) - * Migrate test1 (Started star -> world) - * Migrate test2 (Started star -> world) + * Migrate test1 ( star -> world ) + * Migrate test2 ( star -> world ) Executing cluster transition: * Resource action: p1 monitor on world * Resource action: p1 monitor on star * Pseudo action: gr1_stop_0 * Resource action: test1 migrate_to on star * Resource action: p1 start on world * Resource action: test1 migrate_from on world * Resource action: test2 migrate_to on star * Resource action: test2 migrate_from on world * Resource action: test2 stop on star * Resource action: test1 stop on star * Cluster action: do_shutdown on star * Pseudo action: all_stopped * Pseudo action: gr1_stopped_0 * Pseudo action: gr1_start_0 * Pseudo action: test1_start_0 * Pseudo action: test2_start_0 * Pseudo action: gr1_running_0 * Resource action: test1 monitor=10000 on world * Resource action: test2 monitor=10000 on world Revised cluster status: Online: [ star world ] p1 (ocf::heartbeat:Xen): Started world Resource Group: gr1 test1 (ocf::heartbeat:Xen): Started world test2 (ocf::heartbeat:Xen): Started world diff --git a/pengine/test10/bug-1820.summary b/pengine/test10/bug-1820.summary index 6d9c021c2f..e13f8ad06c 100644 --- a/pengine/test10/bug-1820.summary +++ b/pengine/test10/bug-1820.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ star world ] Resource Group: gr1 test1 (ocf::heartbeat:Xen): Started star test2 (ocf::heartbeat:Xen): Started star Transition Summary: * Shutdown star - * Migrate test1 (Started star -> world) - * Migrate test2 (Started star -> world) + * Migrate test1 ( star -> world ) + * Migrate test2 ( star -> world ) Executing cluster transition: * Pseudo action: gr1_stop_0 * Resource action: test1 migrate_to on star * Resource action: test1 migrate_from on world * Resource action: test2 migrate_to on star * Resource action: test2 migrate_from on world * Resource action: test2 stop on star * Resource action: test1 stop on star * Cluster action: do_shutdown on star * Pseudo action: all_stopped * Pseudo action: gr1_stopped_0 * Pseudo action: gr1_start_0 * Pseudo action: test1_start_0 * Pseudo action: test2_start_0 * Pseudo action: gr1_running_0 * Resource action: test1 monitor=10000 on world * Resource action: test2 monitor=10000 on world Revised cluster status: Online: [ star world ] Resource Group: gr1 test1 (ocf::heartbeat:Xen): Started world test2 (ocf::heartbeat:Xen): Started world diff --git a/pengine/test10/bug-1822.summary b/pengine/test10/bug-1822.summary index 325e408231..5bf91b9858 100644 --- a/pengine/test10/bug-1822.summary +++ b/pengine/test10/bug-1822.summary @@ -1,44 +1,44 @@ 
Current cluster status: Online: [ process1a process2b ] Master/Slave Set: ms-sf [ms-sf_group] (unique) Resource Group: ms-sf_group:0 master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped Resource Group: ms-sf_group:1 master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Master process1a master_slave_procdctl:1 (ocf::heartbeat:procdctl): Master process1a Transition Summary: * Shutdown process1a - * Demote master_slave_Stateful:1 (Master -> Stopped process1a) - * Demote master_slave_procdctl:1 (Master -> Stopped process1a) + * Stop master_slave_Stateful:1 ( Master process1a ) due to node availability + * Stop master_slave_procdctl:1 ( Master process1a ) due to node availability Executing cluster transition: * Pseudo action: ms-sf_demote_0 * Pseudo action: ms-sf_group:1_demote_0 * Resource action: master_slave_Stateful:1 demote on process1a * Resource action: master_slave_procdctl:1 demote on process1a * Pseudo action: ms-sf_group:1_demoted_0 * Pseudo action: ms-sf_demoted_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: ms-sf_group:1_stop_0 * Resource action: master_slave_Stateful:1 stop on process1a * Resource action: master_slave_procdctl:1 stop on process1a * Cluster action: do_shutdown on process1a * Pseudo action: all_stopped * Pseudo action: ms-sf_group:1_stopped_0 * Pseudo action: ms-sf_stopped_0 Revised cluster status: Online: [ process1a process2b ] Master/Slave Set: ms-sf [ms-sf_group] (unique) Resource Group: ms-sf_group:0 master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped Resource Group: ms-sf_group:1 master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Stopped master_slave_procdctl:1 (ocf::heartbeat:procdctl): Stopped diff --git a/pengine/test10/bug-5007-masterslave_colocation.summary b/pengine/test10/bug-5007-masterslave_colocation.summary index 06a81e19af..adbc1f1430 100644 --- a/pengine/test10/bug-5007-masterslave_colocation.summary +++ b/pengine/test10/bug-5007-masterslave_colocation.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MS_DUMMY [DUMMY] Masters: [ fc16-builder ] Slaves: [ fc16-builder2 ] SLAVE_IP (ocf::pacemaker:Dummy): Started fc16-builder MASTER_IP (ocf::pacemaker:Dummy): Started fc16-builder2 Transition Summary: - * Move SLAVE_IP (Started fc16-builder -> fc16-builder2) - * Move MASTER_IP (Started fc16-builder2 -> fc16-builder) + * Move SLAVE_IP ( fc16-builder -> fc16-builder2 ) + * Move MASTER_IP ( fc16-builder2 -> fc16-builder ) Executing cluster transition: * Resource action: SLAVE_IP stop on fc16-builder * Resource action: MASTER_IP stop on fc16-builder2 * Pseudo action: all_stopped * Resource action: SLAVE_IP start on fc16-builder2 * Resource action: MASTER_IP start on fc16-builder Revised cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MS_DUMMY [DUMMY] Masters: [ fc16-builder ] Slaves: [ fc16-builder2 ] SLAVE_IP (ocf::pacemaker:Dummy): Started fc16-builder2 MASTER_IP (ocf::pacemaker:Dummy): Started fc16-builder diff --git a/pengine/test10/bug-5014-A-stopped-B-stopped.summary b/pengine/test10/bug-5014-A-stopped-B-stopped.summary index 95e5b60e71..ba0a5e179a 100644 --- a/pengine/test10/bug-5014-A-stopped-B-stopped.summary +++ b/pengine/test10/bug-5014-A-stopped-B-stopped.summary @@ -1,21 +1,21 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder 
] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped Transition Summary: - * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable ClusterIP start + * Start ClusterIP2 ( fc16-builder ) due to unrunnable ClusterIP start (blocked) Executing cluster transition: * Resource action: ClusterIP monitor on fc16-builder * Resource action: ClusterIP2 monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary b/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary index fe12fe63f2..d15d1b228a 100644 --- a/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary +++ b/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary @@ -1,25 +1,25 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped ClusterIP3 (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Transition Summary: - * Start ClusterIP (fc16-builder - blocked) due to unrunnable ClusterIP3 start - * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable ClusterIP start + * Start ClusterIP ( fc16-builder ) due to unrunnable ClusterIP3 start (blocked) + * Start ClusterIP2 ( fc16-builder ) due to unrunnable ClusterIP start (blocked) Executing cluster transition: * Resource action: ClusterIP monitor on fc16-builder * Resource action: ClusterIP2 monitor on fc16-builder * Resource action: ClusterIP3 monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped ClusterIP3 (ocf::heartbeat:IPaddr2): Stopped ( disabled ) diff --git a/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary b/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary index f9b64485af..aa4b69949a 100644 --- a/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary +++ b/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary @@ -1,23 +1,23 @@ 2 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped Transition Summary: - * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable group1 running + * Start ClusterIP2 ( fc16-builder ) due to unrunnable group1 running (blocked) Executing cluster transition: Revised cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary b/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary index 7d94d606ff..7c28340dd5 100644 --- a/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary +++ b/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary @@ -1,27 +1,27 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop A (Started fc16-builder) due 
to required C start + * Stop A ( fc16-builder ) due to required C start * Stop C (fc16-builder) due to node availability Executing cluster transition: * Resource action: A stop on fc16-builder * Resource action: C stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/bug-5025-1.summary b/pengine/test10/bug-5025-1.summary index 9f9baa2fda..34f7b340bc 100644 --- a/pengine/test10/bug-5025-1.summary +++ b/pengine/test10/bug-5025-1.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 fc16-builder3 ] virt-fencing (stonith:fence_xvm): Started fc16-builder A (ocf::pacemaker:Dummy): Started fc16-builder Transition Summary: - * Reload A (Started fc16-builder) + * Reload A ( fc16-builder ) Executing cluster transition: * Cluster action: clear_failcount for A on fc16-builder * Resource action: A reload on fc16-builder * Resource action: A monitor=30000 on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 fc16-builder3 ] virt-fencing (stonith:fence_xvm): Started fc16-builder A (ocf::pacemaker:Dummy): Started fc16-builder diff --git a/pengine/test10/bug-5025-3.summary b/pengine/test10/bug-5025-3.summary index 0d843d2485..9072771802 100644 --- a/pengine/test10/bug-5025-3.summary +++ b/pengine/test10/bug-5025-3.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 fc16-builder3 ] virt-fencing (stonith:fence_xvm): Stopped A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder Transition Summary: - * Restart A (Started fc16-builder) + * Restart A ( fc16-builder ) Executing cluster transition: * Resource action: A stop on fc16-builder * Cluster action: clear_failcount for A on fc16-builder * Resource action: A start on fc16-builder * Resource action: A monitor=30000 on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 fc16-builder3 ] virt-fencing (stonith:fence_xvm): Stopped A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder diff --git a/pengine/test10/bug-5028.summary b/pengine/test10/bug-5028.summary index a85f75b403..f1c6f63279 100644 --- a/pengine/test10/bug-5028.summary +++ b/pengine/test10/bug-5028.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ bl460g6a bl460g6b ] Resource Group: dummy-g dummy01 (ocf::heartbeat:Dummy): Started bl460g6a dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked ) Transition Summary: * Shutdown bl460g6a - * Stop dummy01 (Started bl460g6a - blocked) + * Stop dummy01 ( bl460g6a ) blocked Executing cluster transition: * Pseudo action: dummy-g_stop_0 * Pseudo action: dummy-g_start_0 Revised cluster status: Online: [ bl460g6a bl460g6b ] Resource Group: dummy-g dummy01 (ocf::heartbeat:Dummy): Started bl460g6a dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked ) diff --git a/pengine/test10/bug-5059.summary b/pengine/test10/bug-5059.summary index 36a5c67d5b..3122cf9d56 100644 --- a/pengine/test10/bug-5059.summary +++ b/pengine/test10/bug-5059.summary @@ -1,76 +1,75 @@ Current cluster status: Node gluster03.h: standby Online: [ gluster01.h gluster02.h ] OFFLINE: [ gluster04.h ] Master/Slave Set: ms_stateful [g_stateful] Resource Group: g_stateful:0 
p_stateful1 (ocf::pacemaker:Stateful): Slave gluster01.h p_stateful2 (ocf::pacemaker:Stateful): Stopped Resource Group: g_stateful:1 p_stateful1 (ocf::pacemaker:Stateful): Slave gluster02.h p_stateful2 (ocf::pacemaker:Stateful): Stopped Stopped: [ gluster03.h gluster04.h ] Clone Set: c_dummy [p_dummy1] Started: [ gluster01.h gluster02.h ] Transition Summary: * Promote p_stateful1:0 (Slave -> Master gluster01.h) - * Start p_stateful2:0 (gluster01.h) * Promote p_stateful2:0 (Stopped -> Master gluster01.h) * Start p_stateful2:1 (gluster02.h) Executing cluster transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: iptest delete on gluster02.h * Resource action: ipsrc2 delete on gluster02.h * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Pseudo action: g_stateful:0_start_0 * Resource action: p_stateful2:0 start on gluster01.h * Pseudo action: g_stateful:1_start_0 * Resource action: p_stateful2:1 start on gluster02.h * Pseudo action: g_stateful:0_running_0 * Pseudo action: g_stateful:1_running_0 * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-post_notify_running_0 * Pseudo action: ms_stateful_pre_notify_promote_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_promote_0 * Pseudo action: ms_stateful_promote_0 * Pseudo action: g_stateful:0_promote_0 * Resource action: p_stateful1:0 promote on gluster01.h * Resource action: p_stateful2:0 promote on gluster01.h * Pseudo action: g_stateful:0_promoted_0 * Pseudo action: ms_stateful_promoted_0 * Pseudo action: ms_stateful_post_notify_promoted_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-post_notify_promoted_0 * Resource action: p_stateful1:1 monitor=10000 on gluster02.h * Resource action: p_stateful2:1 monitor=10000 on gluster02.h Revised cluster status: Node gluster03.h: standby Online: [ gluster01.h gluster02.h ] OFFLINE: [ gluster04.h ] Master/Slave Set: ms_stateful [g_stateful] Masters: [ gluster01.h ] Slaves: [ gluster02.h ] Clone Set: c_dummy [p_dummy1] Started: [ gluster01.h gluster02.h ] diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary index cd01706253..4d7f0bf3ae 100644 --- a/pengine/test10/bug-5186-partial-migrate.summary +++ b/pengine/test10/bug-5186-partial-migrate.summary @@ -1,91 +1,91 @@ Current cluster status: Node bl460g1n7 (3232261593): UNCLEAN (offline) Online: [ bl460g1n6 bl460g1n8 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n7 (UNCLEAN) prmVM2 (ocf::heartbeat:VirtualDomain): Migrating bl460g1n7 (UNCLEAN) Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 
Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN) prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN) Clone Set: clnDiskd1 [prmDiskd1] prmDiskd1 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnDiskd2 [prmDiskd2] prmDiskd2 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnPing [prmPing] prmPing (ocf::pacemaker:ping): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Transition Summary: * Fence (reboot) bl460g1n7 'prmDummy is thought to be active there' - * Move prmDummy (Started bl460g1n7 -> bl460g1n6) - * Move prmVM2 (Started bl460g1n7 -> bl460g1n8) - * Move prmStonith8-1 (Started bl460g1n7 -> bl460g1n6) - * Move prmStonith8-2 (Started bl460g1n7 -> bl460g1n6) + * Move prmDummy ( bl460g1n7 -> bl460g1n6 ) + * Move prmVM2 ( bl460g1n7 -> bl460g1n8 ) + * Move prmStonith8-1 ( bl460g1n7 -> bl460g1n6 ) + * Move prmStonith8-2 ( bl460g1n7 -> bl460g1n6 ) * Stop prmDiskd1:0 (bl460g1n7) due to node availability * Stop prmDiskd2:0 (bl460g1n7) due to node availability * Stop prmPing:0 (bl460g1n7) due to node availability Executing cluster transition: * Resource action: prmVM2 stop on bl460g1n6 * Pseudo action: grpStonith8_stop_0 * Pseudo action: prmStonith8-2_stop_0 * Fencing bl460g1n7 (reboot) * Pseudo action: prmDummy_stop_0 * Pseudo action: prmVM2_stop_0 * Pseudo action: prmStonith8-1_stop_0 * Pseudo action: clnDiskd1_stop_0 * Pseudo action: clnDiskd2_stop_0 * Pseudo action: clnPing_stop_0 * Pseudo action: stonith_complete * Resource action: prmDummy start on bl460g1n6 * Resource action: prmVM2 start on bl460g1n8 * Pseudo action: grpStonith8_stopped_0 * Pseudo action: grpStonith8_start_0 * Resource action: prmStonith8-1 start on bl460g1n6 * Resource action: prmStonith8-2 start on bl460g1n6 * Pseudo action: prmDiskd1_stop_0 * Pseudo action: clnDiskd1_stopped_0 * Pseudo action: prmDiskd2_stop_0 * Pseudo action: clnDiskd2_stopped_0 * Pseudo action: prmPing_stop_0 * Pseudo action: clnPing_stopped_0 * Pseudo action: all_stopped * Resource action: prmVM2 monitor=10000 on bl460g1n8 * Pseudo action: grpStonith8_running_0 * Resource action: prmStonith8-1 monitor=10000 on bl460g1n6 * Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6 Revised cluster status: Online: [ bl460g1n6 bl460g1n8 ] OFFLINE: [ bl460g1n7 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n6 prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n8 Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6 Clone Set: clnDiskd1 [prmDiskd1] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnDiskd2 [prmDiskd2] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnPing [prmPing] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] diff --git a/pengine/test10/bug-cl-5212.summary b/pengine/test10/bug-cl-5212.summary index 92d3af3254..1800f06e51 100644 --- 
a/pengine/test10/bug-cl-5212.summary +++ b/pengine/test10/bug-cl-5212.summary @@ -1,67 +1,67 @@ Current cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Master/Slave Set: msPostgresql [pgsql] pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): Started srv01 (UNCLEAN) Started: [ srv03 ] Transition Summary: - * Stop prmStonith1-1 (Started srv02 - blocked) - * Stop prmStonith2-1 (Started srv01 - blocked) - * Stop prmStonith3-1 (srv01 - blocked) due to node availability - * Stop pgsql:0 (srv02 - blocked) due to node availability - * Demote pgsql:1 (Master -> Stopped srv01 - blocked) - * Stop prmPingd:0 (srv02 - blocked) due to node availability - * Stop prmPingd:1 (srv01 - blocked) due to node availability + * Stop prmStonith1-1 ( srv02 ) blocked + * Stop prmStonith2-1 ( srv01 ) blocked + * Stop prmStonith3-1 ( srv01 ) due to node availability (blocked) + * Stop pgsql:0 ( Slave srv02 ) due to node availability (blocked) + * Stop pgsql:1 ( Master srv01 ) due to node availability (blocked) + * Stop prmPingd:0 ( srv02 ) due to node availability (blocked) + * Stop prmPingd:1 ( srv01 ) due to node availability (blocked) Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stop_0 * Pseudo action: grpStonith2_start_0 * Pseudo action: grpStonith3_stop_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: clnPingd_stop_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: clnPingd_stopped_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: msPostgresql_post_notify_stopped_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 Revised cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Master/Slave Set: msPostgresql [pgsql] pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): Started srv01 (UNCLEAN) Started: [ srv03 ] diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary index 0f328cc7fe..3e5511db9c 100644 --- a/pengine/test10/bug-cl-5247.summary +++ b/pengine/test10/bug-cl-5247.summary @@ -1,101 +1,101 @@ Using the original execution date of: 2015-08-12 02:53:40Z Current cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): FAILED bl460g8n4 Resource Group: 
grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED pgsr02 vip-rep (ocf::heartbeat:Dummy): FAILED pgsr02 Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] Transition Summary: * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean' * Stop prmDB2 (bl460g8n4) due to node availability - * Restart prmStonith1-2 (Started bl460g8n4) - * Restart prmStonith2-2 (Started bl460g8n3) - * Recover vip-master (Started pgsr02 -> pgsr01) - * Recover vip-rep (Started pgsr02 -> pgsr01) - * Demote pgsql:0 (Master -> Stopped pgsr02) + * Restart prmStonith1-2 ( bl460g8n4 ) + * Restart prmStonith2-2 ( bl460g8n3 ) + * Recover vip-master ( pgsr02 -> pgsr01 ) + * Recover vip-rep ( pgsr02 -> pgsr01 ) + * Stop pgsql:0 ( Master pgsr02 ) due to node availability * Stop pgsr02 (bl460g8n4) Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Resource action: prmStonith1-2 stop on bl460g8n4 * Pseudo action: grpStonith2_stop_0 * Resource action: prmStonith2-2 stop on bl460g8n3 * Pseudo action: msPostgresql_pre_notify_demote_0 * Resource action: pgsr01 monitor on bl460g8n4 * Resource action: pgsr02 monitor on bl460g8n3 * Pseudo action: grpStonith1_stopped_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stopped_0 * Pseudo action: grpStonith2_start_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 * Pseudo action: msPostgresql_demote_0 * Resource action: pgsr02 stop on bl460g8n4 * Resource action: prmDB2 stop on bl460g8n4 * Pseudo action: stonith-pgsr02-off on pgsr02 * Pseudo action: stonith_complete * Pseudo action: pgsql_post_notify_stop_0 * Pseudo action: pgsql_demote_0 * Pseudo action: msPostgresql_demoted_0 * Pseudo action: msPostgresql_post_notify_demoted_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: master-group_stop_0 * Pseudo action: vip-rep_stop_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: vip-master_stop_0 * Pseudo action: pgsql_stop_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: master-group_stopped_0 * Pseudo action: master-group_start_0 * Resource action: vip-master start on pgsr01 * Resource action: vip-rep start on pgsr01 * Pseudo action: msPostgresql_post_notify_stopped_0 * Pseudo action: master-group_running_0 * Resource action: vip-master monitor=10000 on pgsr01 * Resource action: vip-rep monitor=10000 on pgsr01 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 * Pseudo action: pgsql_notified_0 * Resource action: pgsql monitor=9000 on pgsr01 * Pseudo action: all_stopped * Resource action: prmStonith1-2 start on bl460g8n4 * Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4 * Resource action: prmStonith2-2 start on bl460g8n3 * Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3 * Pseudo action: grpStonith1_running_0 * Pseudo action: grpStonith2_running_0 Using the original execution date of: 2015-08-12 02:53:40Z Revised cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): 
FAILED Resource Group: grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] vip-rep (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] diff --git a/pengine/test10/bug-lf-2106.summary b/pengine/test10/bug-lf-2106.summary index 9a711250e6..1cea829da5 100644 --- a/pengine/test10/bug-lf-2106.summary +++ b/pengine/test10/bug-lf-2106.summary @@ -1,90 +1,90 @@ Current cluster status: Online: [ cl-virt-1 cl-virt-2 ] apcstonith (stonith:apcmastersnmp): Started cl-virt-1 Clone Set: pingdclone [pingd] Started: [ cl-virt-1 cl-virt-2 ] Resource Group: ssh ssh-ip1 (ocf::heartbeat:IPaddr2): Started cl-virt-2 ssh-ip2 (ocf::heartbeat:IPaddr2): Started cl-virt-2 ssh-bin (ocf::dk:opensshd): Started cl-virt-2 itwiki (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-itwiki [drbd-itwiki] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] bugtrack (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-bugtrack [drbd-bugtrack] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] servsyslog (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-servsyslog [drbd-servsyslog] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] smsprod2 (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-smsprod2 [drbd-smsprod2] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] medomus-cvs (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-medomus-cvs [drbd-medomus-cvs] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] infotos (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-infotos [drbd-infotos] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] Transition Summary: - * Restart pingd:0 (Started cl-virt-1) - * Restart pingd:1 (Started cl-virt-2) + * Restart pingd:0 ( cl-virt-1 ) + * Restart pingd:1 ( cl-virt-2 ) Executing cluster transition: * Cluster action: clear_failcount for pingd on cl-virt-1 * Cluster action: clear_failcount for pingd on cl-virt-2 * Pseudo action: pingdclone_stop_0 * Resource action: pingd:0 stop on cl-virt-1 * Resource action: pingd:0 stop on cl-virt-2 * Pseudo action: pingdclone_stopped_0 * Pseudo action: pingdclone_start_0 * Pseudo action: all_stopped * Resource action: pingd:0 start on cl-virt-1 * Resource action: pingd:0 monitor=30000 on cl-virt-1 * Resource action: pingd:0 start on cl-virt-2 * Resource action: pingd:0 monitor=30000 on cl-virt-2 * Pseudo action: pingdclone_running_0 Revised cluster status: Online: [ cl-virt-1 cl-virt-2 ] apcstonith (stonith:apcmastersnmp): Started cl-virt-1 Clone Set: pingdclone [pingd] Started: [ cl-virt-1 cl-virt-2 ] Resource Group: ssh ssh-ip1 (ocf::heartbeat:IPaddr2): Started cl-virt-2 ssh-ip2 (ocf::heartbeat:IPaddr2): Started cl-virt-2 ssh-bin (ocf::dk:opensshd): Started cl-virt-2 itwiki (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-itwiki [drbd-itwiki] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] bugtrack (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-bugtrack [drbd-bugtrack] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] servsyslog (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-servsyslog [drbd-servsyslog] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] smsprod2 (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-smsprod2 
[drbd-smsprod2] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] medomus-cvs (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-medomus-cvs [drbd-medomus-cvs] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] infotos (ocf::heartbeat:VirtualDomain): Started cl-virt-2 Master/Slave Set: ms-infotos [drbd-infotos] Masters: [ cl-virt-2 ] Slaves: [ cl-virt-1 ] diff --git a/pengine/test10/bug-lf-2153.summary b/pengine/test10/bug-lf-2153.summary index e95713ef7e..01567b5c01 100644 --- a/pengine/test10/bug-lf-2153.summary +++ b/pengine/test10/bug-lf-2153.summary @@ -1,58 +1,58 @@ Current cluster status: Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby Online: [ alice ] Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] Masters: [ alice ] Slaves: [ bob ] Clone Set: cl_tgtd [res_tgtd] Started: [ alice bob ] Resource Group: rg_iscsivg01 res_portblock_iscsivg01_block (ocf::heartbeat:portblock): Started alice res_lvm_iscsivg01 (ocf::heartbeat:LVM): Started alice res_target_iscsivg01 (ocf::heartbeat:iSCSITarget): Started alice res_lu_iscsivg01_lun1 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_lu_iscsivg01_lun2 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_ip_alicebob01 (ocf::heartbeat:IPaddr2): Started alice res_portblock_iscsivg01_unblock (ocf::heartbeat:portblock): Started alice Transition Summary: - * Stop res_drbd_iscsivg01:0 (bob) due to node availability + * Stop res_drbd_iscsivg01:0 ( Slave bob ) due to node availability * Stop res_tgtd:0 (bob) due to node availability Executing cluster transition: * Pseudo action: ms_drbd_iscsivg01_pre_notify_stop_0 * Pseudo action: cl_tgtd_stop_0 * Resource action: res_drbd_iscsivg01:0 notify on bob * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_iscsivg01_stop_0 * Resource action: res_tgtd:0 stop on bob * Pseudo action: cl_tgtd_stopped_0 * Resource action: res_drbd_iscsivg01:0 stop on bob * Pseudo action: ms_drbd_iscsivg01_stopped_0 * Pseudo action: ms_drbd_iscsivg01_post_notify_stopped_0 * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby Online: [ alice ] Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] Masters: [ alice ] Stopped: [ bob ] Clone Set: cl_tgtd [res_tgtd] Started: [ alice ] Stopped: [ bob ] Resource Group: rg_iscsivg01 res_portblock_iscsivg01_block (ocf::heartbeat:portblock): Started alice res_lvm_iscsivg01 (ocf::heartbeat:LVM): Started alice res_target_iscsivg01 (ocf::heartbeat:iSCSITarget): Started alice res_lu_iscsivg01_lun1 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_lu_iscsivg01_lun2 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_ip_alicebob01 (ocf::heartbeat:IPaddr2): Started alice res_portblock_iscsivg01_unblock (ocf::heartbeat:portblock): Started alice diff --git a/pengine/test10/bug-lf-2171.summary b/pengine/test10/bug-lf-2171.summary index 8c1d8a4e3f..41a7b94a49 100644 --- a/pengine/test10/bug-lf-2171.summary +++ b/pengine/test10/bug-lf-2171.summary @@ -1,37 +1,37 @@ 3 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ xenserver1 xenserver2 ] Clone Set: cl_res_Dummy1 [res_Dummy1] Started: [ xenserver1 xenserver2 ] Resource Group: gr_Dummy res_Dummy2 (ocf::heartbeat:Dummy): Started xenserver1 res_Dummy3 (ocf::heartbeat:Dummy): Started xenserver1 
Transition Summary: * Stop res_Dummy1:0 (xenserver1) due to node availability * Stop res_Dummy1:1 (xenserver2) due to node availability - * Stop res_Dummy2 (Started xenserver1) due to unrunnable cl_res_Dummy1 running - * Stop res_Dummy3 (Started xenserver1) due to unrunnable cl_res_Dummy1 running + * Stop res_Dummy2 ( xenserver1 ) due to unrunnable cl_res_Dummy1 running + * Stop res_Dummy3 ( xenserver1 ) due to unrunnable cl_res_Dummy1 running Executing cluster transition: * Pseudo action: gr_Dummy_stop_0 * Resource action: res_Dummy2 stop on xenserver1 * Resource action: res_Dummy3 stop on xenserver1 * Pseudo action: gr_Dummy_stopped_0 * Pseudo action: cl_res_Dummy1_stop_0 * Resource action: res_Dummy1:1 stop on xenserver1 * Resource action: res_Dummy1:0 stop on xenserver2 * Pseudo action: cl_res_Dummy1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ xenserver1 xenserver2 ] Clone Set: cl_res_Dummy1 [res_Dummy1] Stopped (disabled): [ xenserver1 xenserver2 ] Resource Group: gr_Dummy res_Dummy2 (ocf::heartbeat:Dummy): Stopped res_Dummy3 (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/bug-lf-2361.summary b/pengine/test10/bug-lf-2361.summary index 3089e04873..b88cd90ede 100644 --- a/pengine/test10/bug-lf-2361.summary +++ b/pengine/test10/bug-lf-2361.summary @@ -1,42 +1,42 @@ Current cluster status: Online: [ alice.demo bob.demo ] dummy1 (ocf::heartbeat:Dummy): Stopped Master/Slave Set: ms_stateful [stateful] Stopped: [ alice.demo bob.demo ] Clone Set: cl_dummy2 [dummy2] Stopped: [ alice.demo bob.demo ] Transition Summary: * Start stateful:0 (alice.demo) * Start stateful:1 (bob.demo) - * Start dummy2:0 (alice.demo - blocked) due to unrunnable dummy1 start - * Start dummy2:1 (bob.demo - blocked) due to unrunnable dummy1 start + * Start dummy2:0 ( alice.demo ) due to unrunnable dummy1 start (blocked) + * Start dummy2:1 ( bob.demo ) due to unrunnable dummy1 start (blocked) Executing cluster transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: service2:0 delete on alice.demo * Resource action: service2:0 delete on bob.demo * Resource action: service2:1 delete on bob.demo * Resource action: service1 delete on alice.demo * Resource action: service1 delete on bob.demo * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Resource action: stateful:0 start on alice.demo * Resource action: stateful:1 start on bob.demo * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: stateful:0 notify on alice.demo * Resource action: stateful:1 notify on bob.demo * Pseudo action: ms_stateful_confirmed-post_notify_running_0 Revised cluster status: Online: [ alice.demo bob.demo ] dummy1 (ocf::heartbeat:Dummy): Stopped Master/Slave Set: ms_stateful [stateful] Slaves: [ alice.demo bob.demo ] Clone Set: cl_dummy2 [dummy2] Stopped: [ alice.demo bob.demo ] diff --git a/pengine/test10/bug-lf-2435.summary b/pengine/test10/bug-lf-2435.summary index a53834296b..bb07089e1b 100644 --- a/pengine/test10/bug-lf-2435.summary +++ b/pengine/test10/bug-lf-2435.summary @@ -1,32 +1,32 @@ Current cluster status: Node c20.chepkov.lan: standby Online: [ c19.chepkov.lan c21.chepkov.lan ] dummy1 (ocf::pacemaker:Dummy): Started c19.chepkov.lan dummy2 (ocf::pacemaker:Dummy): Started c20.chepkov.lan dummy4 (ocf::pacemaker:Dummy): Stopped dummy3 (ocf::pacemaker:Dummy): Started c21.chepkov.lan Transition Summary: - * Move dummy2 (Started c20.chepkov.lan -> c21.chepkov.lan) + * Move dummy2 ( 
c20.chepkov.lan -> c21.chepkov.lan ) * Stop dummy3 (c21.chepkov.lan) Executing cluster transition: * Resource action: dummy2 stop on c20.chepkov.lan * Resource action: dummy4 monitor on c21.chepkov.lan * Resource action: dummy4 monitor on c20.chepkov.lan * Resource action: dummy4 monitor on c19.chepkov.lan * Resource action: dummy3 stop on c21.chepkov.lan * Pseudo action: all_stopped * Resource action: dummy2 start on c21.chepkov.lan Revised cluster status: Node c20.chepkov.lan: standby Online: [ c19.chepkov.lan c21.chepkov.lan ] dummy1 (ocf::pacemaker:Dummy): Started c19.chepkov.lan dummy2 (ocf::pacemaker:Dummy): Started c21.chepkov.lan dummy4 (ocf::pacemaker:Dummy): Stopped dummy3 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/bug-lf-2445.summary b/pengine/test10/bug-lf-2445.summary index 4523d9b805..12f50310c5 100644 --- a/pengine/test10/bug-lf-2445.summary +++ b/pengine/test10/bug-lf-2445.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: C [P] (unique) P:0 (ocf::pacemaker:Dummy): Started node1 P:1 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move P:1 (Started node1 -> node2) + * Move P:1 ( node1 -> node2 ) Executing cluster transition: * Pseudo action: C_stop_0 * Resource action: P:1 stop on node1 * Pseudo action: C_stopped_0 * Pseudo action: C_start_0 * Pseudo action: all_stopped * Resource action: P:1 start on node2 * Pseudo action: C_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: C [P] (unique) P:0 (ocf::pacemaker:Dummy): Started node1 P:1 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/bug-lf-2453.summary b/pengine/test10/bug-lf-2453.summary index 398868b230..f038cbb6ba 100644 --- a/pengine/test10/bug-lf-2453.summary +++ b/pengine/test10/bug-lf-2453.summary @@ -1,39 +1,39 @@ 2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ domu1 domu2 ] PrimitiveResource1 (ocf::heartbeat:IPaddr2): Started domu1 Clone Set: CloneResource1 [apache] Started: [ domu1 domu2 ] Clone Set: CloneResource2 [DummyResource] Started: [ domu1 domu2 ] Transition Summary: - * Stop PrimitiveResource1 (Started domu1) due to required CloneResource2 running + * Stop PrimitiveResource1 ( domu1 ) due to required CloneResource2 running * Stop apache:0 (domu1) due to node availability * Stop apache:1 (domu2) due to node availability - * Stop DummyResource:0 (Started domu1) due to unrunnable CloneResource1 running - * Stop DummyResource:1 (Started domu2) due to unrunnable CloneResource1 running + * Stop DummyResource:0 ( domu1 ) due to unrunnable CloneResource1 running + * Stop DummyResource:1 ( domu2 ) due to unrunnable CloneResource1 running Executing cluster transition: * Resource action: PrimitiveResource1 stop on domu1 * Pseudo action: CloneResource2_stop_0 * Resource action: DummyResource:1 stop on domu1 * Resource action: DummyResource:0 stop on domu2 * Pseudo action: CloneResource2_stopped_0 * Pseudo action: CloneResource1_stop_0 * Resource action: apache:1 stop on domu1 * Resource action: apache:0 stop on domu2 * Pseudo action: CloneResource1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ domu1 domu2 ] PrimitiveResource1 (ocf::heartbeat:IPaddr2): Stopped Clone Set: CloneResource1 [apache] Stopped (disabled): [ domu1 domu2 ] Clone Set: CloneResource2 [DummyResource] Stopped: [ domu1 domu2 ] diff --git a/pengine/test10/bug-lf-2508.summary b/pengine/test10/bug-lf-2508.summary index 5d0d90c48a..d4e548a639 100644 --- 
a/pengine/test10/bug-lf-2508.summary +++ b/pengine/test10/bug-lf-2508.summary @@ -1,112 +1,112 @@ Current cluster status: Node srv02 (71085d5e-1c63-49e0-8c8c-400d610b4182): UNCLEAN (offline) Online: [ srv01 srv03 srv04 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Stopped Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv02 (UNCLEAN) Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Resource Group: grpStonith1:1 prmStonith1-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith1-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Started: [ srv03 srv04 ] Stopped: [ srv01 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Resource Group: grpStonith3:0 prmStonith3-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith3-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith3:1 prmStonith3-1 (stonith:external/stonith-helper): Started srv01 prmStonith3-3 (stonith:external/ssh): Stopped Started: [ srv04 ] Stopped: [ srv03 ] Clone Set: clnStonith4 [grpStonith4] Resource Group: grpStonith4:1 prmStonith4-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith4-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Started: [ srv01 srv03 ] Stopped: [ srv04 ] Transition Summary: * Fence (reboot) srv02 'peer is no longer part of the cluster' * Start Dummy01 (srv01) - * Move Dummy02 (Started srv02 -> srv04) + * Move Dummy02 ( srv02 -> srv04 ) * Stop prmStonith1-1:1 (srv02) due to node availability * Stop prmStonith1-3:1 (srv02) due to node availability * Stop prmStonith3-1:0 (srv02) due to node availability * Stop prmStonith3-3:0 (srv02) due to node availability * Start prmStonith3-3:1 (srv01) * Stop prmStonith4-1:1 (srv02) due to node availability * Stop prmStonith4-3:1 (srv02) due to node availability Executing cluster transition: * Pseudo action: Group01_start_0 * Resource action: prmStonith3-1:1 monitor=3600000 on srv01 * Fencing srv02 (reboot) * Pseudo action: Group02_stop_0 * Pseudo action: Dummy02_stop_0 * Pseudo action: clnStonith1_stop_0 * Pseudo action: clnStonith3_stop_0 * Pseudo action: clnStonith4_stop_0 * Pseudo action: stonith_complete * Resource action: Dummy01 start on srv01 * Pseudo action: Group02_stopped_0 * Pseudo action: Group02_start_0 * Resource action: Dummy02 start on srv04 * Pseudo action: grpStonith1:1_stop_0 * Pseudo action: prmStonith1-3:1_stop_0 * Pseudo action: grpStonith3:0_stop_0 * Pseudo action: prmStonith3-3:1_stop_0 * Pseudo action: grpStonith4:1_stop_0 * Pseudo action: prmStonith4-3:1_stop_0 * Pseudo action: Group01_running_0 * Resource action: Dummy01 monitor=10000 on srv01 * Pseudo action: Group02_running_0 * Resource action: Dummy02 monitor=10000 on srv04 * Pseudo action: prmStonith1-1:1_stop_0 * Pseudo action: prmStonith3-1:1_stop_0 * Pseudo action: prmStonith4-1:1_stop_0 * Pseudo action: all_stopped * Pseudo action: grpStonith1:1_stopped_0 * Pseudo action: clnStonith1_stopped_0 * Pseudo action: grpStonith3:0_stopped_0 * Pseudo action: clnStonith3_stopped_0 * Pseudo action: clnStonith3_start_0 * Pseudo action: grpStonith4:1_stopped_0 * Pseudo action: clnStonith4_stopped_0 * Pseudo action: grpStonith3:1_start_0 * Resource action: prmStonith3-3:1 start on srv01 * Pseudo action: grpStonith3:1_running_0 * Resource action: prmStonith3-3:1 monitor=3600000 on srv01 * Pseudo action: clnStonith3_running_0 Revised cluster status: Online: [ srv01 
srv03 srv04 ] OFFLINE: [ srv02 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Started srv01 Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv04 Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Started: [ srv03 srv04 ] Stopped: [ srv01 srv02 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Started: [ srv01 srv04 ] Stopped: [ srv02 srv03 ] Clone Set: clnStonith4 [grpStonith4] Started: [ srv01 srv03 ] Stopped: [ srv02 srv04 ] diff --git a/pengine/test10/bug-lf-2551.summary b/pengine/test10/bug-lf-2551.summary index 953727782e..1cd51081d7 100644 --- a/pengine/test10/bug-lf-2551.summary +++ b/pengine/test10/bug-lf-2551.summary @@ -1,226 +1,226 @@ Current cluster status: Node hex-9: UNCLEAN (offline) Online: [ hex-0 hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: base-clone [base-group] Resource Group: base-group:3 dlm (ocf::pacemaker:controld): Started hex-9 (UNCLEAN) o2cb (ocf::ocfs2:o2cb): Started hex-9 (UNCLEAN) clvm (ocf::lvm2:clvmd): Started hex-9 (UNCLEAN) cmirrord (ocf::lvm2:cmirrord): Started hex-9 (UNCLEAN) vg1 (ocf::heartbeat:LVM): Started hex-9 (UNCLEAN) ocfs2-1 (ocf::heartbeat:Filesystem): Started hex-9 (UNCLEAN) Started: [ hex-0 hex-7 hex-8 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) vm-06 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 (ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-9 (UNCLEAN) vm-33 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-34 (ocf::heartbeat:Xen): Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started 
hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped Transition Summary: * Fence (reboot) hex-9 'peer is no longer part of the cluster' - * Move fencing-sbd (Started hex-9 -> hex-0) - * Move dummy1 (Started hex-9 -> hex-0) + * Move fencing-sbd ( hex-9 -> hex-0 ) + * Move dummy1 ( hex-9 -> hex-0 ) * Stop dlm:3 (hex-9) due to node availability * Stop o2cb:3 (hex-9) due to node availability * Stop clvm:3 (hex-9) due to node availability * Stop cmirrord:3 (hex-9) due to node availability * Stop vg1:3 (hex-9) due to node availability * Stop ocfs2-1:3 (hex-9) due to node availability * Stop vm-03 (hex-9) * Stop vm-06 (hex-9) * Stop vm-09 (hex-9) * Stop vm-13 (hex-9) * Stop vm-17 (hex-9) * Stop vm-21 (hex-9) * Stop vm-25 (hex-9) * Stop vm-29 (hex-9) * Stop vm-33 (hex-9) * Stop vm-37 (hex-9) * Stop vm-41 (hex-9) * Stop vm-45 (hex-9) * Stop vm-49 (hex-9) * Stop vm-53 (hex-9) * Stop vm-57 (hex-9) * Stop vm-61 (hex-9) Executing cluster transition: * Pseudo action: fencing-sbd_stop_0 * Resource action: dummy1 monitor=300000 on hex-8 * Resource action: dummy1 monitor=300000 on hex-7 * Pseudo action: load_stopped_hex-8 * Pseudo action: load_stopped_hex-7 * Pseudo action: load_stopped_hex-0 * Fencing hex-9 (reboot) * Resource action: fencing-sbd start on hex-0 * Pseudo action: dummy1_stop_0 * Pseudo action: vm-03_stop_0 * Pseudo action: vm-06_stop_0 * Pseudo action: vm-09_stop_0 * Pseudo action: vm-13_stop_0 * Pseudo action: vm-17_stop_0 * Pseudo action: vm-21_stop_0 * Pseudo action: vm-25_stop_0 * Pseudo action: vm-29_stop_0 * Pseudo action: vm-33_stop_0 * Pseudo action: vm-37_stop_0 * Pseudo action: vm-41_stop_0 * Pseudo action: vm-45_stop_0 * Pseudo action: vm-49_stop_0 * Pseudo action: vm-53_stop_0 * Pseudo action: vm-57_stop_0 * Pseudo action: vm-61_stop_0 * Pseudo action: stonith_complete * Pseudo action: load_stopped_hex-9 * Resource action: dummy1 start on hex-0 * Pseudo action: base-clone_stop_0 * Resource action: dummy1 monitor=30000 on hex-0 * Pseudo action: base-group:3_stop_0 * Pseudo action: ocfs2-1:3_stop_0 * Pseudo action: vg1:3_stop_0 * Pseudo action: cmirrord:3_stop_0 * Pseudo action: clvm:3_stop_0 * Pseudo action: o2cb:3_stop_0 * Pseudo action: dlm:3_stop_0 * Pseudo action: all_stopped * Pseudo action: base-group:3_stopped_0 * Pseudo action: base-clone_stopped_0 Revised cluster status: Online: [ hex-0 hex-7 hex-8 ] OFFLINE: [ hex-9 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: base-clone [base-group] Started: [ hex-0 hex-7 hex-8 ] Stopped: [ hex-9 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Stopped vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-0 vm-06 (ocf::heartbeat:Xen): Stopped vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 
(ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Stopped vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Stopped vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Stopped vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Stopped vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Stopped vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Stopped vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-0 vm-33 (ocf::heartbeat:Xen): Stopped vm-34 (ocf::heartbeat:Xen): Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Stopped vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Stopped vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Stopped vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Stopped vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Stopped vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Stopped vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Stopped vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped diff --git a/pengine/test10/bug-lf-2574.summary b/pengine/test10/bug-lf-2574.summary index 800453cc73..8033616987 100644 --- a/pengine/test10/bug-lf-2574.summary +++ b/pengine/test10/bug-lf-2574.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ srv01 srv02 srv03 ] main_rsc (ocf::pacemaker:Dummy): Started srv01 main_rsc2 (ocf::pacemaker:Dummy): Started srv02 Clone Set: clnDummy1 [prmDummy1] Started: [ srv02 srv03 ] Stopped: [ srv01 ] Clone Set: clnPingd [prmPingd] Started: [ srv01 srv02 srv03 ] Transition Summary: - * Move main_rsc (Started srv01 -> srv03) + * Move main_rsc ( srv01 -> srv03 ) * Stop prmPingd:0 (srv01) due to node availability Executing cluster transition: * Resource action: main_rsc stop on srv01 * Pseudo action: clnPingd_stop_0 * Resource action: main_rsc start on srv03 * Resource action: prmPingd:0 stop on srv01 * Pseudo action: clnPingd_stopped_0 * Pseudo action: all_stopped * Resource action: main_rsc monitor=10000 on srv03 Revised cluster status: Online: [ srv01 srv02 srv03 ] main_rsc (ocf::pacemaker:Dummy): Started srv03 main_rsc2 (ocf::pacemaker:Dummy): Started srv02 Clone Set: clnDummy1 [prmDummy1] Started: [ srv02 srv03 ] Stopped: [ srv01 ] Clone Set: clnPingd 
[prmPingd] Started: [ srv02 srv03 ] Stopped: [ srv01 ] diff --git a/pengine/test10/bug-lf-2606.summary b/pengine/test10/bug-lf-2606.summary index 7e889c9fd6..d3fdb249d9 100644 --- a/pengine/test10/bug-lf-2606.summary +++ b/pengine/test10/bug-lf-2606.summary @@ -1,45 +1,45 @@ 1 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node node2: UNCLEAN (online) Online: [ node1 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node2 ( disabled ) rsc2 (ocf::pacemaker:Dummy): Started node2 Master/Slave Set: ms3 [rsc3] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: * Fence (reboot) node2 'rsc1 failed there' * Stop rsc1 (node2) - * Move rsc2 (Started node2 -> node1) - * Demote rsc3:1 (Master -> Stopped node2) + * Move rsc2 ( node2 -> node1 ) + * Stop rsc3:1 ( Master node2 ) due to node availability Executing cluster transition: * Pseudo action: ms3_demote_0 * Fencing node2 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc3:1_demote_0 * Pseudo action: ms3_demoted_0 * Pseudo action: ms3_stop_0 * Pseudo action: stonith_complete * Resource action: rsc2 start on node1 * Pseudo action: rsc3:1_stop_0 * Pseudo action: ms3_stopped_0 * Pseudo action: all_stopped * Resource action: rsc2 monitor=10000 on node1 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped ( disabled ) rsc2 (ocf::pacemaker:Dummy): Started node1 Master/Slave Set: ms3 [rsc3] Slaves: [ node1 ] Stopped: [ node2 ] diff --git a/pengine/test10/bug-lf-2613.summary b/pengine/test10/bug-lf-2613.summary index 4312c519e5..51078c0794 100644 --- a/pengine/test10/bug-lf-2613.summary +++ b/pengine/test10/bug-lf-2613.summary @@ -1,89 +1,89 @@ Current cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started act1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): FAILED act1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 Transition Summary: - * Move prmExPostgreSQLDB1 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-1 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-2 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-3 (Started act1 -> sby1) - * Move prmIpPostgreSQLDB1 (Started act1 -> sby1) - * Recover prmApPostgreSQLDB1 (Started act1 -> sby1) + * Move prmExPostgreSQLDB1 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-1 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-2 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-3 ( act1 -> sby1 ) + * Move prmIpPostgreSQLDB1 ( act1 
-> sby1 ) + * Recover prmApPostgreSQLDB1 ( act1 -> sby1 ) Executing cluster transition: * Pseudo action: grpPostgreSQLDB1_stop_0 * Resource action: prmApPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_sby2 * Pseudo action: load_stopped_sby1 * Pseudo action: load_stopped_act3 * Pseudo action: load_stopped_act2 * Resource action: prmIpPostgreSQLDB1 stop on act1 * Resource action: prmFsPostgreSQLDB1-3 stop on act1 * Resource action: prmFsPostgreSQLDB1-2 stop on act1 * Resource action: prmFsPostgreSQLDB1-1 stop on act1 * Resource action: prmExPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_act1 * Pseudo action: all_stopped * Pseudo action: grpPostgreSQLDB1_stopped_0 * Pseudo action: grpPostgreSQLDB1_start_0 * Resource action: prmExPostgreSQLDB1 start on sby1 * Resource action: prmFsPostgreSQLDB1-1 start on sby1 * Resource action: prmFsPostgreSQLDB1-2 start on sby1 * Resource action: prmFsPostgreSQLDB1-3 start on sby1 * Resource action: prmIpPostgreSQLDB1 start on sby1 * Resource action: prmApPostgreSQLDB1 start on sby1 * Pseudo action: grpPostgreSQLDB1_running_0 * Resource action: prmExPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-2 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on sby1 * Resource action: prmIpPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmApPostgreSQLDB1 monitor=5000 on sby1 Revised cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started sby1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 diff --git a/pengine/test10/bug-lf-2619.summary b/pengine/test10/bug-lf-2619.summary index 9a2213d5df..a704be8d11 100644 --- a/pengine/test10/bug-lf-2619.summary +++ b/pengine/test10/bug-lf-2619.summary @@ -1,99 +1,99 @@ Current cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started act1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started 
act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): FAILED act1 Started: [ act2 act3 sby1 sby2 ] Transition Summary: - * Move prmExPostgreSQLDB1 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-1 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-2 (Started act1 -> sby1) - * Move prmFsPostgreSQLDB1-3 (Started act1 -> sby1) - * Move prmIpPostgreSQLDB1 (Started act1 -> sby1) - * Move prmApPostgreSQLDB1 (Started act1 -> sby1) + * Move prmExPostgreSQLDB1 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-1 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-2 ( act1 -> sby1 ) + * Move prmFsPostgreSQLDB1-3 ( act1 -> sby1 ) + * Move prmIpPostgreSQLDB1 ( act1 -> sby1 ) + * Move prmApPostgreSQLDB1 ( act1 -> sby1 ) * Stop prmPingd:0 (act1) due to node availability Executing cluster transition: * Pseudo action: grpPostgreSQLDB1_stop_0 * Resource action: prmApPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_sby2 * Pseudo action: load_stopped_sby1 * Pseudo action: load_stopped_act3 * Pseudo action: load_stopped_act2 * Resource action: prmIpPostgreSQLDB1 stop on act1 * Resource action: prmFsPostgreSQLDB1-3 stop on act1 * Resource action: prmFsPostgreSQLDB1-2 stop on act1 * Resource action: prmFsPostgreSQLDB1-1 stop on act1 * Resource action: prmExPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_act1 * Pseudo action: grpPostgreSQLDB1_stopped_0 * Pseudo action: grpPostgreSQLDB1_start_0 * Resource action: prmExPostgreSQLDB1 start on sby1 * Resource action: prmFsPostgreSQLDB1-1 start on sby1 * Resource action: prmFsPostgreSQLDB1-2 start on sby1 * Resource action: prmFsPostgreSQLDB1-3 start on sby1 * Resource action: prmIpPostgreSQLDB1 start on sby1 * Resource action: prmApPostgreSQLDB1 start on sby1 * Pseudo action: clnPingd_stop_0 * Pseudo action: grpPostgreSQLDB1_running_0 * Resource action: prmExPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-2 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on sby1 * Resource action: prmIpPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmApPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmPingd:0 stop on act1 * Pseudo action: clnPingd_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started sby1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 
(ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 Clone Set: clnPingd [prmPingd] Started: [ act2 act3 sby1 sby2 ] Stopped: [ act1 ] diff --git a/pengine/test10/bug-n-385265-2.summary b/pengine/test10/bug-n-385265-2.summary index f02554d145..2e0f7c15ec 100644 --- a/pengine/test10/bug-n-385265-2.summary +++ b/pengine/test10/bug-n-385265-2.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ ih01 ih02 ] Resource Group: group_common resource_ip_common (ocf::heartbeat:IPaddr2): FAILED ih02 resource_idvscommon (ocf::dfs:idvs): Started ih02 Transition Summary: - * Recover resource_ip_common (Started ih02 -> ih01) - * Move resource_idvscommon (Started ih02 -> ih01) + * Recover resource_ip_common ( ih02 -> ih01 ) + * Move resource_idvscommon ( ih02 -> ih01 ) Executing cluster transition: * Pseudo action: group_common_stop_0 * Resource action: resource_idvscommon stop on ih02 * Resource action: resource_ip_common stop on ih02 * Pseudo action: all_stopped * Pseudo action: group_common_stopped_0 * Pseudo action: group_common_start_0 * Resource action: resource_ip_common start on ih01 * Resource action: resource_idvscommon start on ih01 * Pseudo action: group_common_running_0 * Resource action: resource_ip_common monitor=30000 on ih01 * Resource action: resource_idvscommon monitor=30000 on ih01 Revised cluster status: Online: [ ih01 ih02 ] Resource Group: group_common resource_ip_common (ocf::heartbeat:IPaddr2): Started ih01 resource_idvscommon (ocf::dfs:idvs): Started ih01 diff --git a/pengine/test10/bug-n-387749.summary b/pengine/test10/bug-n-387749.summary index 2532d355c0..9ef7e7056d 100644 --- a/pengine/test10/bug-n-387749.summary +++ b/pengine/test10/bug-n-387749.summary @@ -1,58 +1,58 @@ Current cluster status: Online: [ power720-1 power720-2 ] OFFLINE: [ power720-4 ] Clone Set: export_home_ocfs2_clone_set [export_home_ocfs2] (unique) export_home_ocfs2:0 (ocf::heartbeat:Filesystem): Stopped export_home_ocfs2:1 (ocf::heartbeat:Filesystem): Started power720-2 export_home_ocfs2:2 (ocf::heartbeat:Filesystem): Stopped Resource Group: group_nfs resource_ipaddr1_single (ocf::heartbeat:IPaddr): Started power720-2 resource_nfsserver_single (lsb:nfsserver): Started power720-2 Transition Summary: * Start export_home_ocfs2:0 (power720-1) - * Move resource_ipaddr1_single (Started power720-2 -> power720-1) - * Move resource_nfsserver_single (Started power720-2 -> power720-1) + * Move resource_ipaddr1_single ( power720-2 -> power720-1 ) + * Move resource_nfsserver_single ( power720-2 -> power720-1 ) Executing cluster transition: * Resource action: export_home_ocfs2:0 monitor on power720-1 * Resource action: export_home_ocfs2:1 monitor on power720-1 * Resource action: export_home_ocfs2:2 monitor on power720-1 * Pseudo action: export_home_ocfs2_clone_set_pre_notify_start_0 * Pseudo action: group_nfs_stop_0 * Resource action: resource_ipaddr1_single monitor on power720-1 * Resource action: resource_nfsserver_single monitor on power720-1 * Resource action: export_home_ocfs2:1 notify on power720-2 * Pseudo action: export_home_ocfs2_clone_set_confirmed-pre_notify_start_0 * Pseudo action: export_home_ocfs2_clone_set_start_0 * Resource action: 
resource_nfsserver_single stop on power720-2 * Resource action: export_home_ocfs2:0 start on power720-1 * Pseudo action: export_home_ocfs2_clone_set_running_0 * Resource action: resource_ipaddr1_single stop on power720-2 * Pseudo action: all_stopped * Pseudo action: export_home_ocfs2_clone_set_post_notify_running_0 * Pseudo action: group_nfs_stopped_0 * Resource action: export_home_ocfs2:0 notify on power720-1 * Resource action: export_home_ocfs2:1 notify on power720-2 * Pseudo action: export_home_ocfs2_clone_set_confirmed-post_notify_running_0 * Pseudo action: group_nfs_start_0 * Resource action: resource_ipaddr1_single start on power720-1 * Resource action: resource_nfsserver_single start on power720-1 * Pseudo action: group_nfs_running_0 * Resource action: resource_ipaddr1_single monitor=5000 on power720-1 * Resource action: resource_nfsserver_single monitor=15000 on power720-1 Revised cluster status: Online: [ power720-1 power720-2 ] OFFLINE: [ power720-4 ] Clone Set: export_home_ocfs2_clone_set [export_home_ocfs2] (unique) export_home_ocfs2:0 (ocf::heartbeat:Filesystem): Started power720-1 export_home_ocfs2:1 (ocf::heartbeat:Filesystem): Started power720-2 export_home_ocfs2:2 (ocf::heartbeat:Filesystem): Stopped Resource Group: group_nfs resource_ipaddr1_single (ocf::heartbeat:IPaddr): Started power720-1 resource_nfsserver_single (lsb:nfsserver): Started power720-1 diff --git a/pengine/test10/bug-pm-11.summary b/pengine/test10/bug-pm-11.summary index 07f2d9ade6..dc26a2ea1d 100644 --- a/pengine/test10/bug-pm-11.summary +++ b/pengine/test10/bug-pm-11.summary @@ -1,47 +1,46 @@ Current cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Stopped Transition Summary: * Start stateful-2:0 (node-b) - * Start stateful-2:1 (node-a) * Promote stateful-2:1 (Stopped -> Master node-a) Executing cluster transition: * Resource action: stateful-2:0 monitor on node-b * Resource action: stateful-2:0 monitor on node-a * Resource action: stateful-2:1 monitor on node-b * Resource action: stateful-2:1 monitor on node-a * Pseudo action: ms-sf_start_0 * Pseudo action: group:0_start_0 * Resource action: stateful-2:0 start on node-b * Pseudo action: group:1_start_0 * Resource action: stateful-2:1 start on node-a * Pseudo action: group:0_running_0 * Pseudo action: group:1_running_0 * Pseudo action: ms-sf_running_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-2:1 promote on node-a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 Revised cluster status: Online: [ node-a node-b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Slave node-b stateful-2:0 (ocf::heartbeat:Stateful): Slave node-b Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master node-a stateful-2:1 (ocf::heartbeat:Stateful): Master node-a diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary index ee21df8615..c50df5fb45 100644 --- a/pengine/test10/bug-rh-1097457.summary +++ b/pengine/test10/bug-rh-1097457.summary @@ -1,105 +1,105 @@ 2 of 26 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 
lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3 VM3 (ocf::heartbeat:VirtualDomain): Started lama3 FSlun3 (ocf::heartbeat:Filesystem): FAILED lamaVM2 FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3 FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3 Resource Group: lamaVM1-G1 FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G2 FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G3 FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM2-G4 FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2 FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2 Clone Set: FAKE6-clone [FAKE6] Started: [ lamaVM1 lamaVM2 lamaVM3 ] Transition Summary: * Fence (reboot) lamaVM2 (resource: VM2) 'guest is unclean' - * Recover VM2 (Started lama3) - * Recover FSlun3 (Started lamaVM2 -> lama2) - * Restart FAKE4 (Started lamaVM2) due to required VM2 start - * Restart FAKE4-IP (Started lamaVM2) due to required VM2 start - * Restart FAKE6:2 (Started lamaVM2) due to required VM2 start - * Restart lamaVM2 (Started lama3) due to required VM2 start + * Recover VM2 ( lama3 ) + * Recover FSlun3 ( lamaVM2 -> lama2 ) + * Restart FAKE4 ( lamaVM2 ) due to required VM2 start + * Restart FAKE4-IP ( lamaVM2 ) due to required VM2 start + * Restart FAKE6:2 ( lamaVM2 ) due to required VM2 start + * Restart lamaVM2 ( lama3 ) due to required VM2 start Executing cluster transition: * Resource action: lamaVM2 stop on lama3 * Resource action: VM2 stop on lama3 * Pseudo action: stonith-lamaVM2-reboot on lamaVM2 * Pseudo action: stonith_complete * Resource action: VM2 start on lama3 * Resource action: VM2 monitor=10000 on lama3 * Pseudo action: lamaVM2-G4_stop_0 * Pseudo action: FAKE4-IP_stop_0 * Pseudo action: FAKE6-clone_stop_0 * Resource action: lamaVM2 start on lama3 * Resource action: lamaVM2 monitor=30000 on lama3 * Resource action: FSlun3 monitor=10000 on lamaVM2 * Pseudo action: FAKE4_stop_0 * Pseudo action: FAKE6_stop_0 * Pseudo action: FAKE6-clone_stopped_0 * Pseudo action: FAKE6-clone_start_0 * Pseudo action: lamaVM2-G4_stopped_0 * Resource action: FAKE6 start on lamaVM2 * Resource action: FAKE6 monitor=30000 on lamaVM2 * Pseudo action: FAKE6-clone_running_0 * Pseudo action: FSlun3_stop_0 * Pseudo action: all_stopped * Resource action: FSlun3 start on lama2 * Pseudo action: lamaVM2-G4_start_0 * Resource action: FAKE4 start on lamaVM2 * Resource action: FAKE4 monitor=30000 on lamaVM2 * Resource action: FAKE4-IP start on lamaVM2 * Resource action: FAKE4-IP monitor=30000 on lamaVM2 * Resource action: FSlun3 monitor=10000 on lama2 * Pseudo action: lamaVM2-G4_running_0 Revised cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): 
FAILED lama3
VM3 (ocf::heartbeat:VirtualDomain): Started lama3
FSlun3 (ocf::heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ]
FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3
FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3
Resource Group: lamaVM1-G1
FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G2
FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G3
FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM2-G4
FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2
FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2
Clone Set: FAKE6-clone [FAKE6]
Started: [ lamaVM1 lamaVM2 lamaVM3 ]
diff --git a/pengine/test10/bug-rh-880249.summary b/pengine/test10/bug-rh-880249.summary
index d4f533326d..766d326204 100644
--- a/pengine/test10/bug-rh-880249.summary
+++ b/pengine/test10/bug-rh-880249.summary
@@ -1,29 +1,28 @@
Current cluster status:
Online: [ 18node1 18node2 18node3 ]
shoot1 (stonith:fence_xvm): Started 18node1
shoot2 (stonith:fence_xvm): Started 18node2
dummystateful (ocf::pacemaker:Stateful): Master [ 18node2 18node1 18node3 ]
Transition Summary:
- * Demote dummystateful (Master -> Started 18node2)
- * Move dummystateful (Started 18node2 -> 18node3)
+ * Move dummystateful ( Master 18node2 -> Started 18node3 )
Executing cluster transition:
 * Resource action: dummystateful demote on 18node3
 * Resource action: dummystateful demote on 18node1
 * Resource action: dummystateful demote on 18node2
 * Resource action: dummystateful stop on 18node3
 * Resource action: dummystateful stop on 18node1
 * Resource action: dummystateful stop on 18node2
 * Pseudo action: all_stopped
 * Resource action: dummystateful start on 18node3
Revised cluster status:
Online: [ 18node1 18node2 18node3 ]
shoot1 (stonith:fence_xvm): Started 18node1
shoot2 (stonith:fence_xvm): Started 18node2
dummystateful (ocf::pacemaker:Stateful): Started 18node3
diff --git a/pengine/test10/bug-suse-707150.summary b/pengine/test10/bug-suse-707150.summary
index d6922abf10..6e5a025d91 100644
--- a/pengine/test10/bug-suse-707150.summary
+++ b/pengine/test10/bug-suse-707150.summary
@@ -1,72 +1,72 @@
9 of 28 resources DISABLED and 0 BLOCKED from being started due to failures
Current cluster status:
Online: [ hex-0 hex-9 ]
OFFLINE: [ hex-7 hex-8 ]
vm-00 (ocf::heartbeat:Xen): Stopped ( disabled )
Clone Set: base-clone [base-group]
Resource Group: base-group:0
dlm (ocf::pacemaker:controld): Started hex-0
o2cb (ocf::ocfs2:o2cb): Stopped
clvm (ocf::lvm2:clvmd): Stopped
cmirrord (ocf::lvm2:cmirrord): Stopped
vg1 (ocf::heartbeat:LVM): Stopped ( disabled )
ocfs2-1 (ocf::heartbeat:Filesystem): Stopped
Stopped: [ hex-7 hex-8 hex-9 ]
vm-01 (ocf::heartbeat:Xen): Stopped
fencing-sbd (stonith:external/sbd): Started hex-9
dummy1 (ocf::heartbeat:Dummy): Started hex-0
Transition Summary:
 * Start o2cb:0 (hex-0)
 * Start clvm:0 (hex-0)
 * Start cmirrord:0 (hex-0)
 * Start dlm:1 (hex-9)
 * Start o2cb:1 (hex-9)
 * Start clvm:1 (hex-9)
 * Start cmirrord:1 (hex-9)
- * Start vm-01 (hex-9 - blocked) due to unrunnable base-clone running
+ * Start vm-01 ( hex-9 ) due to unrunnable base-clone running (blocked)
Executing cluster transition:
 * Resource action: vg1:1 monitor on hex-9
 * Pseudo action: base-clone_start_0
 * Pseudo action: load_stopped_hex-9
 * Pseudo action:
load_stopped_hex-8 * Pseudo action: load_stopped_hex-7 * Pseudo action: load_stopped_hex-0 * Pseudo action: base-group:0_start_0 * Resource action: o2cb:0 start on hex-0 * Resource action: clvm:0 start on hex-0 * Resource action: cmirrord:0 start on hex-0 * Pseudo action: base-group:1_start_0 * Resource action: dlm:1 start on hex-9 * Resource action: o2cb:1 start on hex-9 * Resource action: clvm:1 start on hex-9 * Resource action: cmirrord:1 start on hex-9 Revised cluster status: Online: [ hex-0 hex-9 ] OFFLINE: [ hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Stopped ( disabled ) Clone Set: base-clone [base-group] Resource Group: base-group:0 dlm (ocf::pacemaker:controld): Started hex-0 o2cb (ocf::ocfs2:o2cb): Started hex-0 clvm (ocf::lvm2:clvmd): Started hex-0 cmirrord (ocf::lvm2:cmirrord): Started hex-0 vg1 (ocf::heartbeat:LVM): Stopped ( disabled ) ocfs2-1 (ocf::heartbeat:Filesystem): Stopped Resource Group: base-group:1 dlm (ocf::pacemaker:controld): Started hex-9 o2cb (ocf::ocfs2:o2cb): Started hex-9 clvm (ocf::lvm2:clvmd): Started hex-9 cmirrord (ocf::lvm2:cmirrord): Started hex-9 vg1 (ocf::heartbeat:LVM): Stopped ( disabled ) ocfs2-1 (ocf::heartbeat:Filesystem): Stopped Stopped: [ hex-7 hex-8 ] vm-01 (ocf::heartbeat:Xen): Stopped fencing-sbd (stonith:external/sbd): Started hex-9 dummy1 (ocf::heartbeat:Dummy): Started hex-0 diff --git a/pengine/test10/bundle-nested-colocation.summary b/pengine/test10/bundle-nested-colocation.summary index 9755e5f15e..0e2a68d545 100644 --- a/pengine/test10/bundle-nested-colocation.summary +++ b/pengine/test10/bundle-nested-colocation.summary @@ -1,75 +1,75 @@ Using the original execution date of: 2017-07-14 08:50:25Z Current cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ] RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ] overcloud-rabbit-0 (ocf::pacemaker:remote): Started overcloud-controller-0 overcloud-rabbit-1 (ocf::pacemaker:remote): Started overcloud-controller-1 overcloud-rabbit-2 (ocf::pacemaker:remote): Started overcloud-controller-2 Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-0 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-1 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-2 Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-docker-0 (ocf::heartbeat:docker): Started overcloud-galera-0 galera-bundle-docker-1 (ocf::heartbeat:docker): Started overcloud-galera-1 galera-bundle-docker-2 (ocf::heartbeat:docker): Started overcloud-galera-2 Transition Summary: - * Restart rabbitmq-bundle-docker-0 (Started overcloud-rabbit-0) + * Restart rabbitmq-bundle-docker-0 ( overcloud-rabbit-0 ) * Start rabbitmq-bundle-0 (overcloud-controller-0) * Start rabbitmq:0 (rabbitmq-bundle-0) - * Restart rabbitmq-bundle-docker-1 (Started overcloud-rabbit-1) + * Restart rabbitmq-bundle-docker-1 ( overcloud-rabbit-1 ) * Start rabbitmq-bundle-1 (overcloud-controller-1) * Start rabbitmq:1 (rabbitmq-bundle-1) - * Restart rabbitmq-bundle-docker-2 (Started overcloud-rabbit-2) + * Restart rabbitmq-bundle-docker-2 ( overcloud-rabbit-2 ) * Start rabbitmq-bundle-2 (overcloud-controller-2) * Start rabbitmq:2 (rabbitmq-bundle-2) Executing cluster transition: * Resource action: rabbitmq-bundle-docker-0 
stop on overcloud-rabbit-0 * Resource action: rabbitmq-bundle-docker-1 stop on overcloud-rabbit-1 * Resource action: rabbitmq-bundle-docker-2 stop on overcloud-rabbit-2 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: all_stopped * Resource action: rabbitmq-bundle-docker-0 start on overcloud-rabbit-0 * Resource action: rabbitmq-bundle-docker-0 monitor=60000 on overcloud-rabbit-0 * Resource action: rabbitmq-bundle-0 start on overcloud-controller-0 * Resource action: rabbitmq-bundle-docker-1 start on overcloud-rabbit-1 * Resource action: rabbitmq-bundle-docker-1 monitor=60000 on overcloud-rabbit-1 * Resource action: rabbitmq-bundle-1 start on overcloud-controller-1 * Resource action: rabbitmq-bundle-docker-2 start on overcloud-rabbit-2 * Resource action: rabbitmq-bundle-docker-2 monitor=60000 on overcloud-rabbit-2 * Resource action: rabbitmq-bundle-2 start on overcloud-controller-2 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Resource action: rabbitmq-bundle-0 monitor=60000 on overcloud-controller-0 * Resource action: rabbitmq:1 start on rabbitmq-bundle-1 * Resource action: rabbitmq-bundle-1 monitor=60000 on overcloud-controller-1 * Resource action: rabbitmq:2 start on rabbitmq-bundle-2 * Resource action: rabbitmq-bundle-2 monitor=60000 on overcloud-controller-2 * Pseudo action: rabbitmq-bundle-clone_running_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Resource action: rabbitmq:1 monitor=10000 on rabbitmq-bundle-1 * Resource action: rabbitmq:2 monitor=10000 on rabbitmq-bundle-2 Using the original execution date of: 2017-07-14 08:50:25Z Revised cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ] RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ] Containers: [ rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 ] overcloud-rabbit-0 (ocf::pacemaker:remote): Started overcloud-controller-0 overcloud-rabbit-1 (ocf::pacemaker:remote): Started overcloud-controller-1 overcloud-rabbit-2 (ocf::pacemaker:remote): Started overcloud-controller-2 Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-rabbit-0 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-rabbit-1 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started overcloud-rabbit-2 Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-docker-0 (ocf::heartbeat:docker): Started overcloud-galera-0 galera-bundle-docker-1 (ocf::heartbeat:docker): Started overcloud-galera-1 galera-bundle-docker-2 (ocf::heartbeat:docker): Started overcloud-galera-2 diff --git a/pengine/test10/bundle-order-partial-start-2.summary b/pengine/test10/bundle-order-partial-start-2.summary index d67f8fc1fa..790bd17244 100644 --- a/pengine/test10/bundle-order-partial-start-2.summary +++ b/pengine/test10/bundle-order-partial-start-2.summary @@ -1,84 +1,84 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] 
rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Slave undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Start rabbitmq:0 (rabbitmq-bundle-0) - * Restart galera-bundle-docker-0 (Started undercloud) due to required haproxy-bundle running - * Restart galera-bundle-0 (Started undercloud) due to required galera-bundle-docker-0 start + * Restart galera-bundle-docker-0 ( undercloud ) due to required haproxy-bundle running + * Restart galera-bundle-0 ( undercloud ) due to required galera-bundle-docker-0 start * Start galera:0 (galera-bundle-0) * Promote redis:0 (Slave -> Master redis-bundle-0) * Start haproxy-bundle-docker-0 (undercloud) Executing cluster transition: * Resource action: galera-bundle-0 stop on undercloud * Resource action: haproxy-bundle-docker-0 monitor on undercloud * Pseudo action: haproxy-bundle_start_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Pseudo action: rabbitmq-bundle_start_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Resource action: redis promote on redis-bundle-0 * Resource action: haproxy-bundle-docker-0 start on undercloud * Pseudo action: haproxy-bundle_running_0 * Pseudo action: redis-bundle-master_promoted_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Pseudo action: all_stopped * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Resource action: redis monitor=20000 on redis-bundle-0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud * Pseudo action: redis-bundle_promoted_0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Pseudo action: galera-bundle_start_0 * Resource action: galera-bundle-docker-0 start on undercloud * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud * Resource action: galera-bundle-0 start on undercloud * Resource action: galera-bundle-0 monitor=60000 on undercloud * Pseudo action: galera-bundle-master_start_0 * Resource action: galera:0 start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 Revised cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle 
[192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Slave undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud diff --git a/pengine/test10/bundle-order-partial-stop.summary b/pengine/test10/bundle-order-partial-stop.summary index e7bac73658..7bc24d2c78 100644 --- a/pengine/test10/bundle-order-partial-stop.summary +++ b/pengine/test10/bundle-order-partial-stop.summary @@ -1,113 +1,111 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Shutdown undercloud * Stop rabbitmq-bundle-docker-0 (undercloud) due to node availability * Stop rabbitmq-bundle-0 (undercloud) due to node availability - * Stop rabbitmq:0 (Started rabbitmq-bundle-0) due to unrunnable rabbitmq-bundle-0 start + * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start * Stop galera-bundle-docker-0 (undercloud) due to node availability * Stop galera-bundle-0 (undercloud) due to node availability - * Demote galera:0 (Master -> Slave galera-bundle-0) - * Restart galera:0 (Slave galera-bundle-0) due to unrunnable galera-bundle-0 start + * Stop galera:0 ( Master galera-bundle-0 ) due to unrunnable 
galera-bundle-0 start * Stop redis-bundle-docker-0 (undercloud) due to node availability * Stop redis-bundle-0 (undercloud) due to node availability - * Demote redis:0 (Master -> Slave redis-bundle-0) - * Restart redis:0 (Slave redis-bundle-0) due to unrunnable redis-bundle-0 start + * Stop redis:0 ( Master redis-bundle-0 ) due to unrunnable redis-bundle-0 start * Stop ip-192.168.122.254 (undercloud) due to node availability * Stop ip-192.168.122.250 (undercloud) due to node availability * Stop ip-192.168.122.249 (undercloud) due to node availability * Stop ip-192.168.122.253 (undercloud) due to node availability * Stop ip-192.168.122.247 (undercloud) due to node availability * Stop ip-192.168.122.248 (undercloud) due to node availability * Stop haproxy-bundle-docker-0 (undercloud) due to node availability * Stop openstack-cinder-volume-docker-0 (undercloud) due to node availability Executing cluster transition: * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: galera-bundle-master_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: galera demote on galera-bundle-0 * Resource action: redis demote on redis-bundle-0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Pseudo action: redis-bundle_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on undercloud * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on redis-bundle-0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud * Pseudo action: all_stopped Revised cluster status: Online: [ undercloud ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] 
galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Stopped diff --git a/pengine/test10/bundle-order-startup-clone-2.summary b/pengine/test10/bundle-order-startup-clone-2.summary index e23d9339ca..9b0eeaaad4 100644 --- a/pengine/test10/bundle-order-startup-clone-2.summary +++ b/pengine/test10/bundle-order-startup-clone-2.summary @@ -1,179 +1,176 @@ Current cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Clone Set: storage-clone [storage] Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped galera-bundle-1 (ocf::heartbeat:galera): Stopped galera-bundle-2 (ocf::heartbeat:galera): Stopped Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped haproxy-bundle-docker-1 (ocf::heartbeat:docker): Stopped haproxy-bundle-docker-2 (ocf::heartbeat:docker): Stopped Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped redis-bundle-1 (ocf::heartbeat:redis): Stopped redis-bundle-2 (ocf::heartbeat:redis): Stopped Transition Summary: * Start storage:0 (metal-1) * Start storage:1 (metal-2) * Start storage:2 (metal-3) * Start galera-bundle-docker-0 (metal-1) * Start galera-bundle-0 (metal-1) * Start galera:0 (galera-bundle-0) * Start galera-bundle-docker-1 (metal-2) * Start galera-bundle-1 (metal-2) * Start galera:1 (galera-bundle-1) * Start galera-bundle-docker-2 (metal-3) * Start galera-bundle-2 (metal-3) * Start galera:2 (galera-bundle-2) * Start haproxy-bundle-docker-0 (metal-1) * Start haproxy-bundle-docker-1 (metal-2) * Start haproxy-bundle-docker-2 (metal-3) * Start redis-bundle-docker-0 (metal-1) * Start redis-bundle-0 (metal-1) - * Start redis:0 (redis-bundle-0) * Promote redis:0 (Stopped -> Master redis-bundle-0) * Start redis-bundle-docker-1 (metal-2) * Start redis-bundle-1 (metal-2) - * Start redis:1 (redis-bundle-1) * Promote redis:1 (Stopped -> Master redis-bundle-1) * Start redis-bundle-docker-2 (metal-3) * Start redis-bundle-2 (metal-3) - * Start redis:2 (redis-bundle-2) * Promote redis:2 (Stopped -> Master redis-bundle-2) Executing cluster transition: * Resource action: storage:0 monitor on metal-1 * Resource action: storage:1 monitor on metal-2 * Resource action: storage:2 monitor on metal-3 * Pseudo action: storage-clone_pre_notify_start_0 * Resource action: galera-bundle-docker-0 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-2 * Resource action: galera-bundle-docker-0 monitor on metal-1 * Resource action: 
galera-bundle-docker-1 monitor on metal-3 * Resource action: galera-bundle-docker-1 monitor on metal-2 * Resource action: galera-bundle-docker-1 monitor on metal-1 * Resource action: galera-bundle-docker-2 monitor on metal-3 * Resource action: galera-bundle-docker-2 monitor on metal-2 * Resource action: galera-bundle-docker-2 monitor on metal-1 * Resource action: haproxy-bundle-docker-0 monitor on metal-3 * Resource action: haproxy-bundle-docker-0 monitor on metal-2 * Resource action: haproxy-bundle-docker-0 monitor on metal-1 * Resource action: haproxy-bundle-docker-1 monitor on metal-3 * Resource action: haproxy-bundle-docker-1 monitor on metal-2 * Resource action: haproxy-bundle-docker-1 monitor on metal-1 * Resource action: haproxy-bundle-docker-2 monitor on metal-3 * Resource action: haproxy-bundle-docker-2 monitor on metal-2 * Resource action: haproxy-bundle-docker-2 monitor on metal-1 * Resource action: redis-bundle-docker-0 monitor on metal-3 * Resource action: redis-bundle-docker-0 monitor on metal-2 * Resource action: redis-bundle-docker-0 monitor on metal-1 * Resource action: redis-bundle-docker-1 monitor on metal-3 * Resource action: redis-bundle-docker-1 monitor on metal-2 * Resource action: redis-bundle-docker-1 monitor on metal-1 * Resource action: redis-bundle-docker-2 monitor on metal-3 * Resource action: redis-bundle-docker-2 monitor on metal-2 * Resource action: redis-bundle-docker-2 monitor on metal-1 * Pseudo action: redis-bundle_start_0 * Pseudo action: haproxy-bundle_start_0 * Pseudo action: storage-clone_confirmed-pre_notify_start_0 * Resource action: haproxy-bundle-docker-0 start on metal-1 * Resource action: haproxy-bundle-docker-1 start on metal-2 * Resource action: haproxy-bundle-docker-2 start on metal-3 * Resource action: redis-bundle-docker-0 start on metal-1 * Resource action: redis-bundle-0 start on metal-1 * Resource action: redis-bundle-docker-1 start on metal-2 * Resource action: redis-bundle-1 start on metal-2 * Resource action: redis-bundle-docker-2 start on metal-3 * Resource action: redis-bundle-2 start on metal-3 * Pseudo action: redis-bundle-master_start_0 * Pseudo action: haproxy-bundle_running_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-1 * Resource action: haproxy-bundle-docker-1 monitor=60000 on metal-2 * Resource action: haproxy-bundle-docker-2 monitor=60000 on metal-3 * Resource action: redis:0 start on redis-bundle-0 * Resource action: redis-bundle-docker-0 monitor=60000 on metal-1 * Resource action: redis-bundle-0 monitor=60000 on metal-1 * Resource action: redis:1 start on redis-bundle-1 * Resource action: redis-bundle-docker-1 monitor=60000 on metal-2 * Resource action: redis-bundle-1 monitor=60000 on metal-2 * Resource action: redis:2 start on redis-bundle-2 * Resource action: redis-bundle-docker-2 monitor=60000 on metal-3 * Resource action: redis-bundle-2 monitor=60000 on metal-3 * Pseudo action: redis-bundle-master_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: redis:0 promote on redis-bundle-0 * Resource action: redis:1 promote on redis-bundle-1 * Resource action: redis:2 promote on redis-bundle-2 * Pseudo action: redis-bundle-master_promoted_0 * Resource action: redis:0 monitor=20000 on redis-bundle-0 * Resource action: redis:1 monitor=20000 on redis-bundle-1 * Resource action: redis:2 monitor=20000 on redis-bundle-2 * Pseudo action: redis-bundle_promoted_0 * Pseudo action: storage-clone_start_0 * 
Resource action: storage:0 start on metal-1 * Resource action: storage:1 start on metal-2 * Resource action: storage:2 start on metal-3 * Pseudo action: storage-clone_running_0 * Pseudo action: storage-clone_post_notify_running_0 * Resource action: storage:0 notify on metal-1 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-post_notify_running_0 * Pseudo action: galera-bundle_start_0 * Resource action: storage:0 monitor=30000 on metal-1 * Resource action: storage:1 monitor=30000 on metal-2 * Resource action: storage:2 monitor=30000 on metal-3 * Resource action: galera-bundle-docker-0 start on metal-1 * Resource action: galera-bundle-0 start on metal-1 * Resource action: galera-bundle-docker-1 start on metal-2 * Resource action: galera-bundle-1 start on metal-2 * Resource action: galera-bundle-docker-2 start on metal-3 * Resource action: galera-bundle-2 start on metal-3 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera:0 start on galera-bundle-0 * Resource action: galera-bundle-docker-0 monitor=60000 on metal-1 * Resource action: galera-bundle-0 monitor=60000 on metal-1 * Resource action: galera:1 start on galera-bundle-1 * Resource action: galera-bundle-docker-1 monitor=60000 on metal-2 * Resource action: galera-bundle-1 monitor=60000 on metal-2 * Resource action: galera:2 start on galera-bundle-2 * Resource action: galera-bundle-docker-2 monitor=60000 on metal-3 * Resource action: galera-bundle-2 monitor=60000 on metal-3 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 * Resource action: galera:1 monitor=30000 on galera-bundle-1 * Resource action: galera:1 monitor=20000 on galera-bundle-1 * Resource action: galera:2 monitor=30000 on galera-bundle-2 * Resource action: galera:2 monitor=20000 on galera-bundle-2 Revised cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] Clone Set: storage-clone [storage] Started: [ metal-1 metal-2 metal-3 ] Stopped: [ rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Slave metal-1 galera-bundle-1 (ocf::heartbeat:galera): Slave metal-2 galera-bundle-2 (ocf::heartbeat:galera): Slave metal-3 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-1 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started metal-2 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started metal-3 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master metal-1 redis-bundle-1 (ocf::heartbeat:redis): Master metal-2 redis-bundle-2 (ocf::heartbeat:redis): Master metal-3 diff --git a/pengine/test10/bundle-order-startup-clone.summary b/pengine/test10/bundle-order-startup-clone.summary index 0acfd1e4bc..5e826dc0b3 100644 --- a/pengine/test10/bundle-order-startup-clone.summary +++ b/pengine/test10/bundle-order-startup-clone.summary @@ -1,69 +1,69 
@@
Current cluster status:
Online: [ metal-1 metal-2 metal-3 ]
RemoteOFFLINE: [ rabbitmq-bundle-0 ]
Clone Set: storage-clone [storage]
Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ]
Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]
galera-bundle-0 (ocf::heartbeat:galera): Stopped
Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]
haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped
Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]
redis-bundle-0 (ocf::heartbeat:redis): Stopped
Transition Summary:
- * Start storage:0 (metal-1 - blocked) due to unrunnable redis-bundle promoted
- * Start storage:1 (metal-2 - blocked) due to unrunnable redis-bundle promoted
- * Start storage:2 (metal-3 - blocked) due to unrunnable redis-bundle promoted
- * Start galera-bundle-docker-0 (metal-1 - blocked) due to unrunnable storage-clone notified
- * Start galera-bundle-0 (metal-1 - blocked) due to unrunnable galera-bundle-docker-0 start
- * Start galera:0 (galera-bundle-0 - blocked) due to unrunnable galera-bundle-docker-0 start
+ * Start storage:0 ( metal-1 ) due to unrunnable redis-bundle promoted (blocked)
+ * Start storage:1 ( metal-2 ) due to unrunnable redis-bundle promoted (blocked)
+ * Start storage:2 ( metal-3 ) due to unrunnable redis-bundle promoted (blocked)
+ * Start galera-bundle-docker-0 ( metal-1 ) due to unrunnable storage-clone notified (blocked)
+ * Start galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start (blocked)
+ * Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked)
 * Start haproxy-bundle-docker-0 (metal-2)
 * Start redis-bundle-docker-0 (metal-2)
 * Start redis-bundle-0 (metal-2)
 * Start redis:0 (redis-bundle-0)
Executing cluster transition:
 * Resource action: storage:0 monitor on metal-1
 * Resource action: storage:1 monitor on metal-2
 * Resource action: storage:2 monitor on metal-3
 * Resource action: galera-bundle-docker-0 monitor on metal-3
 * Resource action: galera-bundle-docker-0 monitor on metal-2
 * Resource action: galera-bundle-docker-0 monitor on metal-1
 * Resource action: haproxy-bundle-docker-0 monitor on metal-3
 * Resource action: haproxy-bundle-docker-0 monitor on metal-2
 * Resource action: haproxy-bundle-docker-0 monitor on metal-1
 * Resource action: redis-bundle-docker-0 monitor on metal-3
 * Resource action: redis-bundle-docker-0 monitor on metal-2
 * Resource action: redis-bundle-docker-0 monitor on metal-1
 * Pseudo action: redis-bundle_start_0
 * Pseudo action: haproxy-bundle_start_0
 * Resource action: haproxy-bundle-docker-0 start on metal-2
 * Resource action: redis-bundle-docker-0 start on metal-2
 * Resource action: redis-bundle-0 start on metal-2
 * Pseudo action: redis-bundle-master_start_0
 * Pseudo action: haproxy-bundle_running_0
 * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-2
 * Resource action: redis:0 start on redis-bundle-0
 * Resource action: redis-bundle-docker-0 monitor=60000 on metal-2
 * Resource action: redis-bundle-0 monitor=60000 on metal-2
 * Pseudo action: redis-bundle-master_running_0
 * Pseudo action: redis-bundle_running_0
 * Resource action: redis:0 monitor=60000 on redis-bundle-0
 * Resource action: redis:0 monitor=45000 on redis-bundle-0
Revised cluster status:
Online: [ metal-1 metal-2 metal-3 ]
RemoteOFFLINE: [ rabbitmq-bundle-0 ]
Containers: [ redis-bundle-0:redis-bundle-docker-0 ]
Clone Set: storage-clone [storage]
Stopped:
[ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-2 Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Slave metal-2 diff --git a/pengine/test10/bundle-order-stop-clone.summary b/pengine/test10/bundle-order-stop-clone.summary index 9a6b0f2901..66cb82a4de 100644 --- a/pengine/test10/bundle-order-stop-clone.summary +++ b/pengine/test10/bundle-order-stop-clone.summary @@ -1,75 +1,75 @@ Current cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] Clone Set: storage-clone [storage] Started: [ metal-1 metal-2 metal-3 ] Stopped: [ rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Slave metal-1 galera-bundle-1 (ocf::heartbeat:galera): Slave metal-2 galera-bundle-2 (ocf::heartbeat:galera): Slave metal-3 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-1 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started metal-2 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started metal-3 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master metal-1 redis-bundle-1 (ocf::heartbeat:redis): Master metal-2 redis-bundle-2 (ocf::heartbeat:redis): Master metal-3 Transition Summary: * Stop storage:0 (metal-1) due to node availability * Stop galera-bundle-docker-0 (metal-1) due to node availability - * Stop galera-bundle-0 (Started metal-1) due to unrunnable galera-bundle-docker-0 start + * Stop galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start * Stop galera:0 (Slave galera-bundle-0) due to unrunnable galera-bundle-docker-0 start Executing cluster transition: * Pseudo action: storage-clone_pre_notify_stop_0 * Pseudo action: galera-bundle_stop_0 * Resource action: storage:0 notify on metal-1 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-pre_notify_stop_0 * Pseudo action: galera-bundle-master_stop_0 * Resource action: galera:0 stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on metal-1 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on metal-1 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: storage-clone_stop_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: storage:0 stop on metal-1 * Pseudo action: storage-clone_stopped_0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: storage-clone_post_notify_stopped_0 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: 
storage-clone_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] Clone Set: storage-clone [storage] Started: [ metal-2 metal-3 ] Stopped: [ metal-1 rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped galera-bundle-1 (ocf::heartbeat:galera): Slave metal-2 galera-bundle-2 (ocf::heartbeat:galera): Slave metal-3 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-1 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started metal-2 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started metal-3 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master metal-1 redis-bundle-1 (ocf::heartbeat:redis): Master metal-2 redis-bundle-2 (ocf::heartbeat:redis): Master metal-3 diff --git a/pengine/test10/bundle-order-stop.summary b/pengine/test10/bundle-order-stop.summary index e7bac73658..7bc24d2c78 100644 --- a/pengine/test10/bundle-order-stop.summary +++ b/pengine/test10/bundle-order-stop.summary @@ -1,113 +1,111 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Shutdown undercloud * Stop rabbitmq-bundle-docker-0 (undercloud) due to node availability * Stop rabbitmq-bundle-0 (undercloud) due to node availability - * Stop rabbitmq:0 (Started rabbitmq-bundle-0) due to unrunnable rabbitmq-bundle-0 start + * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start * Stop galera-bundle-docker-0 (undercloud) due to node availability * Stop galera-bundle-0 (undercloud) due to node availability - * Demote galera:0 (Master -> Slave galera-bundle-0) - * Restart galera:0 (Slave galera-bundle-0) due to unrunnable 
galera-bundle-0 start + * Stop galera:0 ( Master galera-bundle-0 ) due to unrunnable galera-bundle-0 start * Stop redis-bundle-docker-0 (undercloud) due to node availability * Stop redis-bundle-0 (undercloud) due to node availability - * Demote redis:0 (Master -> Slave redis-bundle-0) - * Restart redis:0 (Slave redis-bundle-0) due to unrunnable redis-bundle-0 start + * Stop redis:0 ( Master redis-bundle-0 ) due to unrunnable redis-bundle-0 start * Stop ip-192.168.122.254 (undercloud) due to node availability * Stop ip-192.168.122.250 (undercloud) due to node availability * Stop ip-192.168.122.249 (undercloud) due to node availability * Stop ip-192.168.122.253 (undercloud) due to node availability * Stop ip-192.168.122.247 (undercloud) due to node availability * Stop ip-192.168.122.248 (undercloud) due to node availability * Stop haproxy-bundle-docker-0 (undercloud) due to node availability * Stop openstack-cinder-volume-docker-0 (undercloud) due to node availability Executing cluster transition: * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: galera-bundle-master_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: galera demote on galera-bundle-0 * Resource action: redis demote on redis-bundle-0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Pseudo action: redis-bundle_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on undercloud * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on redis-bundle-0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud * Pseudo action: all_stopped Revised cluster status: Online: [ undercloud ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped Docker container: 
galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Stopped diff --git a/pengine/test10/clone-anon-failcount.summary b/pengine/test10/clone-anon-failcount.summary index 3fb39e3306..098b772f38 100644 --- a/pengine/test10/clone-anon-failcount.summary +++ b/pengine/test10/clone-anon-failcount.summary @@ -1,118 +1,118 @@ Current cluster status: Online: [ srv01 srv02 srv03 srv04 ] Resource Group: UMgroup01 UmVIPcheck (ocf::pacemaker:Dummy): Started srv01 UmIPaddr (ocf::pacemaker:Dummy): Started srv01 UmDummy01 (ocf::pacemaker:Dummy): Started srv01 UmDummy02 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started srv02 Resource Group: OVDBgroup02-3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started srv03 Resource Group: grpStonith1 prmStonithN1 (stonith:external/ssh): Started srv04 Resource Group: grpStonith2 prmStonithN2 (stonith:external/ssh): Started srv01 Resource Group: grpStonith3 prmStonithN3 (stonith:external/ssh): Started srv02 Resource Group: grpStonith4 prmStonithN4 (stonith:external/ssh): Started srv03 Clone Set: clnUMgroup01 [clnUmResource] Resource Group: clnUmResource:0 clnUMdummy01 (ocf::pacemaker:Dummy): FAILED srv04 clnUMdummy02 (ocf::pacemaker:Dummy): Started srv04 Started: [ srv01 ] Stopped: [ srv02 srv03 ] Clone Set: clnPingd [clnPrmPingd] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnDiskd1 [clnPrmDiskd1] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy1 [clnG3dummy01] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy2 [clnG3dummy02] Started: [ srv01 srv02 srv03 srv04 ] Transition Summary: - * Move UmVIPcheck (Started srv01 -> srv04) - * Move UmIPaddr (Started srv01 -> srv04) - * Move UmDummy01 (Started srv01 -> srv04) - * Move UmDummy02 (Started srv01 -> srv04) - * Recover clnUMdummy01:0 (Started srv04) - * Restart clnUMdummy02:0 (Started srv04) due to required clnUMdummy01:0 start + * Move UmVIPcheck ( srv01 -> srv04 ) + * Move UmIPaddr ( srv01 -> srv04 ) + * Move UmDummy01 ( srv01 -> srv04 ) + * Move UmDummy02 ( srv01 -> srv04 ) + * Recover clnUMdummy01:0 ( srv04 ) + * Restart clnUMdummy02:0 ( srv04 ) due to required clnUMdummy01:0 start * Stop clnUMdummy01:1 (srv01) due to node availability * Stop clnUMdummy02:1 (srv01) due to node availability Executing cluster transition: * Pseudo action: UMgroup01_stop_0 * Resource action: UmDummy02 stop on srv01 * Resource action: UmDummy01 stop on srv01 * Resource action: UmIPaddr stop on srv01 * Resource action: UmVIPcheck stop on srv01 * Pseudo action: UMgroup01_stopped_0 * Pseudo action: clnUMgroup01_stop_0 * 
Pseudo action: clnUmResource:0_stop_0 * Resource action: clnUMdummy02:1 stop on srv04 * Pseudo action: clnUmResource:1_stop_0 * Resource action: clnUMdummy02:0 stop on srv01 * Resource action: clnUMdummy01:1 stop on srv04 * Resource action: clnUMdummy01:0 stop on srv01 * Pseudo action: all_stopped * Pseudo action: clnUmResource:0_stopped_0 * Pseudo action: clnUmResource:1_stopped_0 * Pseudo action: clnUMgroup01_stopped_0 * Pseudo action: clnUMgroup01_start_0 * Pseudo action: clnUmResource:0_start_0 * Resource action: clnUMdummy01:1 start on srv04 * Resource action: clnUMdummy01:1 monitor=10000 on srv04 * Resource action: clnUMdummy02:1 start on srv04 * Resource action: clnUMdummy02:1 monitor=10000 on srv04 * Pseudo action: clnUmResource:0_running_0 * Pseudo action: clnUMgroup01_running_0 * Pseudo action: UMgroup01_start_0 * Resource action: UmVIPcheck start on srv04 * Resource action: UmIPaddr start on srv04 * Resource action: UmDummy01 start on srv04 * Resource action: UmDummy02 start on srv04 * Pseudo action: UMgroup01_running_0 * Resource action: UmIPaddr monitor=10000 on srv04 * Resource action: UmDummy01 monitor=10000 on srv04 * Resource action: UmDummy02 monitor=10000 on srv04 Revised cluster status: Online: [ srv01 srv02 srv03 srv04 ] Resource Group: UMgroup01 UmVIPcheck (ocf::pacemaker:Dummy): Started srv04 UmIPaddr (ocf::pacemaker:Dummy): Started srv04 UmDummy01 (ocf::pacemaker:Dummy): Started srv04 UmDummy02 (ocf::pacemaker:Dummy): Started srv04 Resource Group: OVDBgroup02-1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started srv02 Resource Group: OVDBgroup02-3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started srv03 Resource Group: grpStonith1 prmStonithN1 (stonith:external/ssh): Started srv04 Resource Group: grpStonith2 prmStonithN2 (stonith:external/ssh): Started srv01 Resource Group: grpStonith3 prmStonithN3 (stonith:external/ssh): Started srv02 Resource Group: grpStonith4 prmStonithN4 (stonith:external/ssh): Started srv03 Clone Set: clnUMgroup01 [clnUmResource] Started: [ srv04 ] Stopped: [ srv01 srv02 srv03 ] Clone Set: clnPingd [clnPrmPingd] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnDiskd1 [clnPrmDiskd1] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy1 [clnG3dummy01] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy2 [clnG3dummy02] Started: [ srv01 srv02 srv03 srv04 ] diff --git a/pengine/test10/clone-fail-block-colocation.summary b/pengine/test10/clone-fail-block-colocation.summary index 9b9167af52..7f3f765111 100644 --- a/pengine/test10/clone-fail-block-colocation.summary +++ b/pengine/test10/clone-fail-block-colocation.summary @@ -1,58 +1,58 @@ Current cluster status: Online: [ DEM-1 DEM-2 ] Resource Group: svc ipv6_dem_tas_dns (ocf::heartbeat:IPv6addr): Started DEM-1 d_bird_subnet_state (lsb:bird_subnet_state): Started DEM-1 ip_mgmt (ocf::heartbeat:IPaddr2): Started DEM-1 ip_trf_tas (ocf::heartbeat:IPaddr2): Started DEM-1 Clone Set: cl_bird [d_bird] Started: [ DEM-1 DEM-2 ] Clone Set: cl_bird6 [d_bird6] d_bird6 (lsb:bird6): FAILED DEM-1 ( blocked ) Started: [ DEM-2 ] Clone Set: cl_tomcat_nms [d_tomcat_nms] Started: [ DEM-1 DEM-2 ] Transition Summary: - * Move ipv6_dem_tas_dns (Started DEM-1 -> DEM-2) - * Move d_bird_subnet_state (Started DEM-1 -> DEM-2) - * Move ip_mgmt (Started DEM-1 -> DEM-2) - * Move ip_trf_tas (Started DEM-1 -> DEM-2) + * Move ipv6_dem_tas_dns ( DEM-1 -> DEM-2 ) + * Move d_bird_subnet_state ( DEM-1 -> DEM-2 ) + * Move ip_mgmt ( DEM-1 
-> DEM-2 ) + * Move ip_trf_tas ( DEM-1 -> DEM-2 ) Executing cluster transition: * Pseudo action: svc_stop_0 * Resource action: ip_trf_tas stop on DEM-1 * Resource action: ip_mgmt stop on DEM-1 * Resource action: d_bird_subnet_state stop on DEM-1 * Resource action: ipv6_dem_tas_dns stop on DEM-1 * Pseudo action: all_stopped * Pseudo action: svc_stopped_0 * Pseudo action: svc_start_0 * Resource action: ipv6_dem_tas_dns start on DEM-2 * Resource action: d_bird_subnet_state start on DEM-2 * Resource action: ip_mgmt start on DEM-2 * Resource action: ip_trf_tas start on DEM-2 * Pseudo action: svc_running_0 * Resource action: ipv6_dem_tas_dns monitor=10000 on DEM-2 * Resource action: d_bird_subnet_state monitor=10000 on DEM-2 * Resource action: ip_mgmt monitor=10000 on DEM-2 * Resource action: ip_trf_tas monitor=10000 on DEM-2 Revised cluster status: Online: [ DEM-1 DEM-2 ] Resource Group: svc ipv6_dem_tas_dns (ocf::heartbeat:IPv6addr): Started DEM-2 d_bird_subnet_state (lsb:bird_subnet_state): Started DEM-2 ip_mgmt (ocf::heartbeat:IPaddr2): Started DEM-2 ip_trf_tas (ocf::heartbeat:IPaddr2): Started DEM-2 Clone Set: cl_bird [d_bird] Started: [ DEM-1 DEM-2 ] Clone Set: cl_bird6 [d_bird6] d_bird6 (lsb:bird6): FAILED DEM-1 ( blocked ) Started: [ DEM-2 ] Clone Set: cl_tomcat_nms [d_tomcat_nms] Started: [ DEM-1 DEM-2 ] diff --git a/pengine/test10/clone-interleave-2.summary b/pengine/test10/clone-interleave-2.summary index 78d46cdc28..e4c9aa4aea 100644 --- a/pengine/test10/clone-interleave-2.summary +++ b/pengine/test10/clone-interleave-2.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-3 [child-3] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: - * Restart dummy (Started pcmk-1) due to required clone-3 running + * Restart dummy ( pcmk-1 ) due to required clone-3 running * Stop child-2:0 (pcmk-1) due to node availability * Stop child-3:0 (pcmk-1) Executing cluster transition: * Resource action: dummy stop on pcmk-1 * Pseudo action: clone-3_stop_0 * Resource action: child-3:2 stop on pcmk-1 * Pseudo action: clone-3_stopped_0 * Pseudo action: clone-3_start_0 * Pseudo action: clone-2_stop_0 * Pseudo action: clone-3_running_0 * Resource action: dummy start on pcmk-1 * Resource action: child-2:2 stop on pcmk-1 * Pseudo action: clone-2_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] Started: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-1 ] Clone Set: clone-3 [child-3] Started: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-1 ] diff --git a/pengine/test10/clone-interleave-3.summary b/pengine/test10/clone-interleave-3.summary index 8b13dc43f0..a40dc1df36 100644 --- a/pengine/test10/clone-interleave-3.summary +++ b/pengine/test10/clone-interleave-3.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] child-2 (ocf::pacemaker:Dummy): FAILED pcmk-1 Started: [ pcmk-2 pcmk-3 ] Clone Set: clone-3 [child-3] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: - * Restart dummy (Started pcmk-1) due to required clone-3 running - * Recover child-2:0 (Started pcmk-1) - * 
Restart child-3:0 (Started pcmk-1) due to required child-2:0 start + * Restart dummy ( pcmk-1 ) due to required clone-3 running + * Recover child-2:0 ( pcmk-1 ) + * Restart child-3:0 ( pcmk-1 ) due to required child-2:0 start Executing cluster transition: * Resource action: dummy stop on pcmk-1 * Pseudo action: clone-3_stop_0 * Resource action: child-3:2 stop on pcmk-1 * Pseudo action: clone-3_stopped_0 * Pseudo action: clone-2_stop_0 * Resource action: child-2:2 stop on pcmk-1 * Pseudo action: clone-2_stopped_0 * Pseudo action: clone-2_start_0 * Pseudo action: all_stopped * Resource action: child-2:2 start on pcmk-1 * Pseudo action: clone-2_running_0 * Pseudo action: clone-3_start_0 * Resource action: child-3:2 start on pcmk-1 * Pseudo action: clone-3_running_0 * Resource action: dummy start on pcmk-1 Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-3 [child-3] Started: [ pcmk-1 pcmk-2 pcmk-3 ] diff --git a/pengine/test10/clone-no-shuffle.summary b/pengine/test10/clone-no-shuffle.summary index 59ffbbe864..c69d8f3189 100644 --- a/pengine/test10/clone-no-shuffle.summary +++ b/pengine/test10/clone-no-shuffle.summary @@ -1,60 +1,60 @@ Current cluster status: Online: [ dktest1sles10 dktest2sles10 ] stonith-1 (stonith:dummy): Stopped Master/Slave Set: ms-drbd1 [drbd1] Masters: [ dktest2sles10 ] Stopped: [ dktest1sles10 ] testip (ocf::heartbeat:IPaddr2): Started dktest2sles10 Transition Summary: * Start stonith-1 (dktest1sles10) - * Demote drbd1:0 (Master -> Stopped dktest2sles10) + * Stop drbd1:0 ( Master dktest2sles10 ) due to node availability * Start drbd1:1 (dktest1sles10) * Stop testip (dktest2sles10) Executing cluster transition: * Resource action: stonith-1 monitor on dktest2sles10 * Resource action: stonith-1 monitor on dktest1sles10 * Resource action: drbd1:1 monitor on dktest1sles10 * Pseudo action: ms-drbd1_pre_notify_demote_0 * Resource action: testip monitor on dktest1sles10 * Resource action: stonith-1 start on dktest1sles10 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_demote_0 * Resource action: testip stop on dktest2sles10 * Pseudo action: ms-drbd1_demote_0 * Resource action: drbd1:0 demote on dktest2sles10 * Pseudo action: ms-drbd1_demoted_0 * Pseudo action: ms-drbd1_post_notify_demoted_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_demoted_0 * Pseudo action: ms-drbd1_pre_notify_stop_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_stop_0 * Pseudo action: ms-drbd1_stop_0 * Resource action: drbd1:0 stop on dktest2sles10 * Pseudo action: ms-drbd1_stopped_0 * Pseudo action: ms-drbd1_post_notify_stopped_0 * Pseudo action: ms-drbd1_confirmed-post_notify_stopped_0 * Pseudo action: ms-drbd1_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms-drbd1_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd1_start_0 * Resource action: drbd1:1 start on dktest1sles10 * Pseudo action: ms-drbd1_running_0 * Pseudo action: ms-drbd1_post_notify_running_0 * Resource action: drbd1:1 notify on dktest1sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_running_0 * Resource action: drbd1:1 monitor=11000 on dktest1sles10 Revised cluster status: Online: [ dktest1sles10 dktest2sles10 ] stonith-1 (stonith:dummy): Started dktest1sles10 
Master/Slave Set: ms-drbd1 [drbd1] Slaves: [ dktest1sles10 ] Stopped: [ dktest2sles10 ] testip (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/clone-require-all-2.summary b/pengine/test10/clone-require-all-2.summary index f5861e7c19..2ebd2307eb 100644 --- a/pengine/test10/clone-require-all-2.summary +++ b/pengine/test10/clone-require-all-2.summary @@ -1,41 +1,41 @@ Current cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Transition Summary: - * Move shooter (Started rhel7-auto1 -> rhel7-auto3) + * Move shooter ( rhel7-auto1 -> rhel7-auto3 ) * Stop A:0 (rhel7-auto1) due to node availability * Stop A:1 (rhel7-auto2) due to node availability - * Start B:0 (rhel7-auto4 - blocked) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory - * Start B:1 (rhel7-auto3 - blocked) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory + * Start B:0 ( rhel7-auto4 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory (blocked) + * Start B:1 ( rhel7-auto3 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory (blocked) Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: A-clone_stop_0 * Resource action: shooter start on rhel7-auto3 * Resource action: A stop on rhel7-auto1 * Resource action: A stop on rhel7-auto2 * Pseudo action: A-clone_stopped_0 * Pseudo action: all_stopped * Resource action: shooter monitor=60000 on rhel7-auto3 Revised cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 Clone Set: A-clone [A] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] diff --git a/pengine/test10/clone-require-all-3.summary b/pengine/test10/clone-require-all-3.summary index 1c887e506f..78076e52c3 100644 --- a/pengine/test10/clone-require-all-3.summary +++ b/pengine/test10/clone-require-all-3.summary @@ -1,46 +1,46 @@ Current cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] Transition Summary: - * Move shooter (Started rhel7-auto1 -> rhel7-auto3) + * Move shooter ( rhel7-auto1 -> rhel7-auto3 ) * Stop A:0 (rhel7-auto1) due to node availability * Stop A:1 (rhel7-auto2) due to node availability - * Stop B:0 (Started rhel7-auto3) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory - * Stop B:1 (Started rhel7-auto4) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory + * Stop B:0 ( rhel7-auto3 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory + * Stop B:1 ( rhel7-auto4 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: B-clone_stop_0 * Resource action: shooter start on rhel7-auto3 * Resource action: B stop on rhel7-auto3 * Resource action: B stop on rhel7-auto4 
* Pseudo action: B-clone_stopped_0 * Resource action: shooter monitor=60000 on rhel7-auto3 * Pseudo action: A-clone_stop_0 * Resource action: A stop on rhel7-auto1 * Resource action: A stop on rhel7-auto2 * Pseudo action: A-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 Clone Set: A-clone [A] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] diff --git a/pengine/test10/clone-require-all-4.summary b/pengine/test10/clone-require-all-4.summary index 90d2059708..5b50e04ae2 100644 --- a/pengine/test10/clone-require-all-4.summary +++ b/pengine/test10/clone-require-all-4.summary @@ -1,40 +1,40 @@ Current cluster status: Node rhel7-auto1 (1): standby Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] Transition Summary: - * Move shooter (Started rhel7-auto1 -> rhel7-auto2) + * Move shooter ( rhel7-auto1 -> rhel7-auto2 ) * Stop A:0 (rhel7-auto1) due to node availability Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: A-clone_stop_0 * Resource action: shooter start on rhel7-auto2 * Resource action: A stop on rhel7-auto1 * Pseudo action: A-clone_stopped_0 * Pseudo action: A-clone_start_0 * Pseudo action: all_stopped * Resource action: shooter monitor=60000 on rhel7-auto2 * Pseudo action: A-clone_running_0 Revised cluster status: Node rhel7-auto1 (1): standby Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto2 Clone Set: A-clone [A] Started: [ rhel7-auto2 ] Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] diff --git a/pengine/test10/clone-require-all-no-interleave-3.summary b/pengine/test10/clone-require-all-no-interleave-3.summary index 43796446c4..d45d078897 100644 --- a/pengine/test10/clone-require-all-no-interleave-3.summary +++ b/pengine/test10/clone-require-all-no-interleave-3.summary @@ -1,61 +1,61 @@ Current cluster status: Node rhel7-auto4 (4): standby Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] Clone Set: B-clone [B] Started: [ rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] Clone Set: C-clone [C] Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ] Stopped: [ rhel7-auto3 ] Transition Summary: - * Move A:0 (Started rhel7-auto4 -> rhel7-auto3) - * Move B:0 (Started rhel7-auto4 -> rhel7-auto3) - * Move C:0 (Started rhel7-auto4 -> rhel7-auto3) + * Move A:0 ( rhel7-auto4 -> rhel7-auto3 ) + * Move B:0 ( rhel7-auto4 -> rhel7-auto3 ) + * Move C:0 ( rhel7-auto4 -> rhel7-auto3 ) Executing cluster transition: * Pseudo action: C-clone_stop_0 * Resource action: C stop on rhel7-auto4 * Pseudo action: C-clone_stopped_0 * Pseudo action: B-clone_stop_0 * Resource action: B stop on rhel7-auto4 * Pseudo action: B-clone_stopped_0 * Pseudo action: A-clone_stop_0 * Resource action: A stop on rhel7-auto4 * Pseudo action: A-clone_stopped_0 * Pseudo action: A-clone_start_0 * 
Pseudo action: all_stopped * Resource action: A start on rhel7-auto3 * Pseudo action: A-clone_running_0 * Pseudo action: B-clone_start_0 * Resource action: A monitor=10000 on rhel7-auto3 * Resource action: B start on rhel7-auto3 * Pseudo action: B-clone_running_0 * Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory * Resource action: B monitor=10000 on rhel7-auto3 * Pseudo action: C-clone_start_0 * Resource action: C start on rhel7-auto3 * Pseudo action: C-clone_running_0 * Resource action: C monitor=10000 on rhel7-auto3 Revised cluster status: Node rhel7-auto4 (4): standby Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto3 ] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 ] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ] Clone Set: C-clone [C] Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] Stopped: [ rhel7-auto4 ] diff --git a/pengine/test10/clone_min_interleave_start_one.summary b/pengine/test10/clone_min_interleave_start_one.summary index 4ee71c4e41..f2e0c0c016 100644 --- a/pengine/test10/clone_min_interleave_start_one.summary +++ b/pengine/test10/clone_min_interleave_start_one.summary @@ -1,39 +1,39 @@ Current cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] Transition Summary: * Start FAKE1:0 (c7auto1) - * Start FAKE2:0 (c7auto2 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Start FAKE2:1 (c7auto3 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Start FAKE2:2 (c7auto1 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Start FAKE3:0 (c7auto2 - blocked) due to unrunnable FAKE2:0 start - * Start FAKE3:1 (c7auto3 - blocked) due to unrunnable FAKE2:1 start - * Start FAKE3:2 (c7auto1 - blocked) due to unrunnable FAKE2:2 start + * Start FAKE2:0 ( c7auto2 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory (blocked) + * Start FAKE2:1 ( c7auto3 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory (blocked) + * Start FAKE2:2 ( c7auto1 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory (blocked) + * Start FAKE3:0 ( c7auto2 ) due to unrunnable FAKE2:0 start (blocked) + * Start FAKE3:1 ( c7auto3 ) due to unrunnable FAKE2:1 start (blocked) + * Start FAKE3:2 ( c7auto1 ) due to unrunnable FAKE2:2 start (blocked) Executing cluster transition: * Pseudo action: FAKE1-clone_start_0 * Resource action: FAKE1 start on c7auto1 * Pseudo action: FAKE1-clone_running_0 * Resource action: FAKE1 monitor=10000 on c7auto1 Revised cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/clone_min_interleave_stop_two.summary b/pengine/test10/clone_min_interleave_stop_two.summary index 0866f3cd8d..ccdb5e9300 100644 --- a/pengine/test10/clone_min_interleave_stop_two.summary +++ b/pengine/test10/clone_min_interleave_stop_two.summary 
@@ -1,53 +1,53 @@ Current cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Started: [ c7auto1 c7auto2 c7auto3 ] Transition Summary: * Stop FAKE1:0 (c7auto3) due to node availability * Stop FAKE1:2 (c7auto2) due to node availability - * Stop FAKE2:0 (Started c7auto3) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Stop FAKE2:1 (Started c7auto1) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Stop FAKE2:2 (Started c7auto2) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory - * Stop FAKE3:0 (Started c7auto3) due to required FAKE2:0 start - * Stop FAKE3:1 (Started c7auto1) due to required FAKE2:1 start - * Stop FAKE3:2 (Started c7auto2) due to required FAKE2:2 start + * Stop FAKE2:0 ( c7auto3 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE2:1 ( c7auto1 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE2:2 ( c7auto2 ) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE3:0 ( c7auto3 ) due to required FAKE2:0 start + * Stop FAKE3:1 ( c7auto1 ) due to required FAKE2:1 start + * Stop FAKE3:2 ( c7auto2 ) due to required FAKE2:2 start Executing cluster transition: * Pseudo action: FAKE3-clone_stop_0 * Resource action: FAKE3 stop on c7auto3 * Resource action: FAKE3 stop on c7auto1 * Resource action: FAKE3 stop on c7auto2 * Pseudo action: FAKE3-clone_stopped_0 * Pseudo action: FAKE2-clone_stop_0 * Resource action: FAKE2 stop on c7auto3 * Resource action: FAKE2 stop on c7auto1 * Resource action: FAKE2 stop on c7auto2 * Pseudo action: FAKE2-clone_stopped_0 * Pseudo action: FAKE1-clone_stop_0 * Resource action: FAKE1 stop on c7auto3 * Resource action: FAKE1 stop on c7auto2 * Pseudo action: FAKE1-clone_stopped_0 * Pseudo action: FAKE1-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKE1-clone_running_0 Revised cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/clone_min_start_one.summary b/pengine/test10/clone_min_start_one.summary index 196f1b3039..504b81fa3f 100644 --- a/pengine/test10/clone_min_start_one.summary +++ b/pengine/test10/clone_min_start_one.summary @@ -1,37 +1,37 @@ Current cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped Transition Summary: - * Move shooter (Started c7auto1 -> c7auto3) + * Move shooter ( c7auto1 -> c7auto3 ) * Start FAKECLONE:0 (c7auto3) - * Start FAKE (c7auto4 - blocked) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory + * Start FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory (blocked) Executing cluster transition: * Resource action: shooter stop on c7auto1 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Resource action: shooter start on c7auto3 * 
Resource action: FAKECLONE start on c7auto3 * Pseudo action: FAKECLONE-clone_running_0 * Resource action: shooter monitor=60000 on c7auto3 * Resource action: FAKECLONE monitor=10000 on c7auto3 Revised cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto3 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto3 ] Stopped: [ c7auto1 c7auto2 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/clone_min_stop_all.summary b/pengine/test10/clone_min_stop_all.summary index 877d12f407..c12b0a3248 100644 --- a/pengine/test10/clone_min_stop_all.summary +++ b/pengine/test10/clone_min_stop_all.summary @@ -1,43 +1,43 @@ Current cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] FAKE (ocf::heartbeat:Dummy): Started c7auto4 Transition Summary: - * Move shooter (Started c7auto1 -> c7auto4) + * Move shooter ( c7auto1 -> c7auto4 ) * Stop FAKECLONE:0 (c7auto1) due to node availability * Stop FAKECLONE:1 (c7auto2) due to node availability * Stop FAKECLONE:2 (c7auto3) due to node availability - * Stop FAKE (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory + * Stop FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory Executing cluster transition: * Resource action: shooter stop on c7auto1 * Resource action: FAKE stop on c7auto4 * Resource action: shooter start on c7auto4 * Pseudo action: FAKECLONE-clone_stop_0 * Resource action: shooter monitor=60000 on c7auto4 * Resource action: FAKECLONE stop on c7auto1 * Resource action: FAKECLONE stop on c7auto2 * Resource action: FAKECLONE stop on c7auto3 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto4 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/clone_min_stop_two.summary b/pengine/test10/clone_min_stop_two.summary index 4d8c38f53a..5223501485 100644 --- a/pengine/test10/clone_min_stop_two.summary +++ b/pengine/test10/clone_min_stop_two.summary @@ -1,42 +1,42 @@ Current cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] FAKE (ocf::heartbeat:Dummy): Started c7auto4 Transition Summary: - * Move shooter (Started c7auto1 -> c7auto3) + * Move shooter ( c7auto1 -> c7auto3 ) * Stop FAKECLONE:0 (c7auto1) due to node availability * Stop FAKECLONE:1 (c7auto2) due to node availability - * Stop FAKE (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory + * Stop FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory Executing cluster transition: * Resource action: shooter stop on c7auto1 * Resource action: FAKE stop on c7auto4 * Resource action: shooter start on c7auto3 * Pseudo action: FAKECLONE-clone_stop_0 * Resource action: shooter monitor=60000 on c7auto3 * Resource action: FAKECLONE stop on c7auto1 * Resource action: FAKECLONE stop on 
c7auto2 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKECLONE-clone_running_0 Revised cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto3 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto3 ] Stopped: [ c7auto1 c7auto2 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/cloned-group-stop.summary b/pengine/test10/cloned-group-stop.summary index f7a980c116..4d5f3909a6 100644 --- a/pengine/test10/cloned-group-stop.summary +++ b/pengine/test10/cloned-group-stop.summary @@ -1,89 +1,89 @@ 2 of 20 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhos4-node3 rhos4-node4 ] virt-fencing (stonith:fence_xvm): Started rhos4-node3 Resource Group: mysql-group mysql-vip (ocf::heartbeat:IPaddr2): Started rhos4-node3 mysql-fs (ocf::heartbeat:Filesystem): Started rhos4-node3 mysql-db (ocf::heartbeat:mysql): Started rhos4-node3 Clone Set: qpidd-clone [qpidd] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: keystone-clone [keystone] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: glance-clone [glance] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: cinder-clone [cinder] Started: [ rhos4-node3 rhos4-node4 ] Transition Summary: * Stop qpidd:0 (rhos4-node4) due to node availability * Stop qpidd:1 (rhos4-node3) due to node availability - * Stop keystone:0 (Started rhos4-node4) due to unrunnable qpidd-clone running - * Stop keystone:1 (Started rhos4-node3) due to unrunnable qpidd-clone running - * Stop glance-fs:0 (Started rhos4-node4) due to required keystone-clone running - * Stop glance-registry:0 (Started rhos4-node4) due to required glance-fs:0 start - * Stop glance-api:0 (Started rhos4-node4) due to required glance-registry:0 start - * Stop glance-fs:1 (Started rhos4-node3) due to required keystone-clone running - * Stop glance-registry:1 (Started rhos4-node3) due to required glance-fs:1 start - * Stop glance-api:1 (Started rhos4-node3) due to required glance-registry:1 start - * Stop cinder-api:0 (Started rhos4-node4) due to required glance-clone running - * Stop cinder-scheduler:0 (Started rhos4-node4) due to required cinder-api:0 start - * Stop cinder-volume:0 (Started rhos4-node4) due to required cinder-scheduler:0 start - * Stop cinder-api:1 (Started rhos4-node3) due to required glance-clone running - * Stop cinder-scheduler:1 (Started rhos4-node3) due to required cinder-api:1 start - * Stop cinder-volume:1 (Started rhos4-node3) due to required cinder-scheduler:1 start + * Stop keystone:0 ( rhos4-node4 ) due to unrunnable qpidd-clone running + * Stop keystone:1 ( rhos4-node3 ) due to unrunnable qpidd-clone running + * Stop glance-fs:0 ( rhos4-node4 ) due to required keystone-clone running + * Stop glance-registry:0 ( rhos4-node4 ) due to required glance-fs:0 start + * Stop glance-api:0 ( rhos4-node4 ) due to required glance-registry:0 start + * Stop glance-fs:1 ( rhos4-node3 ) due to required keystone-clone running + * Stop glance-registry:1 ( rhos4-node3 ) due to required glance-fs:1 start + * Stop glance-api:1 ( rhos4-node3 ) due to required glance-registry:1 start + * Stop cinder-api:0 ( rhos4-node4 ) due to required glance-clone running + * Stop cinder-scheduler:0 ( rhos4-node4 ) due to required cinder-api:0 start + * Stop cinder-volume:0 ( rhos4-node4 ) due to required cinder-scheduler:0 start + * Stop cinder-api:1 ( rhos4-node3 ) due to 
required glance-clone running + * Stop cinder-scheduler:1 ( rhos4-node3 ) due to required cinder-api:1 start + * Stop cinder-volume:1 ( rhos4-node3 ) due to required cinder-scheduler:1 start Executing cluster transition: * Pseudo action: cinder-clone_stop_0 * Pseudo action: cinder:0_stop_0 * Resource action: cinder-volume stop on rhos4-node4 * Pseudo action: cinder:1_stop_0 * Resource action: cinder-volume stop on rhos4-node3 * Resource action: cinder-scheduler stop on rhos4-node4 * Resource action: cinder-scheduler stop on rhos4-node3 * Resource action: cinder-api stop on rhos4-node4 * Resource action: cinder-api stop on rhos4-node3 * Pseudo action: cinder:0_stopped_0 * Pseudo action: cinder:1_stopped_0 * Pseudo action: cinder-clone_stopped_0 * Pseudo action: glance-clone_stop_0 * Pseudo action: glance:0_stop_0 * Resource action: glance-api stop on rhos4-node4 * Pseudo action: glance:1_stop_0 * Resource action: glance-api stop on rhos4-node3 * Resource action: glance-registry stop on rhos4-node4 * Resource action: glance-registry stop on rhos4-node3 * Resource action: glance-fs stop on rhos4-node4 * Resource action: glance-fs stop on rhos4-node3 * Pseudo action: glance:0_stopped_0 * Pseudo action: glance:1_stopped_0 * Pseudo action: glance-clone_stopped_0 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhos4-node4 * Resource action: keystone stop on rhos4-node3 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: qpidd-clone_stop_0 * Resource action: qpidd stop on rhos4-node4 * Resource action: qpidd stop on rhos4-node3 * Pseudo action: qpidd-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhos4-node3 rhos4-node4 ] virt-fencing (stonith:fence_xvm): Started rhos4-node3 Resource Group: mysql-group mysql-vip (ocf::heartbeat:IPaddr2): Started rhos4-node3 mysql-fs (ocf::heartbeat:Filesystem): Started rhos4-node3 mysql-db (ocf::heartbeat:mysql): Started rhos4-node3 Clone Set: qpidd-clone [qpidd] Stopped (disabled): [ rhos4-node3 rhos4-node4 ] Clone Set: keystone-clone [keystone] Stopped: [ rhos4-node3 rhos4-node4 ] Clone Set: glance-clone [glance] Stopped: [ rhos4-node3 rhos4-node4 ] Clone Set: cinder-clone [cinder] Stopped: [ rhos4-node3 rhos4-node4 ] diff --git a/pengine/test10/cloned-group.summary b/pengine/test10/cloned-group.summary index e1456b9f69..1c4f277e97 100644 --- a/pengine/test10/cloned-group.summary +++ b/pengine/test10/cloned-group.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ webcluster01 ] OFFLINE: [ webcluster02 ] Clone Set: apache2_clone [grrr] Resource Group: grrr:2 apache2 (ocf::heartbeat:apache): ORPHANED Started webcluster01 mysql-proxy (lsb:mysql-proxy): ORPHANED Started webcluster01 Started: [ webcluster01 ] Stopped: [ webcluster02 ] Transition Summary: - * Restart apache2:0 (Started webcluster01) - * Restart mysql-proxy:0 (Started webcluster01) due to required apache2:0 start + * Restart apache2:0 ( webcluster01 ) + * Restart mysql-proxy:0 ( webcluster01 ) due to required apache2:0 start * Stop apache2:2 (webcluster01) due to node availability * Stop mysql-proxy:2 (webcluster01) due to node availability Executing cluster transition: * Pseudo action: apache2_clone_stop_0 * Pseudo action: grrr:0_stop_0 * Resource action: mysql-proxy:1 stop on webcluster01 * Pseudo action: grrr:2_stop_0 * Resource action: mysql-proxy:0 stop on webcluster01 * Resource action: apache2:1 stop on webcluster01 * Resource action: apache2:0 stop on webcluster01 * Pseudo action: all_stopped * Pseudo action: 
grrr:0_stopped_0 * Pseudo action: grrr:2_stopped_0 * Pseudo action: apache2_clone_stopped_0 * Pseudo action: apache2_clone_start_0 * Pseudo action: grrr:0_start_0 * Resource action: apache2:1 start on webcluster01 * Resource action: apache2:1 monitor=10000 on webcluster01 * Resource action: mysql-proxy:1 start on webcluster01 * Resource action: mysql-proxy:1 monitor=10000 on webcluster01 * Pseudo action: grrr:0_running_0 * Pseudo action: apache2_clone_running_0 Revised cluster status: Online: [ webcluster01 ] OFFLINE: [ webcluster02 ] Clone Set: apache2_clone [grrr] Started: [ webcluster01 ] Stopped: [ webcluster02 ] diff --git a/pengine/test10/cloned_start_one.summary b/pengine/test10/cloned_start_one.summary index 5dedc18dd8..38bf2aef21 100644 --- a/pengine/test10/cloned_start_one.summary +++ b/pengine/test10/cloned_start_one.summary @@ -1,41 +1,41 @@ Current cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: * Start FAKECLONE:0 (c7auto1) * Stop FAKECLONE2:0 (c7auto3) due to node availability - * Stop FAKECLONE2:1 (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory + * Stop FAKECLONE2:1 ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing cluster transition: * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE start on c7auto1 * Pseudo action: FAKECLONE-clone_running_0 * Resource action: FAKECLONE2 stop on c7auto3 * Resource action: FAKECLONE2 stop on c7auto4 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: all_stopped * Resource action: FAKECLONE monitor=10000 on c7auto1 Revised cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] diff --git a/pengine/test10/cloned_stop_two.summary b/pengine/test10/cloned_stop_two.summary index 8f1e0398ea..d6a4f2cff4 100644 --- a/pengine/test10/cloned_stop_two.summary +++ b/pengine/test10/cloned_stop_two.summary @@ -1,45 +1,45 @@ Current cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: * Stop FAKECLONE:1 (c7auto2) due to node availability * Stop FAKECLONE:2 (c7auto3) due to node availability * Stop FAKECLONE2:0 (c7auto3) due to node availability - * Stop FAKECLONE2:1 (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory + * Stop FAKECLONE2:1 ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing cluster transition: * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE2 stop on c7auto3 * Resource action: FAKECLONE2 stop on c7auto4 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: 
FAKECLONE-clone_stop_0 * Resource action: FAKECLONE stop on c7auto2 * Resource action: FAKECLONE stop on c7auto3 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKECLONE-clone_running_0 Revised cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] diff --git a/pengine/test10/colo_master_w_native.summary b/pengine/test10/colo_master_w_native.summary index 98774c12f6..fda8e85e5f 100644 --- a/pengine/test10/colo_master_w_native.summary +++ b/pengine/test10/colo_master_w_native.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Master/Slave Set: MS_RSC [MS_RSC_NATIVE] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: - * Demote MS_RSC_NATIVE:0 (Master -> Slave node2) + * Demote MS_RSC_NATIVE:0 ( Master -> Slave node2 ) * Promote MS_RSC_NATIVE:1 (Slave -> Master node1) Executing cluster transition: * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 * Pseudo action: MS_RSC_pre_notify_demote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 * Pseudo action: MS_RSC_demote_0 * Resource action: MS_RSC_NATIVE:0 demote on node2 * Pseudo action: MS_RSC_demoted_0 * Pseudo action: MS_RSC_post_notify_demoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 * Pseudo action: MS_RSC_pre_notify_promote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 * Pseudo action: MS_RSC_promote_0 * Resource action: MS_RSC_NATIVE:1 promote on node1 * Pseudo action: MS_RSC_promoted_0 * Pseudo action: MS_RSC_post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Master/Slave Set: MS_RSC [MS_RSC_NATIVE] Masters: [ node1 ] Slaves: [ node2 ] diff --git a/pengine/test10/colo_slave_w_native.summary b/pengine/test10/colo_slave_w_native.summary index 705935e37d..f59d93b286 100644 --- a/pengine/test10/colo_slave_w_native.summary +++ b/pengine/test10/colo_slave_w_native.summary @@ -1,52 +1,52 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Master/Slave Set: MS_RSC [MS_RSC_NATIVE] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: - * Move A (Started node1 -> node2) - * Demote MS_RSC_NATIVE:0 (Master -> Slave node2) + * Move A ( node1 -> node2 ) + * Demote MS_RSC_NATIVE:0 ( Master -> Slave node2 ) * Promote MS_RSC_NATIVE:1 (Slave -> Master node1) Executing cluster transition: * Resource action: A stop on node1 * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 * Pseudo action: MS_RSC_pre_notify_demote_0 * Pseudo action: all_stopped * Resource action: A start on node2 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on 
node1 * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 * Pseudo action: MS_RSC_demote_0 * Resource action: A monitor=10000 on node2 * Resource action: MS_RSC_NATIVE:0 demote on node2 * Pseudo action: MS_RSC_demoted_0 * Pseudo action: MS_RSC_post_notify_demoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 * Pseudo action: MS_RSC_pre_notify_promote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 * Pseudo action: MS_RSC_promote_0 * Resource action: MS_RSC_NATIVE:1 promote on node1 * Pseudo action: MS_RSC_promoted_0 * Pseudo action: MS_RSC_post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node2 Master/Slave Set: MS_RSC [MS_RSC_NATIVE] Masters: [ node1 ] Slaves: [ node2 ] diff --git a/pengine/test10/coloc-clone-stays-active.summary b/pengine/test10/coloc-clone-stays-active.summary index b0171377f3..df9b92c58a 100644 --- a/pengine/test10/coloc-clone-stays-active.summary +++ b/pengine/test10/coloc-clone-stays-active.summary @@ -1,207 +1,207 @@ 12 of 87 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ s01-0 s01-1 ] stonith-s01-0 (stonith:external/ipmi): Started s01-1 stonith-s01-1 (stonith:external/ipmi): Started s01-0 Resource Group: iscsi-pool-0-target-all iscsi-pool-0-target (ocf::vds-ok:iSCSITarget): Started s01-0 iscsi-pool-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-0 Resource Group: iscsi-pool-0-vips vip-235 (ocf::heartbeat:IPaddr2): Started s01-0 vip-236 (ocf::heartbeat:IPaddr2): Started s01-0 Resource Group: iscsi-pool-1-target-all iscsi-pool-1-target (ocf::vds-ok:iSCSITarget): Started s01-1 iscsi-pool-1-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-1 Resource Group: iscsi-pool-1-vips vip-237 (ocf::heartbeat:IPaddr2): Started s01-1 vip-238 (ocf::heartbeat:IPaddr2): Started s01-1 Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1] Masters: [ s01-1 ] Slaves: [ s01-0 ] Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] Masters: [ s01-1 ] Slaves: [ s01-0 ] Clone Set: cl-o2cb [o2cb] Stopped (disabled): [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-service [drbd-s01-service] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-service-fs [s01-service-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-ietd [ietd] Started: [ s01-0 s01-1 ] Clone Set: cl-dhcpd [dhcpd] Stopped (disabled): [ s01-0 s01-1 ] Resource Group: http-server vip-233 (ocf::heartbeat:IPaddr2): Started s01-0 nginx (lsb:nginx): Stopped ( disabled ) Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-logs-fs [s01-logs-fs] Started: [ s01-0 s01-1 ] Resource Group: syslog-server vip-234 (ocf::heartbeat:IPaddr2): Started s01-1 syslog-ng (ocf::heartbeat:syslog-ng): Started s01-1 Resource Group: tftp-server vip-232 (ocf::heartbeat:IPaddr2): Stopped tftpd (ocf::heartbeat:Xinetd): Stopped Clone Set: cl-xinetd [xinetd] Started: [ s01-0 s01-1 ] Clone Set: 
cl-ospf-routing [ospf-routing] Started: [ s01-0 s01-1 ] Clone Set: connected-outer [ping-bmc-and-switch] Started: [ s01-0 s01-1 ] Resource Group: iscsi-vds-dom0-stateless-0-target-all iscsi-vds-dom0-stateless-0-target (ocf::vds-ok:iSCSITarget): Stopped ( disabled ) iscsi-vds-dom0-stateless-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Stopped ( disabled ) Resource Group: iscsi-vds-dom0-stateless-0-vips vip-227 (ocf::heartbeat:IPaddr2): Stopped vip-228 (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] Slaves: [ s01-0 s01-1 ] Clone Set: cl-dlm [dlm] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] Stopped (disabled): [ s01-0 s01-1 ] Clone Set: cl-gfs2 [gfs2] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-http [drbd-vds-http] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-http-fs [vds-http-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-clvmd [clvmd] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data] Started: [ s01-0 s01-1 ] mgmt-vm (ocf::vds-ok:VirtualDomain): Started s01-0 Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service] Started: [ s01-0 s01-1 ] Clone Set: cl-libvirtd [libvirtd] Started: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool] Started: [ s01-0 s01-1 ] Transition Summary: - * Migrate mgmt-vm (Started s01-0 -> s01-1) + * Migrate mgmt-vm ( s01-0 -> s01-1 ) Executing cluster transition: * Resource action: mgmt-vm migrate_to on s01-0 * Resource action: mgmt-vm migrate_from on s01-1 * Resource action: mgmt-vm stop on s01-0 * Pseudo action: all_stopped * Pseudo action: mgmt-vm_start_0 * Resource action: mgmt-vm monitor=10000 on s01-1 Revised cluster status: Online: [ s01-0 s01-1 ] stonith-s01-0 (stonith:external/ipmi): Started s01-1 stonith-s01-1 (stonith:external/ipmi): Started s01-0 Resource Group: iscsi-pool-0-target-all iscsi-pool-0-target (ocf::vds-ok:iSCSITarget): Started s01-0 iscsi-pool-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-0 Resource Group: iscsi-pool-0-vips vip-235 (ocf::heartbeat:IPaddr2): Started s01-0 vip-236 (ocf::heartbeat:IPaddr2): Started s01-0 Resource Group: iscsi-pool-1-target-all iscsi-pool-1-target (ocf::vds-ok:iSCSITarget): Started s01-1 iscsi-pool-1-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started s01-1 Resource Group: iscsi-pool-1-vips vip-237 (ocf::heartbeat:IPaddr2): Started s01-1 vip-238 (ocf::heartbeat:IPaddr2): Started s01-1 Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1] Masters: [ s01-1 ] Slaves: [ s01-0 ] Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] Masters: [ s01-1 ] Slaves: [ s01-0 ] Clone Set: cl-o2cb [o2cb] Stopped (disabled): [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-service [drbd-s01-service] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-service-fs [s01-service-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-ietd [ietd] Started: [ s01-0 s01-1 ] Clone Set: cl-dhcpd [dhcpd] Stopped (disabled): [ s01-0 s01-1 ] Resource Group: 
http-server vip-233 (ocf::heartbeat:IPaddr2): Started s01-0 nginx (lsb:nginx): Stopped ( disabled ) Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-logs-fs [s01-logs-fs] Started: [ s01-0 s01-1 ] Resource Group: syslog-server vip-234 (ocf::heartbeat:IPaddr2): Started s01-1 syslog-ng (ocf::heartbeat:syslog-ng): Started s01-1 Resource Group: tftp-server vip-232 (ocf::heartbeat:IPaddr2): Stopped tftpd (ocf::heartbeat:Xinetd): Stopped Clone Set: cl-xinetd [xinetd] Started: [ s01-0 s01-1 ] Clone Set: cl-ospf-routing [ospf-routing] Started: [ s01-0 s01-1 ] Clone Set: connected-outer [ping-bmc-and-switch] Started: [ s01-0 s01-1 ] Resource Group: iscsi-vds-dom0-stateless-0-target-all iscsi-vds-dom0-stateless-0-target (ocf::vds-ok:iSCSITarget): Stopped ( disabled ) iscsi-vds-dom0-stateless-0-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Stopped ( disabled ) Resource Group: iscsi-vds-dom0-stateless-0-vips vip-227 (ocf::heartbeat:IPaddr2): Stopped vip-228 (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] Masters: [ s01-0 ] Slaves: [ s01-1 ] Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] Slaves: [ s01-0 s01-1 ] Clone Set: cl-dlm [dlm] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] Stopped (disabled): [ s01-0 s01-1 ] Clone Set: cl-gfs2 [gfs2] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-vds-http [drbd-vds-http] Masters: [ s01-0 s01-1 ] Clone Set: cl-vds-http-fs [vds-http-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-clvmd [clvmd] Started: [ s01-0 s01-1 ] Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] Masters: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs] Started: [ s01-0 s01-1 ] Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data] Started: [ s01-0 s01-1 ] mgmt-vm (ocf::vds-ok:VirtualDomain): Started s01-1 Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service] Started: [ s01-0 s01-1 ] Clone Set: cl-libvirtd [libvirtd] Started: [ s01-0 s01-1 ] Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool] Started: [ s01-0 s01-1 ] diff --git a/pengine/test10/coloc-intra-set.summary b/pengine/test10/coloc-intra-set.summary index caefd3e2ef..e313d5a875 100644 --- a/pengine/test10/coloc-intra-set.summary +++ b/pengine/test10/coloc-intra-set.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ hex-13 hex-14 ] fencing-sbd (stonith:external/sbd): Started hex-13 dummy0 (ocf::heartbeat:Dummy): Started hex-14 dummy1 (ocf::heartbeat:Dummy): Started hex-13 dummy2 (ocf::heartbeat:Dummy): Started hex-14 dummy3 (ocf::heartbeat:Dummy): Started hex-13 Transition Summary: - * Move dummy1 (Started hex-13 -> hex-14) - * Move dummy3 (Started hex-13 -> hex-14) + * Move dummy1 ( hex-13 -> hex-14 ) + * Move dummy3 ( hex-13 -> hex-14 ) Executing cluster transition: * Resource action: dummy1 stop on hex-13 * Resource action: dummy3 stop on hex-13 * Resource action: d0:0 delete on hex-13 * Resource action: o2cb:0 delete on hex-13 * Resource action: dummy4 delete on hex-13 * Resource action: dlm:0 delete on hex-13 * Resource action: ocfs2-3:0 delete on hex-13 * Pseudo action: all_stopped * Resource action: dummy1 start on hex-14 * Resource action: dummy3 start on hex-14 * Resource action: dummy1 monitor=15000 on hex-14 * Resource action: dummy3 monitor=15000 on hex-14 Revised cluster status: Online: [ hex-13 hex-14 ] fencing-sbd 
(stonith:external/sbd): Started hex-13 dummy0 (ocf::heartbeat:Dummy): Started hex-14 dummy1 (ocf::heartbeat:Dummy): Started hex-14 dummy2 (ocf::heartbeat:Dummy): Started hex-14 dummy3 (ocf::heartbeat:Dummy): Started hex-14 diff --git a/pengine/test10/coloc_fp_logic.summary b/pengine/test10/coloc_fp_logic.summary index bf479d8476..0c2f4b9f96 100644 --- a/pengine/test10/coloc_fp_logic.summary +++ b/pengine/test10/coloc_fp_logic.summary @@ -1,22 +1,22 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 B (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Move A (Started node1 -> node2) + * Move A ( node1 -> node2 ) Executing cluster transition: * Resource action: A stop on node1 * Pseudo action: all_stopped * Resource action: A start on node2 * Resource action: A monitor=10000 on node2 Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node2 B (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/colocation_constraint_stops_master.summary b/pengine/test10/colocation_constraint_stops_master.summary index e0b69765fb..e4b8697d1c 100644 --- a/pengine/test10/colocation_constraint_stops_master.summary +++ b/pengine/test10/colocation_constraint_stops_master.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Masters: [ fc16-builder ] Transition Summary: - * Demote NATIVE_RSC_A:0 (Master -> Stopped fc16-builder) + * Stop NATIVE_RSC_A:0 ( Master fc16-builder ) due to node availability Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] diff --git a/pengine/test10/colocation_constraint_stops_slave.summary b/pengine/test10/colocation_constraint_stops_slave.summary index a97b74b39a..4af4415f3c 100644 --- a/pengine/test10/colocation_constraint_stops_slave.summary +++ b/pengine/test10/colocation_constraint_stops_slave.summary @@ -1,34 +1,34 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Slaves: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop NATIVE_RSC_A:0 (fc16-builder) due to node availability + * Stop NATIVE_RSC_A:0 ( Slave fc16-builder ) due to node availability * Stop NATIVE_RSC_B (fc16-builder) Executing cluster transition: * Pseudo 
action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/complex_enforce_colo.summary b/pengine/test10/complex_enforce_colo.summary index a21d5c1330..2332269484 100644 --- a/pengine/test10/complex_enforce_colo.summary +++ b/pengine/test10/complex_enforce_colo.summary @@ -1,453 +1,453 @@ 3 of 132 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] node1-fence (stonith:fence_xvm): Started rhos6-node1 node2-fence (stonith:fence_xvm): Started rhos6-node2 node3-fence (stonith:fence_xvm): Started rhos6-node3 Clone Set: lb-haproxy-clone [lb-haproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] vip-db (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-rabbitmq (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-qpid (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-keystone (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-glance (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-cinder (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-swift (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-neutron (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-nova (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-horizon (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-heat (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-ceilometer (ocf::heartbeat:IPaddr2): Started rhos6-node3 Master/Slave Set: galera-master [galera] Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: rabbitmq-server-clone [rabbitmq-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: memcached-clone [memcached] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: mongodb-clone [mongodb] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: keystone-clone [keystone] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-fs-clone [glance-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-registry-clone [glance-registry] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-api-clone [glance-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] cinder-api (systemd:openstack-cinder-api): Started rhos6-node1 cinder-scheduler (systemd:openstack-cinder-scheduler): Started rhos6-node1 cinder-volume (systemd:openstack-cinder-volume): Started rhos6-node1 Clone Set: swift-fs-clone [swift-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-account-clone [swift-account] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-container-clone [swift-container] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-object-clone [swift-object] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-proxy-clone [swift-proxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] swift-object-expirer (systemd:openstack-swift-object-expirer): Started rhos6-node2 Clone Set: 
neutron-server-clone [neutron-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-scale-clone [neutron-scale] (unique) neutron-scale:0 (ocf::neutron:NeutronScale): Started rhos6-node3 neutron-scale:1 (ocf::neutron:NeutronScale): Started rhos6-node2 neutron-scale:2 (ocf::neutron:NeutronScale): Started rhos6-node1 Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-consoleauth-clone [nova-consoleauth] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-novncproxy-clone [nova-novncproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-api-clone [nova-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-scheduler-clone [nova-scheduler] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-conductor-clone [nova-conductor] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] ceilometer-central (systemd:openstack-ceilometer-central): Started rhos6-node3 Clone Set: ceilometer-collector-clone [ceilometer-collector] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-api-clone [ceilometer-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-delay-clone [ceilometer-delay] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-notification-clone [ceilometer-notification] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-clone [heat-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cfn-clone [heat-api-cfn] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] heat-engine (systemd:openstack-heat-engine): Started rhos6-node2 Clone Set: horizon-clone [horizon] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Transition Summary: * Stop keystone:0 (rhos6-node1) due to node availability * Stop keystone:1 (rhos6-node2) due to node availability * Stop keystone:2 (rhos6-node3) due to node availability * Stop glance-registry:0 (rhos6-node1) * Stop glance-registry:1 (rhos6-node2) * Stop glance-registry:2 (rhos6-node3) * Stop glance-api:0 (rhos6-node1) * Stop glance-api:1 (rhos6-node2) * Stop glance-api:2 (rhos6-node3) - * Stop cinder-api (Started rhos6-node1) due to unrunnable keystone-clone running - * Stop cinder-scheduler (Started rhos6-node1) due to required cinder-api start - * Stop cinder-volume (Started rhos6-node1) + * Stop cinder-api ( rhos6-node1 ) due to unrunnable keystone-clone running + * Stop cinder-scheduler ( rhos6-node1 ) due to required cinder-api start + * Stop cinder-volume ( rhos6-node1 ) * Stop swift-account:0 (rhos6-node1) * Stop swift-account:1 (rhos6-node2) * 
Stop swift-account:2 (rhos6-node3) * Stop swift-container:0 (rhos6-node1) * Stop swift-container:1 (rhos6-node2) * Stop swift-container:2 (rhos6-node3) * Stop swift-object:0 (rhos6-node1) * Stop swift-object:1 (rhos6-node2) * Stop swift-object:2 (rhos6-node3) * Stop swift-proxy:0 (rhos6-node1) * Stop swift-proxy:1 (rhos6-node2) * Stop swift-proxy:2 (rhos6-node3) - * Stop swift-object-expirer (Started rhos6-node2) due to required swift-proxy-clone running + * Stop swift-object-expirer ( rhos6-node2 ) due to required swift-proxy-clone running * Stop neutron-server:0 (rhos6-node1) * Stop neutron-server:1 (rhos6-node2) * Stop neutron-server:2 (rhos6-node3) * Stop neutron-scale:0 (rhos6-node3) * Stop neutron-scale:1 (rhos6-node2) * Stop neutron-scale:2 (rhos6-node1) * Stop neutron-ovs-cleanup:0 (rhos6-node1) * Stop neutron-ovs-cleanup:1 (rhos6-node2) * Stop neutron-ovs-cleanup:2 (rhos6-node3) * Stop neutron-netns-cleanup:0 (rhos6-node1) * Stop neutron-netns-cleanup:1 (rhos6-node2) * Stop neutron-netns-cleanup:2 (rhos6-node3) * Stop neutron-openvswitch-agent:0 (rhos6-node1) * Stop neutron-openvswitch-agent:1 (rhos6-node2) * Stop neutron-openvswitch-agent:2 (rhos6-node3) * Stop neutron-dhcp-agent:0 (rhos6-node1) * Stop neutron-dhcp-agent:1 (rhos6-node2) * Stop neutron-dhcp-agent:2 (rhos6-node3) * Stop neutron-l3-agent:0 (rhos6-node1) * Stop neutron-l3-agent:1 (rhos6-node2) * Stop neutron-l3-agent:2 (rhos6-node3) * Stop neutron-metadata-agent:0 (rhos6-node1) * Stop neutron-metadata-agent:1 (rhos6-node2) * Stop neutron-metadata-agent:2 (rhos6-node3) * Stop nova-consoleauth:0 (rhos6-node1) * Stop nova-consoleauth:1 (rhos6-node2) * Stop nova-consoleauth:2 (rhos6-node3) * Stop nova-novncproxy:0 (rhos6-node1) * Stop nova-novncproxy:1 (rhos6-node2) * Stop nova-novncproxy:2 (rhos6-node3) * Stop nova-api:0 (rhos6-node1) * Stop nova-api:1 (rhos6-node2) * Stop nova-api:2 (rhos6-node3) * Stop nova-scheduler:0 (rhos6-node1) * Stop nova-scheduler:1 (rhos6-node2) * Stop nova-scheduler:2 (rhos6-node3) * Stop nova-conductor:0 (rhos6-node1) * Stop nova-conductor:1 (rhos6-node2) * Stop nova-conductor:2 (rhos6-node3) - * Stop ceilometer-central (Started rhos6-node3) due to unrunnable keystone-clone running - * Stop ceilometer-collector:0 (Started rhos6-node1) due to required ceilometer-central start - * Stop ceilometer-collector:1 (Started rhos6-node2) due to required ceilometer-central start - * Stop ceilometer-collector:2 (Started rhos6-node3) due to required ceilometer-central start - * Stop ceilometer-api:0 (Started rhos6-node1) due to required ceilometer-collector:0 start - * Stop ceilometer-api:1 (Started rhos6-node2) due to required ceilometer-collector:1 start - * Stop ceilometer-api:2 (Started rhos6-node3) due to required ceilometer-collector:2 start - * Stop ceilometer-delay:0 (Started rhos6-node1) due to required ceilometer-api:0 start - * Stop ceilometer-delay:1 (Started rhos6-node2) due to required ceilometer-api:1 start - * Stop ceilometer-delay:2 (Started rhos6-node3) due to required ceilometer-api:2 start - * Stop ceilometer-alarm-evaluator:0 (Started rhos6-node1) due to required ceilometer-delay:0 start - * Stop ceilometer-alarm-evaluator:1 (Started rhos6-node2) due to required ceilometer-delay:1 start - * Stop ceilometer-alarm-evaluator:2 (Started rhos6-node3) due to required ceilometer-delay:2 start - * Stop ceilometer-alarm-notifier:0 (Started rhos6-node1) due to required ceilometer-alarm-evaluator:0 start - * Stop ceilometer-alarm-notifier:1 (Started rhos6-node2) due to required 
ceilometer-alarm-evaluator:1 start - * Stop ceilometer-alarm-notifier:2 (Started rhos6-node3) due to required ceilometer-alarm-evaluator:2 start - * Stop ceilometer-notification:0 (Started rhos6-node1) due to required ceilometer-alarm-notifier:0 start - * Stop ceilometer-notification:1 (Started rhos6-node2) due to required ceilometer-alarm-notifier:1 start - * Stop ceilometer-notification:2 (Started rhos6-node3) due to required ceilometer-alarm-notifier:2 start - * Stop heat-api:0 (Started rhos6-node1) due to required ceilometer-notification:0 start - * Stop heat-api:1 (Started rhos6-node2) due to required ceilometer-notification:1 start - * Stop heat-api:2 (Started rhos6-node3) due to required ceilometer-notification:2 start - * Stop heat-api-cfn:0 (Started rhos6-node1) due to required heat-api:0 start - * Stop heat-api-cfn:1 (Started rhos6-node2) due to required heat-api:1 start - * Stop heat-api-cfn:2 (Started rhos6-node3) due to required heat-api:2 start - * Stop heat-api-cloudwatch:0 (Started rhos6-node1) due to required heat-api-cfn:0 start - * Stop heat-api-cloudwatch:1 (Started rhos6-node2) due to required heat-api-cfn:1 start - * Stop heat-api-cloudwatch:2 (Started rhos6-node3) due to required heat-api-cfn:2 start - * Stop heat-engine (Started rhos6-node2) due to required heat-api-cloudwatch-clone running + * Stop ceilometer-central ( rhos6-node3 ) due to unrunnable keystone-clone running + * Stop ceilometer-collector:0 ( rhos6-node1 ) due to required ceilometer-central start + * Stop ceilometer-collector:1 ( rhos6-node2 ) due to required ceilometer-central start + * Stop ceilometer-collector:2 ( rhos6-node3 ) due to required ceilometer-central start + * Stop ceilometer-api:0 ( rhos6-node1 ) due to required ceilometer-collector:0 start + * Stop ceilometer-api:1 ( rhos6-node2 ) due to required ceilometer-collector:1 start + * Stop ceilometer-api:2 ( rhos6-node3 ) due to required ceilometer-collector:2 start + * Stop ceilometer-delay:0 ( rhos6-node1 ) due to required ceilometer-api:0 start + * Stop ceilometer-delay:1 ( rhos6-node2 ) due to required ceilometer-api:1 start + * Stop ceilometer-delay:2 ( rhos6-node3 ) due to required ceilometer-api:2 start + * Stop ceilometer-alarm-evaluator:0 ( rhos6-node1 ) due to required ceilometer-delay:0 start + * Stop ceilometer-alarm-evaluator:1 ( rhos6-node2 ) due to required ceilometer-delay:1 start + * Stop ceilometer-alarm-evaluator:2 ( rhos6-node3 ) due to required ceilometer-delay:2 start + * Stop ceilometer-alarm-notifier:0 ( rhos6-node1 ) due to required ceilometer-alarm-evaluator:0 start + * Stop ceilometer-alarm-notifier:1 ( rhos6-node2 ) due to required ceilometer-alarm-evaluator:1 start + * Stop ceilometer-alarm-notifier:2 ( rhos6-node3 ) due to required ceilometer-alarm-evaluator:2 start + * Stop ceilometer-notification:0 ( rhos6-node1 ) due to required ceilometer-alarm-notifier:0 start + * Stop ceilometer-notification:1 ( rhos6-node2 ) due to required ceilometer-alarm-notifier:1 start + * Stop ceilometer-notification:2 ( rhos6-node3 ) due to required ceilometer-alarm-notifier:2 start + * Stop heat-api:0 ( rhos6-node1 ) due to required ceilometer-notification:0 start + * Stop heat-api:1 ( rhos6-node2 ) due to required ceilometer-notification:1 start + * Stop heat-api:2 ( rhos6-node3 ) due to required ceilometer-notification:2 start + * Stop heat-api-cfn:0 ( rhos6-node1 ) due to required heat-api:0 start + * Stop heat-api-cfn:1 ( rhos6-node2 ) due to required heat-api:1 start + * Stop heat-api-cfn:2 ( rhos6-node3 ) due to required 
heat-api:2 start + * Stop heat-api-cloudwatch:0 ( rhos6-node1 ) due to required heat-api-cfn:0 start + * Stop heat-api-cloudwatch:1 ( rhos6-node2 ) due to required heat-api-cfn:1 start + * Stop heat-api-cloudwatch:2 ( rhos6-node3 ) due to required heat-api-cfn:2 start + * Stop heat-engine ( rhos6-node2 ) due to required heat-api-cloudwatch-clone running Executing cluster transition: * Pseudo action: glance-api-clone_stop_0 * Resource action: cinder-volume stop on rhos6-node1 * Pseudo action: swift-object-clone_stop_0 * Resource action: swift-object-expirer stop on rhos6-node2 * Pseudo action: neutron-metadata-agent-clone_stop_0 * Pseudo action: nova-conductor-clone_stop_0 * Resource action: heat-engine stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node1 * Resource action: glance-api stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node3 * Pseudo action: glance-api-clone_stopped_0 * Resource action: cinder-scheduler stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node2 * Resource action: swift-object stop on rhos6-node3 * Pseudo action: swift-object-clone_stopped_0 * Pseudo action: swift-proxy-clone_stop_0 * Resource action: neutron-metadata-agent stop on rhos6-node1 * Resource action: neutron-metadata-agent stop on rhos6-node2 * Resource action: neutron-metadata-agent stop on rhos6-node3 * Pseudo action: neutron-metadata-agent-clone_stopped_0 * Resource action: nova-conductor stop on rhos6-node1 * Resource action: nova-conductor stop on rhos6-node2 * Resource action: nova-conductor stop on rhos6-node3 * Pseudo action: nova-conductor-clone_stopped_0 * Pseudo action: heat-api-cloudwatch-clone_stop_0 * Pseudo action: glance-registry-clone_stop_0 * Resource action: cinder-api stop on rhos6-node1 * Pseudo action: swift-container-clone_stop_0 * Resource action: swift-proxy stop on rhos6-node1 * Resource action: swift-proxy stop on rhos6-node2 * Resource action: swift-proxy stop on rhos6-node3 * Pseudo action: swift-proxy-clone_stopped_0 * Pseudo action: neutron-l3-agent-clone_stop_0 * Pseudo action: nova-scheduler-clone_stop_0 * Resource action: heat-api-cloudwatch stop on rhos6-node1 * Resource action: heat-api-cloudwatch stop on rhos6-node2 * Resource action: heat-api-cloudwatch stop on rhos6-node3 * Pseudo action: heat-api-cloudwatch-clone_stopped_0 * Resource action: glance-registry stop on rhos6-node1 * Resource action: glance-registry stop on rhos6-node2 * Resource action: glance-registry stop on rhos6-node3 * Pseudo action: glance-registry-clone_stopped_0 * Resource action: swift-container stop on rhos6-node1 * Resource action: swift-container stop on rhos6-node2 * Resource action: swift-container stop on rhos6-node3 * Pseudo action: swift-container-clone_stopped_0 * Resource action: neutron-l3-agent stop on rhos6-node1 * Resource action: neutron-l3-agent stop on rhos6-node2 * Resource action: neutron-l3-agent stop on rhos6-node3 * Pseudo action: neutron-l3-agent-clone_stopped_0 * Resource action: nova-scheduler stop on rhos6-node1 * Resource action: nova-scheduler stop on rhos6-node2 * Resource action: nova-scheduler stop on rhos6-node3 * Pseudo action: nova-scheduler-clone_stopped_0 * Pseudo action: heat-api-cfn-clone_stop_0 * Pseudo action: swift-account-clone_stop_0 * Pseudo action: neutron-dhcp-agent-clone_stop_0 * Pseudo action: nova-api-clone_stop_0 * Resource action: heat-api-cfn stop on rhos6-node1 * Resource action: heat-api-cfn stop on rhos6-node2 * Resource action: 
heat-api-cfn stop on rhos6-node3 * Pseudo action: heat-api-cfn-clone_stopped_0 * Resource action: swift-account stop on rhos6-node1 * Resource action: swift-account stop on rhos6-node2 * Resource action: swift-account stop on rhos6-node3 * Pseudo action: swift-account-clone_stopped_0 * Resource action: neutron-dhcp-agent stop on rhos6-node1 * Resource action: neutron-dhcp-agent stop on rhos6-node2 * Resource action: neutron-dhcp-agent stop on rhos6-node3 * Pseudo action: neutron-dhcp-agent-clone_stopped_0 * Resource action: nova-api stop on rhos6-node1 * Resource action: nova-api stop on rhos6-node2 * Resource action: nova-api stop on rhos6-node3 * Pseudo action: nova-api-clone_stopped_0 * Pseudo action: heat-api-clone_stop_0 * Pseudo action: neutron-openvswitch-agent-clone_stop_0 * Pseudo action: nova-novncproxy-clone_stop_0 * Resource action: heat-api stop on rhos6-node1 * Resource action: heat-api stop on rhos6-node2 * Resource action: heat-api stop on rhos6-node3 * Pseudo action: heat-api-clone_stopped_0 * Resource action: neutron-openvswitch-agent stop on rhos6-node1 * Resource action: neutron-openvswitch-agent stop on rhos6-node2 * Resource action: neutron-openvswitch-agent stop on rhos6-node3 * Pseudo action: neutron-openvswitch-agent-clone_stopped_0 * Resource action: nova-novncproxy stop on rhos6-node1 * Resource action: nova-novncproxy stop on rhos6-node2 * Resource action: nova-novncproxy stop on rhos6-node3 * Pseudo action: nova-novncproxy-clone_stopped_0 * Pseudo action: ceilometer-notification-clone_stop_0 * Pseudo action: neutron-netns-cleanup-clone_stop_0 * Pseudo action: nova-consoleauth-clone_stop_0 * Resource action: ceilometer-notification stop on rhos6-node1 * Resource action: ceilometer-notification stop on rhos6-node2 * Resource action: ceilometer-notification stop on rhos6-node3 * Pseudo action: ceilometer-notification-clone_stopped_0 * Resource action: neutron-netns-cleanup stop on rhos6-node1 * Resource action: neutron-netns-cleanup stop on rhos6-node2 * Resource action: neutron-netns-cleanup stop on rhos6-node3 * Pseudo action: neutron-netns-cleanup-clone_stopped_0 * Resource action: nova-consoleauth stop on rhos6-node1 * Resource action: nova-consoleauth stop on rhos6-node2 * Resource action: nova-consoleauth stop on rhos6-node3 * Pseudo action: nova-consoleauth-clone_stopped_0 * Pseudo action: ceilometer-alarm-notifier-clone_stop_0 * Pseudo action: neutron-ovs-cleanup-clone_stop_0 * Resource action: ceilometer-alarm-notifier stop on rhos6-node1 * Resource action: ceilometer-alarm-notifier stop on rhos6-node2 * Resource action: ceilometer-alarm-notifier stop on rhos6-node3 * Pseudo action: ceilometer-alarm-notifier-clone_stopped_0 * Resource action: neutron-ovs-cleanup stop on rhos6-node1 * Resource action: neutron-ovs-cleanup stop on rhos6-node2 * Resource action: neutron-ovs-cleanup stop on rhos6-node3 * Pseudo action: neutron-ovs-cleanup-clone_stopped_0 * Pseudo action: ceilometer-alarm-evaluator-clone_stop_0 * Pseudo action: neutron-scale-clone_stop_0 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node1 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node2 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node3 * Pseudo action: ceilometer-alarm-evaluator-clone_stopped_0 * Resource action: neutron-scale:0 stop on rhos6-node3 * Resource action: neutron-scale:1 stop on rhos6-node2 * Resource action: neutron-scale:2 stop on rhos6-node1 * Pseudo action: neutron-scale-clone_stopped_0 * Pseudo action: ceilometer-delay-clone_stop_0 
* Pseudo action: neutron-server-clone_stop_0 * Resource action: ceilometer-delay stop on rhos6-node1 * Resource action: ceilometer-delay stop on rhos6-node2 * Resource action: ceilometer-delay stop on rhos6-node3 * Pseudo action: ceilometer-delay-clone_stopped_0 * Resource action: neutron-server stop on rhos6-node1 * Resource action: neutron-server stop on rhos6-node2 * Resource action: neutron-server stop on rhos6-node3 * Pseudo action: neutron-server-clone_stopped_0 * Pseudo action: ceilometer-api-clone_stop_0 * Resource action: ceilometer-api stop on rhos6-node1 * Resource action: ceilometer-api stop on rhos6-node2 * Resource action: ceilometer-api stop on rhos6-node3 * Pseudo action: ceilometer-api-clone_stopped_0 * Pseudo action: ceilometer-collector-clone_stop_0 * Resource action: ceilometer-collector stop on rhos6-node1 * Resource action: ceilometer-collector stop on rhos6-node2 * Resource action: ceilometer-collector stop on rhos6-node3 * Pseudo action: ceilometer-collector-clone_stopped_0 * Resource action: ceilometer-central stop on rhos6-node3 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhos6-node1 * Resource action: keystone stop on rhos6-node2 * Resource action: keystone stop on rhos6-node3 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] node1-fence (stonith:fence_xvm): Started rhos6-node1 node2-fence (stonith:fence_xvm): Started rhos6-node2 node3-fence (stonith:fence_xvm): Started rhos6-node3 Clone Set: lb-haproxy-clone [lb-haproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] vip-db (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-rabbitmq (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-qpid (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-keystone (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-glance (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-cinder (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-swift (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-neutron (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-nova (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-horizon (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-heat (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-ceilometer (ocf::heartbeat:IPaddr2): Started rhos6-node3 Master/Slave Set: galera-master [galera] Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: rabbitmq-server-clone [rabbitmq-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: memcached-clone [memcached] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: mongodb-clone [mongodb] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: keystone-clone [keystone] Stopped (disabled): [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-fs-clone [glance-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-registry-clone [glance-registry] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-api-clone [glance-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] cinder-api (systemd:openstack-cinder-api): Stopped cinder-scheduler (systemd:openstack-cinder-scheduler): Stopped cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: swift-fs-clone [swift-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-account-clone [swift-account] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-container-clone [swift-container] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-object-clone 
[swift-object] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-proxy-clone [swift-proxy] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped Clone Set: neutron-server-clone [neutron-server] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-scale-clone [neutron-scale] (unique) neutron-scale:0 (ocf::neutron:NeutronScale): Stopped neutron-scale:1 (ocf::neutron:NeutronScale): Stopped neutron-scale:2 (ocf::neutron:NeutronScale): Stopped Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-consoleauth-clone [nova-consoleauth] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-novncproxy-clone [nova-novncproxy] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-api-clone [nova-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-scheduler-clone [nova-scheduler] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-conductor-clone [nova-conductor] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] ceilometer-central (systemd:openstack-ceilometer-central): Stopped Clone Set: ceilometer-collector-clone [ceilometer-collector] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-api-clone [ceilometer-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-delay-clone [ceilometer-delay] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-notification-clone [ceilometer-notification] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-clone [heat-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cfn-clone [heat-api-cfn] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] heat-engine (systemd:openstack-heat-engine): Stopped Clone Set: horizon-clone [horizon] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] diff --git a/pengine/test10/container-2.summary b/pengine/test10/container-2.summary index f011cd3a98..011dbe436d 100644 --- a/pengine/test10/container-2.summary +++ b/pengine/test10/container-2.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Restart container1 (Started node1) - * Recover rsc1 (Started node1) - * Restart rsc2 (Started node1) due to required container1 start + * Restart container1 ( node1 ) + * Recover rsc1 ( node1 ) + * Restart rsc2 ( node1 ) due to required container1 start Executing cluster 
transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 Revised cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-3.summary b/pengine/test10/container-3.summary index f853ab2098..424f27e4eb 100644 --- a/pengine/test10/container-3.summary +++ b/pengine/test10/container-3.summary @@ -1,31 +1,31 @@ Current cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED (failure ignored) rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Restart container1 (Started node1) + * Restart container1 ( node1 ) * Start rsc1 (node1) - * Restart rsc2 (Started node1) due to required container1 start + * Restart rsc2 ( node1 ) due to required container1 start Executing cluster transition: * Resource action: rsc2 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 * Resource action: rsc1 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 (failure ignored) rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-4.summary b/pengine/test10/container-4.summary index c5852ded18..387db8f970 100644 --- a/pengine/test10/container-4.summary +++ b/pengine/test10/container-4.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move container1 (Started node1 -> node2) - * Recover rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move container1 ( node1 -> node2 ) + * Recover rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Resource action: container1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: container1 monitor=20000 on node2 * Resource action: rsc1 monitor=10000 on node2 * Resource action: rsc2 monitor=5000 on node2 Revised cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node2 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/container-group-2.summary b/pengine/test10/container-group-2.summary index c0dbbf8bdb..68d62aa343 100644 --- a/pengine/test10/container-group-2.summary +++ b/pengine/test10/container-group-2.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 
(ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Restart container1 (Started node1) - * Recover rsc1 (Started node1) - * Restart rsc2 (Started node1) due to required rsc1 start + * Restart container1 ( node1 ) + * Recover rsc1 ( node1 ) + * Restart rsc2 ( node1 ) due to required rsc1 start Executing cluster transition: * Pseudo action: container-group_stop_0 * Resource action: rsc2 stop on node1 * Resource action: rsc1 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Pseudo action: container-group_stopped_0 * Pseudo action: container-group_start_0 * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 * Pseudo action: container-group_running_0 Revised cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-group-3.summary b/pengine/test10/container-group-3.summary index d5fac464f1..3e327ffc5f 100644 --- a/pengine/test10/container-group-3.summary +++ b/pengine/test10/container-group-3.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED (failure ignored) rsc2 (ocf::pacemaker:Dummy): Stopped Transition Summary: - * Restart container1 (Started node1) + * Restart container1 ( node1 ) * Start rsc1 (node1) * Start rsc2 (node1) Executing cluster transition: * Pseudo action: container-group_stop_0 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Pseudo action: container-group_stopped_0 * Pseudo action: container-group_start_0 * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 * Pseudo action: container-group_running_0 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 monitor=5000 on node1 Revised cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 (failure ignored) rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-group-4.summary b/pengine/test10/container-group-4.summary index 3ee85bcbd5..8ab682bc18 100644 --- a/pengine/test10/container-group-4.summary +++ b/pengine/test10/container-group-4.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move container1 (Started node1 -> node2) - * Recover rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move container1 ( node1 -> node2 ) + * Recover rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) Executing cluster transition: * Pseudo action: container-group_stop_0 * Resource action: rsc2 stop on node1 * Resource action: rsc1 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Pseudo action: container-group_stopped_0 * Pseudo action: 
container-group_start_0 * Resource action: container1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Pseudo action: container-group_running_0 * Resource action: container1 monitor=20000 on node2 * Resource action: rsc1 monitor=10000 on node2 * Resource action: rsc2 monitor=5000 on node2 Revised cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node2 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/enforce-colo1.summary b/pengine/test10/enforce-colo1.summary index b79b8cbc0b..64815ae61c 100644 --- a/pengine/test10/enforce-colo1.summary +++ b/pengine/test10/enforce-colo1.summary @@ -1,37 +1,37 @@ 3 of 6 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 engine (ocf::heartbeat:Dummy): Started rhel7-auto3 Clone Set: keystone-clone [keystone] Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] central (ocf::heartbeat:Dummy): Started rhel7-auto3 Transition Summary: - * Stop engine (Started rhel7-auto3) + * Stop engine ( rhel7-auto3 ) * Stop keystone:0 (rhel7-auto2) due to node availability * Stop keystone:1 (rhel7-auto3) due to node availability * Stop keystone:2 (rhel7-auto1) due to node availability - * Stop central (Started rhel7-auto3) due to unrunnable keystone-clone running + * Stop central ( rhel7-auto3 ) due to unrunnable keystone-clone running Executing cluster transition: * Resource action: engine stop on rhel7-auto3 * Resource action: central stop on rhel7-auto3 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhel7-auto2 * Resource action: keystone stop on rhel7-auto3 * Resource action: keystone stop on rhel7-auto1 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 engine (ocf::heartbeat:Dummy): Stopped Clone Set: keystone-clone [keystone] Stopped (disabled): [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] central (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/group-dependents.summary b/pengine/test10/group-dependents.summary index c1366568b0..15b750b172 100644 --- a/pengine/test10/group-dependents.summary +++ b/pengine/test10/group-dependents.summary @@ -1,195 +1,195 @@ Current cluster status: Online: [ asttest1 asttest2 ] Resource Group: voip mysqld (lsb:mysql): Started asttest1 dahdi (lsb:dahdi): Started asttest1 fonulator (lsb:fonulator): Stopped asterisk (lsb:asterisk-11.0.1): Stopped iax2_mon (lsb:iax2_mon): Stopped httpd (lsb:apache2): Stopped tftp (lsb:tftp-srce): Stopped Resource Group: ip_voip_routes ip_voip_route_test1 (ocf::heartbeat:Route): Started asttest1 ip_voip_route_test2 (ocf::heartbeat:Route): Started asttest1 Resource Group: ip_voip_addresses_p ip_voip_vlan850 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan998 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan851 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan852 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan853 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan854 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan855 (ocf::heartbeat:IPaddr2): Started asttest1 ip_voip_vlan856 (ocf::heartbeat:IPaddr2): Started asttest1 Clone Set: cl_route [ip_voip_route_default] Started: [ asttest1 asttest2 ] 
fs_drbd (ocf::heartbeat:Filesystem): Started asttest1 Master/Slave Set: ms_drbd [drbd] Masters: [ asttest1 ] Slaves: [ asttest2 ] Transition Summary: - * Migrate mysqld (Started asttest1 -> asttest2) - * Migrate dahdi (Started asttest1 -> asttest2) + * Migrate mysqld ( asttest1 -> asttest2 ) + * Migrate dahdi ( asttest1 -> asttest2 ) * Start fonulator (asttest2) * Start asterisk (asttest2) * Start iax2_mon (asttest2) * Start httpd (asttest2) * Start tftp (asttest2) - * Migrate ip_voip_route_test1 (Started asttest1 -> asttest2) - * Migrate ip_voip_route_test2 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan850 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan998 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan851 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan852 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan853 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan854 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan855 (Started asttest1 -> asttest2) - * Migrate ip_voip_vlan856 (Started asttest1 -> asttest2) - * Move fs_drbd (Started asttest1 -> asttest2) - * Demote drbd:0 (Master -> Slave asttest1) + * Migrate ip_voip_route_test1 ( asttest1 -> asttest2 ) + * Migrate ip_voip_route_test2 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan850 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan998 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan851 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan852 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan853 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan854 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan855 ( asttest1 -> asttest2 ) + * Migrate ip_voip_vlan856 ( asttest1 -> asttest2 ) + * Move fs_drbd ( asttest1 -> asttest2 ) + * Demote drbd:0 ( Master -> Slave asttest1 ) * Promote drbd:1 (Slave -> Master asttest2) Executing cluster transition: * Pseudo action: voip_stop_0 * Resource action: mysqld migrate_to on asttest1 * Resource action: ip_voip_route_test1 migrate_to on asttest1 * Resource action: ip_voip_route_test2 migrate_to on asttest1 * Resource action: ip_voip_vlan850 migrate_to on asttest1 * Resource action: ip_voip_vlan998 migrate_to on asttest1 * Resource action: ip_voip_vlan851 migrate_to on asttest1 * Resource action: ip_voip_vlan852 migrate_to on asttest1 * Resource action: ip_voip_vlan853 migrate_to on asttest1 * Resource action: ip_voip_vlan854 migrate_to on asttest1 * Resource action: ip_voip_vlan855 migrate_to on asttest1 * Resource action: ip_voip_vlan856 migrate_to on asttest1 * Resource action: drbd:1 cancel=31000 on asttest2 * Pseudo action: ms_drbd_pre_notify_demote_0 * Resource action: mysqld migrate_from on asttest2 * Resource action: dahdi migrate_to on asttest1 * Resource action: ip_voip_route_test1 migrate_from on asttest2 * Resource action: ip_voip_route_test2 migrate_from on asttest2 * Resource action: ip_voip_vlan850 migrate_from on asttest2 * Resource action: ip_voip_vlan998 migrate_from on asttest2 * Resource action: ip_voip_vlan851 migrate_from on asttest2 * Resource action: ip_voip_vlan852 migrate_from on asttest2 * Resource action: ip_voip_vlan853 migrate_from on asttest2 * Resource action: ip_voip_vlan854 migrate_from on asttest2 * Resource action: ip_voip_vlan855 migrate_from on asttest2 * Resource action: ip_voip_vlan856 migrate_from on asttest2 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0 * Resource action: dahdi migrate_from on asttest2 * Resource action: dahdi 
stop on asttest1 * Resource action: mysqld stop on asttest1 * Pseudo action: voip_stopped_0 * Pseudo action: ip_voip_routes_stop_0 * Resource action: ip_voip_route_test1 stop on asttest1 * Resource action: ip_voip_route_test2 stop on asttest1 * Pseudo action: ip_voip_routes_stopped_0 * Pseudo action: ip_voip_addresses_p_stop_0 * Resource action: ip_voip_vlan850 stop on asttest1 * Resource action: ip_voip_vlan998 stop on asttest1 * Resource action: ip_voip_vlan851 stop on asttest1 * Resource action: ip_voip_vlan852 stop on asttest1 * Resource action: ip_voip_vlan853 stop on asttest1 * Resource action: ip_voip_vlan854 stop on asttest1 * Resource action: ip_voip_vlan855 stop on asttest1 * Resource action: ip_voip_vlan856 stop on asttest1 * Pseudo action: ip_voip_addresses_p_stopped_0 * Resource action: fs_drbd stop on asttest1 * Pseudo action: ms_drbd_demote_0 * Pseudo action: all_stopped * Resource action: drbd:0 demote on asttest1 * Pseudo action: ms_drbd_demoted_0 * Pseudo action: ms_drbd_post_notify_demoted_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_pre_notify_promote_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_promote_0 * Resource action: drbd:1 promote on asttest2 * Pseudo action: ms_drbd_promoted_0 * Pseudo action: ms_drbd_post_notify_promoted_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0 * Resource action: fs_drbd start on asttest2 * Resource action: drbd:0 monitor=31000 on asttest1 * Pseudo action: ip_voip_addresses_p_start_0 * Pseudo action: ip_voip_vlan850_start_0 * Pseudo action: ip_voip_vlan998_start_0 * Pseudo action: ip_voip_vlan851_start_0 * Pseudo action: ip_voip_vlan852_start_0 * Pseudo action: ip_voip_vlan853_start_0 * Pseudo action: ip_voip_vlan854_start_0 * Pseudo action: ip_voip_vlan855_start_0 * Pseudo action: ip_voip_vlan856_start_0 * Resource action: fs_drbd monitor=1000 on asttest2 * Pseudo action: ip_voip_addresses_p_running_0 * Resource action: ip_voip_vlan850 monitor=1000 on asttest2 * Resource action: ip_voip_vlan998 monitor=1000 on asttest2 * Resource action: ip_voip_vlan851 monitor=1000 on asttest2 * Resource action: ip_voip_vlan852 monitor=1000 on asttest2 * Resource action: ip_voip_vlan853 monitor=1000 on asttest2 * Resource action: ip_voip_vlan854 monitor=1000 on asttest2 * Resource action: ip_voip_vlan855 monitor=1000 on asttest2 * Resource action: ip_voip_vlan856 monitor=1000 on asttest2 * Pseudo action: ip_voip_routes_start_0 * Pseudo action: ip_voip_route_test1_start_0 * Pseudo action: ip_voip_route_test2_start_0 * Pseudo action: ip_voip_routes_running_0 * Resource action: ip_voip_route_test1 monitor=1000 on asttest2 * Resource action: ip_voip_route_test2 monitor=1000 on asttest2 * Pseudo action: voip_start_0 * Pseudo action: mysqld_start_0 * Pseudo action: dahdi_start_0 * Resource action: fonulator start on asttest2 * Resource action: asterisk start on asttest2 * Resource action: iax2_mon start on asttest2 * Resource action: httpd start on asttest2 * Resource action: tftp start on asttest2 * Pseudo action: voip_running_0 * Resource action: mysqld monitor=1000 on asttest2 * Resource action: dahdi monitor=1000 on asttest2 * Resource action: fonulator monitor=1000 on asttest2 * Resource action: 
asterisk monitor=1000 on asttest2 * Resource action: iax2_mon monitor=60000 on asttest2 * Resource action: httpd monitor=1000 on asttest2 * Resource action: tftp monitor=60000 on asttest2 Revised cluster status: Online: [ asttest1 asttest2 ] Resource Group: voip mysqld (lsb:mysql): Started asttest2 dahdi (lsb:dahdi): Started asttest2 fonulator (lsb:fonulator): Started asttest2 asterisk (lsb:asterisk-11.0.1): Started asttest2 iax2_mon (lsb:iax2_mon): Started asttest2 httpd (lsb:apache2): Started asttest2 tftp (lsb:tftp-srce): Started asttest2 Resource Group: ip_voip_routes ip_voip_route_test1 (ocf::heartbeat:Route): Started asttest2 ip_voip_route_test2 (ocf::heartbeat:Route): Started asttest2 Resource Group: ip_voip_addresses_p ip_voip_vlan850 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan998 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan851 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan852 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan853 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan854 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan855 (ocf::heartbeat:IPaddr2): Started asttest2 ip_voip_vlan856 (ocf::heartbeat:IPaddr2): Started asttest2 Clone Set: cl_route [ip_voip_route_default] Started: [ asttest1 asttest2 ] fs_drbd (ocf::heartbeat:Filesystem): Started asttest2 Master/Slave Set: ms_drbd [drbd] Masters: [ asttest2 ] Slaves: [ asttest1 ] diff --git a/pengine/test10/group-fail.summary b/pengine/test10/group-fail.summary index 9067bf684d..8ed59ca7de 100644 --- a/pengine/test10/group-fail.summary +++ b/pengine/test10/group-fail.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: group1 rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) due to required rsc1 start + * Restart rsc2 ( node1 ) due to required rsc1 start * Start rsc3 (node1) - * Restart rsc4 (Started node1) due to required rsc3 start + * Restart rsc4 ( node1 ) due to required rsc3 start Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc4 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Resource action: rsc4 start on node1 * Pseudo action: group1_running_0 Revised cluster status: Online: [ node1 node2 ] Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/group10.summary b/pengine/test10/group10.summary index 570fd28bf3..f7d0eefca2 100644 --- a/pengine/test10/group10.summary +++ b/pengine/test10/group10.summary @@ -1,67 +1,67 @@ Current cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): FAILED c001n01 child_192.168.100.182 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing 
[child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Started c001n08 Transition Summary: - * Recover child_192.168.100.181 (Started c001n01) - * Restart child_192.168.100.182 (Started c001n01) due to required child_192.168.100.181 start - * Restart child_192.168.100.183 (Started c001n01) due to required child_192.168.100.182 start + * Recover child_192.168.100.181 ( c001n01 ) + * Restart child_192.168.100.182 ( c001n01 ) due to required child_192.168.100.181 start + * Restart child_192.168.100.183 ( c001n01 ) due to required child_192.168.100.182 start Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: child_192.168.100.183 stop on c001n01 * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n02 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n01 * Resource action: child_192.168.100.182 stop on c001n01 * Resource action: child_192.168.100.181 stop on c001n01 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: child_192.168.100.181 start on c001n01 * Resource action: child_192.168.100.181 monitor=5000 on c001n01 * Resource action: child_192.168.100.182 start on c001n01 * Resource action: child_192.168.100.182 monitor=5000 on c001n01 * Resource action: child_192.168.100.183 start on c001n01 * Resource action: child_192.168.100.183 monitor=5000 on c001n01 * Pseudo action: group-1_running_0 Revised cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.182 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Started c001n08 diff --git a/pengine/test10/group14.summary b/pengine/test10/group14.summary index 69e688bfda..f7a5d7d046 100644 --- a/pengine/test10/group14.summary +++ b/pengine/test10/group14.summary @@ -1,101 +1,101 @@ Current cluster status: Online: [ c001n06 c001n07 ] OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] DcIPaddr (heartbeat:IPaddr): Stopped Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n06 r192.168.100.182 (ocf::heartbeat:IPaddr): Stopped r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped migrator (ocf::heartbeat:Dummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 
(ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped Transition Summary: - * Start DcIPaddr (c001n06 - blocked) due to no quorum - * Stop r192.168.100.181 (Started c001n06) due to no quorum - * Start r192.168.100.182 (c001n07 - blocked) due to no quorum - * Start r192.168.100.183 (c001n07 - blocked) due to no quorum - * Start lsb_dummy (c001n06 - blocked) due to no quorum - * Start migrator (c001n06 - blocked) due to no quorum - * Start rsc_c001n03 (c001n06 - blocked) due to no quorum - * Start rsc_c001n02 (c001n07 - blocked) due to no quorum - * Start rsc_c001n04 (c001n06 - blocked) due to no quorum - * Start rsc_c001n05 (c001n07 - blocked) due to no quorum - * Start rsc_c001n06 (c001n06 - blocked) due to no quorum - * Start rsc_c001n07 (c001n07 - blocked) due to no quorum + * Start DcIPaddr ( c001n06 ) due to no quorum (blocked) + * Stop r192.168.100.181 ( c001n06 ) due to no quorum + * Start r192.168.100.182 ( c001n07 ) due to no quorum (blocked) + * Start r192.168.100.183 ( c001n07 ) due to no quorum (blocked) + * Start lsb_dummy ( c001n06 ) due to no quorum (blocked) + * Start migrator ( c001n06 ) due to no quorum (blocked) + * Start rsc_c001n03 ( c001n06 ) due to no quorum (blocked) + * Start rsc_c001n02 ( c001n07 ) due to no quorum (blocked) + * Start rsc_c001n04 ( c001n06 ) due to no quorum (blocked) + * Start rsc_c001n05 ( c001n07 ) due to no quorum (blocked) + * Start rsc_c001n06 ( c001n06 ) due to no quorum (blocked) + * Start rsc_c001n07 ( c001n07 ) due to no quorum (blocked) * Start child_DoFencing:0 (c001n06) * Start child_DoFencing:1 (c001n07) - * Start ocf_msdummy:0 (c001n06 - blocked) due to no quorum - * Start ocf_msdummy:1 (c001n07 - blocked) due to no quorum - * Start ocf_msdummy:2 (c001n06 - blocked) due to no quorum - * Start ocf_msdummy:3 (c001n07 - blocked) due to no quorum + * Start ocf_msdummy:0 ( c001n06 ) due to no quorum (blocked) + * Start ocf_msdummy:1 ( c001n07 ) due to no quorum (blocked) + * Start ocf_msdummy:2 ( c001n06 ) due to no quorum (blocked) + * Start ocf_msdummy:3 ( c001n07 ) due to no quorum (blocked) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: r192.168.100.181 stop on c001n06 * Pseudo action: DoFencing_start_0 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: child_DoFencing:0 start on c001n06 * Resource action: child_DoFencing:1 start on c001n07 * Pseudo action: DoFencing_running_0 * Resource action: child_DoFencing:0 monitor=20000 on c001n06 * Resource action: child_DoFencing:1 monitor=20000 on c001n07 Revised cluster status: Online: [ c001n06 c001n07 ] OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] DcIPaddr (heartbeat:IPaddr): Stopped Resource Group: group-1 r192.168.100.181 
(ocf::heartbeat:IPaddr): Stopped r192.168.100.182 (ocf::heartbeat:IPaddr): Stopped r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped migrator (ocf::heartbeat:Dummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Started: [ c001n06 c001n07 ] Stopped: [ c001n02 c001n03 c001n04 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/group5.summary b/pengine/test10/group5.summary index 68a0e91e39..f709b9d2cb 100644 --- a/pengine/test10/group5.summary +++ b/pengine/test10/group5.summary @@ -1,50 +1,50 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 Resource Group: rsc2 child_rsc1 (heartbeat:apache): Started node1 child_rsc2 (heartbeat:apache): Started node1 child_rsc3 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move child_rsc1 (Started node1 -> node2) - * Move child_rsc2 (Started node1 -> node2) - * Move child_rsc3 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move child_rsc1 ( node1 -> node2 ) + * Move child_rsc2 ( node1 -> node2 ) + * Move child_rsc3 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: child_rsc1 monitor on node2 * Resource action: child_rsc2 monitor on node2 * Resource action: child_rsc3 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 stop on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc3 stop on node1 * Resource action: child_rsc2 stop on node1 * Resource action: child_rsc1 stop on node1 * Pseudo action: rsc2_stopped_0 * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node2 * Pseudo action: rsc2_start_0 * Resource action: child_rsc1 start on node2 * Resource action: child_rsc2 start on node2 * Resource action: child_rsc3 start on node2 * Pseudo action: rsc2_running_0 * Resource action: rsc3 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node2 Resource Group: rsc2 child_rsc1 (heartbeat:apache): Started node2 child_rsc2 (heartbeat:apache): Started node2 child_rsc3 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/group6.summary b/pengine/test10/group6.summary index f2fa1e214d..0d1886834a 100644 --- a/pengine/test10/group6.summary +++ b/pengine/test10/group6.summary @@ -1,62 +1,62 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: rsc1 child_rsc1 
(heartbeat:apache): Started node1 child_rsc2 (heartbeat:apache): Started node1 child_rsc3 (heartbeat:apache): Started node1 Resource Group: rsc2 child_rsc4 (heartbeat:apache): Started node1 child_rsc5 (heartbeat:apache): Started node1 child_rsc6 (heartbeat:apache): Started node1 Transition Summary: - * Move child_rsc1 (Started node1 -> node2) - * Move child_rsc2 (Started node1 -> node2) - * Move child_rsc3 (Started node1 -> node2) - * Move child_rsc4 (Started node1 -> node2) - * Move child_rsc5 (Started node1 -> node2) - * Move child_rsc6 (Started node1 -> node2) + * Move child_rsc1 ( node1 -> node2 ) + * Move child_rsc2 ( node1 -> node2 ) + * Move child_rsc3 ( node1 -> node2 ) + * Move child_rsc4 ( node1 -> node2 ) + * Move child_rsc5 ( node1 -> node2 ) + * Move child_rsc6 ( node1 -> node2 ) Executing cluster transition: * Resource action: child_rsc1 monitor on node2 * Resource action: child_rsc2 monitor on node2 * Resource action: child_rsc3 monitor on node2 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc4 monitor on node2 * Resource action: child_rsc5 monitor on node2 * Resource action: child_rsc6 monitor on node2 * Resource action: child_rsc6 stop on node1 * Resource action: child_rsc5 stop on node1 * Resource action: child_rsc4 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc3 stop on node1 * Resource action: child_rsc2 stop on node1 * Resource action: child_rsc1 stop on node1 * Pseudo action: all_stopped * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1 start on node2 * Resource action: child_rsc2 start on node2 * Resource action: child_rsc3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc4 start on node2 * Resource action: child_rsc5 start on node2 * Resource action: child_rsc6 start on node2 * Pseudo action: rsc2_running_0 Revised cluster status: Online: [ node1 node2 ] Resource Group: rsc1 child_rsc1 (heartbeat:apache): Started node2 child_rsc2 (heartbeat:apache): Started node2 child_rsc3 (heartbeat:apache): Started node2 Resource Group: rsc2 child_rsc4 (heartbeat:apache): Started node2 child_rsc5 (heartbeat:apache): Started node2 child_rsc6 (heartbeat:apache): Started node2 diff --git a/pengine/test10/group9.summary b/pengine/test10/group9.summary index f6755b9bc3..ede3c82f8e 100644 --- a/pengine/test10/group9.summary +++ b/pengine/test10/group9.summary @@ -1,65 +1,65 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Resource Group: foo rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): FAILED node1 rsc5 (heartbeat:apache): Started node1 Resource Group: bar rsc6 (heartbeat:apache): Started node1 rsc7 (heartbeat:apache): FAILED node1 rsc8 (heartbeat:apache): Started node1 Transition Summary: - * Recover rsc4 (Started node1) - * Restart rsc5 (Started node1) due to required rsc4 start - * Move rsc6 (Started node1 -> node2) - * Recover rsc7 (Started node1 -> node2) - * Move rsc8 (Started node1 -> node2) + * Recover rsc4 ( node1 ) + * Restart rsc5 ( node1 ) due to required rsc4 start + * Move rsc6 ( node1 -> node2 ) + * Recover rsc7 ( node1 -> node2 ) + * Move rsc8 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Pseudo action: foo_stop_0 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Resource action: rsc5 monitor 
on node2 * Pseudo action: bar_stop_0 * Resource action: rsc6 monitor on node2 * Resource action: rsc7 monitor on node2 * Resource action: rsc8 monitor on node2 * Resource action: rsc5 stop on node1 * Resource action: rsc8 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc7 stop on node1 * Pseudo action: foo_stopped_0 * Pseudo action: foo_start_0 * Resource action: rsc4 start on node1 * Resource action: rsc5 start on node1 * Resource action: rsc6 stop on node1 * Pseudo action: all_stopped * Pseudo action: foo_running_0 * Pseudo action: bar_stopped_0 * Pseudo action: bar_start_0 * Resource action: rsc6 start on node2 * Resource action: rsc7 start on node2 * Resource action: rsc8 start on node2 * Pseudo action: bar_running_0 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Resource Group: foo rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 rsc5 (heartbeat:apache): Started node1 Resource Group: bar rsc6 (heartbeat:apache): Started node2 rsc7 (heartbeat:apache): Started node2 rsc8 (heartbeat:apache): Started node2 diff --git a/pengine/test10/guest-node-host-dies.summary b/pengine/test10/guest-node-host-dies.summary index d22fe3d4ca..ea0fa4c538 100644 --- a/pengine/test10/guest-node-host-dies.summary +++ b/pengine/test10/guest-node-host-dies.summary @@ -1,82 +1,82 @@ Current cluster status: Node rhel7-1 (1): UNCLEAN (offline) Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Fencing (stonith:fence_xvm): Started rhel7-4 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 ( UNCLEAN ) container1 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) Master/Slave Set: lxc-ms-master [lxc-ms] Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Transition Summary: * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' * Fence (reboot) lxc1 (resource: container1) 'guest is unclean' * Fence (reboot) rhel7-1 'rsc_rhel7-1 is thought to be active there' - * Restart Fencing (Started rhel7-4) - * Move rsc_rhel7-1 (Started rhel7-1 -> rhel7-5) - * Recover container1 (Started rhel7-1 -> rhel7-2) - * Recover container2 (Started rhel7-1 -> rhel7-3) + * Restart Fencing ( rhel7-4 ) + * Move rsc_rhel7-1 ( rhel7-1 -> rhel7-5 ) + * Recover container1 ( rhel7-1 -> rhel7-2 ) + * Recover container2 ( rhel7-1 -> rhel7-3 ) * Recover lxc-ms:0 (Master lxc1) * Recover lxc-ms:1 (Slave lxc2) - * Move lxc1 (Started rhel7-1 -> rhel7-2) - * Move lxc2 (Started rhel7-1 -> rhel7-3) + * Move lxc1 ( rhel7-1 -> rhel7-2 ) + * Move lxc2 ( rhel7-1 -> rhel7-3 ) Executing cluster transition: * Resource action: Fencing stop on rhel7-4 * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 monitor on rhel7-5 * Resource action: lxc1 monitor on rhel7-4 * Resource action: lxc1 monitor on rhel7-3 * Resource action: lxc2 monitor on rhel7-5 * Resource action: lxc2 monitor on rhel7-4 * Resource action: lxc2 monitor on rhel7-2 * Fencing rhel7-1 (reboot) * Pseudo action: rsc_rhel7-1_stop_0 * Pseudo action: lxc1_stop_0 * Pseudo action: lxc2_stop_0 * Pseudo action: container1_stop_0 * Pseudo action: container2_stop_0 * Pseudo action: stonith-lxc2-reboot on lxc2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: rsc_rhel7-1 start on rhel7-5 * Resource action: container1 start on rhel7-2 * Resource action: container2 start on rhel7-3 * Pseudo action: lxc-ms_demote_0 * Pseudo action: 
lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc1 start on rhel7-2 * Resource action: lxc2 start on rhel7-3 * Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc1 monitor=30000 on rhel7-2 * Resource action: lxc2 monitor=30000 on rhel7-3 * Pseudo action: all_stopped * Resource action: Fencing start on rhel7-4 * Resource action: Fencing monitor=120000 on rhel7-4 * Resource action: lxc-ms start on lxc1 * Resource action: lxc-ms start on lxc2 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc-ms monitor=10000 on lxc2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised cluster status: Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] OFFLINE: [ rhel7-1 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-4 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-5 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-2 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] diff --git a/pengine/test10/inc11.summary b/pengine/test10/inc11.summary index 6f4d8ef7b9..08d024d34d 100644 --- a/pengine/test10/inc11.summary +++ b/pengine/test10/inc11.summary @@ -1,42 +1,41 @@ Current cluster status: Online: [ node0 node1 node2 ] simple-rsc (heartbeat:apache): Stopped Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped Transition Summary: * Start simple-rsc (node2) * Start child_rsc1:0 (node1) - * Start child_rsc1:1 (node2) - * Promote child_rsc1:1 (Stopped -> Master node2) + * Promote child_rsc1:1 ( Stopped -> Master node2 ) Executing cluster transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: simple-rsc monitor on node0 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:0 monitor on node0 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:1 monitor on node0 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node0 node1 node2 ] simple-rsc (heartbeat:apache): Started node2 Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master node2 diff --git a/pengine/test10/inc12.summary b/pengine/test10/inc12.summary index e950c6727c..cfe99c51a9 100644 --- a/pengine/test10/inc12.summary +++ b/pengine/test10/inc12.summary @@ -1,137 +1,137 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ] Stopped: [ c001n03 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:6 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:7 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave c001n02 ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave c001n02 Transition Summary: * Shutdown c001n07 * Shutdown c001n06 * Shutdown c001n05 * Shutdown c001n04 * Shutdown c001n03 * Shutdown c001n02 * Stop ocf_192.168.100.181 (c001n02) due to node availability * Stop heartbeat_192.168.100.182 (c001n02) due to node availability * Stop ocf_192.168.100.183 (c001n02) due to node availability * Stop lsb_dummy (c001n04) * Stop rsc_c001n03 (c001n05) * Stop rsc_c001n02 (c001n02) * Stop rsc_c001n04 (c001n04) * Stop rsc_c001n05 (c001n05) * Stop rsc_c001n06 (c001n06) * Stop rsc_c001n07 (c001n07) * Stop child_DoFencing:0 (c001n02) due to node availability * Stop child_DoFencing:1 (c001n04) due to node availability * Stop child_DoFencing:2 (c001n05) due to node availability * Stop child_DoFencing:3 (c001n06) due to node availability * Stop child_DoFencing:4 (c001n07) due to node availability - * Stop ocf_msdummy:10 (c001n02) due to node availability - * Stop ocf_msdummy:11 (c001n02) due to node availability - * Stop ocf_msdummy:2 (c001n04) due to node availability - * Stop ocf_msdummy:3 (c001n04) due to node availability - * Stop ocf_msdummy:4 (c001n05) due to node availability - * Stop ocf_msdummy:5 (c001n05) due to node availability - * Stop ocf_msdummy:6 (c001n06) due to node availability - * Stop ocf_msdummy:7 (c001n06) due to node availability - * Stop ocf_msdummy:8 (c001n07) due to node availability - * Stop ocf_msdummy:9 (c001n07) due to node availability + * Stop ocf_msdummy:10 ( Slave c001n02 ) due to node availability + * Stop ocf_msdummy:11 ( Slave c001n02 ) due to node availability + * Stop ocf_msdummy:2 ( Slave c001n04 ) due to node availability + * Stop ocf_msdummy:3 ( Slave c001n04 ) due to node availability + * Stop ocf_msdummy:4 ( Slave c001n05 ) due to node availability + * Stop ocf_msdummy:5 ( Slave c001n05 ) due to node availability + * Stop ocf_msdummy:6 ( Slave c001n06 ) due to node availability + * Stop ocf_msdummy:7 ( Slave c001n06 ) due to node availability + * Stop ocf_msdummy:8 ( Slave c001n07 ) due to node availability + * Stop ocf_msdummy:9 ( Slave c001n07 ) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n02 * Resource action: lsb_dummy stop on c001n04 * Resource action: rsc_c001n03 stop on c001n05 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n04 stop on c001n04 * Resource action: rsc_c001n05 stop on 
c001n05 * Resource action: rsc_c001n06 stop on c001n06 * Resource action: rsc_c001n07 stop on c001n07 * Pseudo action: DoFencing_stop_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n02 * Resource action: child_DoFencing:1 stop on c001n02 * Resource action: child_DoFencing:2 stop on c001n04 * Resource action: child_DoFencing:3 stop on c001n05 * Resource action: child_DoFencing:4 stop on c001n06 * Resource action: child_DoFencing:5 stop on c001n07 * Pseudo action: DoFencing_stopped_0 * Resource action: ocf_msdummy:10 stop on c001n02 * Resource action: ocf_msdummy:11 stop on c001n02 * Resource action: ocf_msdummy:2 stop on c001n04 * Resource action: ocf_msdummy:3 stop on c001n04 * Resource action: ocf_msdummy:4 stop on c001n05 * Resource action: ocf_msdummy:5 stop on c001n05 * Resource action: ocf_msdummy:6 stop on c001n06 * Resource action: ocf_msdummy:7 stop on c001n06 * Resource action: ocf_msdummy:8 stop on c001n07 * Resource action: ocf_msdummy:9 stop on c001n07 * Pseudo action: master_rsc_1_stopped_0 * Cluster action: do_shutdown on c001n07 * Cluster action: do_shutdown on c001n06 * Cluster action: do_shutdown on c001n05 * Cluster action: do_shutdown on c001n04 * Resource action: ocf_192.168.100.181 stop on c001n02 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Cluster action: do_shutdown on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped heartbeat_192.168.100.182 (heartbeat:IPaddr): Stopped ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/inc2.summary b/pengine/test10/inc2.summary index 898729fd41..b3049bed5a 100644 --- a/pengine/test10/inc2.summary +++ b/pengine/test10/inc2.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Transition Summary: - * Move child_rsc1:2 (Started node1 -> node2) - * Move child_rsc1:3 (Started node1 -> node2) + * Move child_rsc1:2 ( node1 -> node2 ) + * Move child_rsc1:3 ( node1 -> node2 ) * Stop child_rsc1:4 (node1) 
due to node availability Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:2 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource action: child_rsc1:4 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 child_rsc1:4 (heartbeat:apache): Stopped diff --git a/pengine/test10/inc3.summary b/pengine/test10/inc3.summary index 955ff3b30f..5ceab722e1 100644 --- a/pengine/test10/inc3.summary +++ b/pengine/test10/inc3.summary @@ -1,70 +1,70 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node2 child_rsc2:4 (heartbeat:apache): Started node2 Transition Summary: - * Move child_rsc1:2 (Started node1 -> node2) - * Move child_rsc1:3 (Started node1 -> node2) + * Move child_rsc1:2 ( node1 -> node2 ) + * Move child_rsc1:3 ( node1 -> node2 ) * Stop child_rsc1:4 (node1) due to node availability - * Move child_rsc2:3 (Started node2 -> node1) - * Move child_rsc2:4 (Started node2 -> node1) + * Move child_rsc2:3 ( node2 -> node1 ) + * Move child_rsc2:4 ( node2 -> node1 ) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc2:0 monitor on node1 * Resource action: child_rsc2:1 monitor on node1 * Resource action: child_rsc2:2 monitor on node1 * Resource action: child_rsc2:3 monitor on node1 * Resource action: child_rsc2:4 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc2:3 stop on node2 * Resource action: child_rsc2:4 stop on node2 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:2 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource action: child_rsc1:4 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc2:3 start on node1 * Resource action: child_rsc2:4 start on node1 * Pseudo action: rsc2_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started 
node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 child_rsc1:4 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node1 child_rsc2:4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/inc4.summary b/pengine/test10/inc4.summary index e730360541..3b3f10b64c 100644 --- a/pengine/test10/inc4.summary +++ b/pengine/test10/inc4.summary @@ -1,70 +1,70 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node2 child_rsc2:4 (heartbeat:apache): Started node2 Transition Summary: - * Move child_rsc1:2 (Started node1 -> node2) - * Move child_rsc1:3 (Started node1 -> node2) + * Move child_rsc1:2 ( node1 -> node2 ) + * Move child_rsc1:3 ( node1 -> node2 ) * Stop child_rsc1:4 (node1) due to node availability - * Move child_rsc2:3 (Started node2 -> node1) - * Move child_rsc2:4 (Started node2 -> node1) + * Move child_rsc2:3 ( node2 -> node1 ) + * Move child_rsc2:4 ( node2 -> node1 ) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc2:0 monitor on node1 * Resource action: child_rsc2:1 monitor on node1 * Resource action: child_rsc2:2 monitor on node1 * Resource action: child_rsc2:3 monitor on node1 * Resource action: child_rsc2:4 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc2:4 stop on node2 * Resource action: child_rsc2:3 stop on node2 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:4 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource action: child_rsc1:2 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc2:3 start on node1 * Resource action: child_rsc2:4 start on node1 * Pseudo action: rsc2_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 child_rsc1:4 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node1 child_rsc2:4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/inc5.summary b/pengine/test10/inc5.summary index 
d4e8863b3c..93e0d8e3cf 100644 --- a/pengine/test10/inc5.summary +++ b/pengine/test10/inc5.summary @@ -1,138 +1,138 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node2 child_rsc1:2 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started node1 child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] (unique) child_rsc3:0 (heartbeat:apache): Started node1 child_rsc3:1 (heartbeat:apache): Started node2 child_rsc3:2 (heartbeat:apache): Stopped Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 child_rsc4:1 (heartbeat:apache): Started node1 child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node2 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] (unique) child_rsc6:0 (heartbeat:apache): Started node1 child_rsc6:1 (heartbeat:apache): Started node2 child_rsc6:2 (heartbeat:apache): Stopped Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node2 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] (unique) child_rsc8:0 (heartbeat:apache): Started node1 child_rsc8:1 (heartbeat:apache): Started node2 child_rsc8:2 (heartbeat:apache): Stopped Transition Summary: - * Move child_rsc2:1 (Started node1 -> node2) - * Move child_rsc4:1 (Started node1 -> node2) - * Move child_rsc5:1 (Started node2 -> node1) - * Move child_rsc7:1 (Started node2 -> node1) + * Move child_rsc2:1 ( node1 -> node2 ) + * Move child_rsc4:1 ( node1 -> node2 ) + * Move child_rsc5:1 ( node2 -> node1 ) + * Move child_rsc7:1 ( node2 -> node1 ) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc2:0 monitor on node2 * Resource action: child_rsc2:1 monitor on node2 * Resource action: child_rsc2:2 monitor on node2 * Resource action: child_rsc2:2 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc3:0 monitor on node2 * Resource action: child_rsc3:1 monitor on node1 * Resource action: child_rsc3:2 monitor on node2 * Resource action: child_rsc3:2 monitor on node1 * Resource action: child_rsc4:0 monitor on node2 * Resource action: child_rsc4:1 monitor on node2 * Resource action: child_rsc4:2 monitor on node2 * Resource action: child_rsc4:2 monitor on node1 * Pseudo action: rsc4_stop_0 * Resource action: child_rsc5:0 monitor on node1 * Resource action: child_rsc5:1 monitor on node1 * Resource action: child_rsc5:2 monitor on node2 * Resource action: child_rsc5:2 monitor on node1 * Pseudo action: rsc5_stop_0 * Resource action: child_rsc6:0 monitor on node2 * Resource action: child_rsc6:1 monitor on node1 * Resource action: child_rsc6:2 monitor on node2 * Resource action: child_rsc6:2 monitor on node1 * Resource action: child_rsc7:0 monitor on node1 * Resource action: child_rsc7:1 monitor on node1 * Resource action: child_rsc7:2 monitor on node2 * Resource action: child_rsc7:2 monitor on node1 * Pseudo action: rsc7_stop_0 * Resource action: child_rsc8:0 monitor on node2 * Resource action: child_rsc8:1 monitor on node1 * Resource 
action: child_rsc8:2 monitor on node2 * Resource action: child_rsc8:2 monitor on node1 * Resource action: child_rsc2:1 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc4:1 stop on node1 * Pseudo action: rsc4_stopped_0 * Pseudo action: rsc4_start_0 * Resource action: child_rsc5:1 stop on node2 * Pseudo action: rsc5_stopped_0 * Pseudo action: rsc5_start_0 * Resource action: child_rsc7:1 stop on node2 * Pseudo action: rsc7_stopped_0 * Pseudo action: rsc7_start_0 * Pseudo action: all_stopped * Resource action: child_rsc2:1 start on node2 * Pseudo action: rsc2_running_0 * Resource action: child_rsc4:1 start on node2 * Pseudo action: rsc4_running_0 * Resource action: child_rsc5:1 start on node1 * Pseudo action: rsc5_running_0 * Resource action: child_rsc7:1 start on node1 * Pseudo action: rsc7_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node2 child_rsc1:2 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] (unique) child_rsc3:0 (heartbeat:apache): Started node1 child_rsc3:1 (heartbeat:apache): Started node2 child_rsc3:2 (heartbeat:apache): Stopped Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 child_rsc4:1 (heartbeat:apache): Started node2 child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node1 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] (unique) child_rsc6:0 (heartbeat:apache): Started node1 child_rsc6:1 (heartbeat:apache): Started node2 child_rsc6:2 (heartbeat:apache): Stopped Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node1 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] (unique) child_rsc8:0 (heartbeat:apache): Started node1 child_rsc8:1 (heartbeat:apache): Started node2 child_rsc8:2 (heartbeat:apache): Stopped diff --git a/pengine/test10/inc6.summary b/pengine/test10/inc6.summary index 4c754d7c91..99f37026ff 100644 --- a/pengine/test10/inc6.summary +++ b/pengine/test10/inc6.summary @@ -1,100 +1,100 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] Started: [ node1 node2 ] Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started node1 child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] Started: [ node1 node2 ] Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 child_rsc4:1 (heartbeat:apache): Started node1 child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node2 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] Started: [ node1 node2 ] Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node2 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] Started: [ node1 node2 ] Transition Summary: - * Move child_rsc2:1 (Started node1 -> node2) - * Move child_rsc4:1 (Started node1 -> 
node2) - * Move child_rsc5:1 (Started node2 -> node1) - * Restart child_rsc6:0 (Started node1) due to required rsc5 running - * Restart child_rsc6:1 (Started node2) due to required rsc5 running - * Move child_rsc7:1 (Started node2 -> node1) + * Move child_rsc2:1 ( node1 -> node2 ) + * Move child_rsc4:1 ( node1 -> node2 ) + * Move child_rsc5:1 ( node2 -> node1 ) + * Restart child_rsc6:0 ( node1 ) due to required rsc5 running + * Restart child_rsc6:1 ( node2 ) due to required rsc5 running + * Move child_rsc7:1 ( node2 -> node1 ) Executing cluster transition: * Pseudo action: rsc2_stop_0 * Pseudo action: rsc4_stop_0 * Pseudo action: rsc6_stop_0 * Pseudo action: rsc7_stop_0 * Resource action: child_rsc2:1 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc4:1 stop on node1 * Pseudo action: rsc4_stopped_0 * Pseudo action: rsc4_start_0 * Resource action: child_rsc6:0 stop on node1 * Resource action: child_rsc6:1 stop on node2 * Pseudo action: rsc6_stopped_0 * Resource action: child_rsc7:1 stop on node2 * Pseudo action: rsc7_stopped_0 * Pseudo action: rsc7_start_0 * Resource action: child_rsc2:1 start on node2 * Pseudo action: rsc2_running_0 * Resource action: child_rsc4:1 start on node2 * Pseudo action: rsc4_running_0 * Pseudo action: rsc5_stop_0 * Resource action: child_rsc7:1 start on node1 * Pseudo action: rsc7_running_0 * Resource action: child_rsc5:1 stop on node2 * Pseudo action: rsc5_stopped_0 * Pseudo action: rsc5_start_0 * Pseudo action: all_stopped * Resource action: child_rsc5:1 start on node1 * Pseudo action: rsc5_running_0 * Pseudo action: rsc6_start_0 * Resource action: child_rsc6:0 start on node1 * Resource action: child_rsc6:1 start on node2 * Pseudo action: rsc6_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] Started: [ node1 node2 ] Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started [ node1 node2 ] child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] Started: [ node1 node2 ] Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 child_rsc4:1 (heartbeat:apache): Started [ node1 node2 ] child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node1 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] Started: [ node1 node2 ] Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node1 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] Started: [ node1 node2 ] diff --git a/pengine/test10/interleave-restart.summary b/pengine/test10/interleave-restart.summary index 5ac19b9c98..c151581a0d 100644 --- a/pengine/test10/interleave-restart.summary +++ b/pengine/test10/interleave-restart.summary @@ -1,96 +1,96 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] evmsclone (ocf::heartbeat:EvmsSCC): FAILED node1 Started: [ node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] Transition Summary: - * Recover evmsclone:1 (Started node1) - * Restart imagestoreclone:1 (Started node1) due to required evmsclone:1 start - * Restart configstoreclone:1 (Started node1) due to 
required evmsclone:1 start + * Recover evmsclone:1 ( node1 ) + * Restart imagestoreclone:1 ( node1 ) due to required evmsclone:1 start + * Restart configstoreclone:1 ( node1 ) due to required evmsclone:1 start Executing cluster transition: * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Resource action: evmsclone:1 notify on node2 * Resource action: evmsclone:0 notify on node1 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:1 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Resource action: imagestoreclone:0 stop on node1 * Pseudo action: imagestorecloneset_stopped_0 * Resource action: configstoreclone:0 stop on node1 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: imagestorecloneset_pre_notify_start_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: configstorecloneset_pre_notify_start_0 * Pseudo action: evmscloneset_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 * Resource action: evmsclone:0 stop on node1 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_pre_notify_start_0 * Pseudo action: all_stopped * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 * Pseudo action: evmscloneset_start_0 * Resource action: evmsclone:0 start on node1 * Pseudo action: evmscloneset_running_0 * Pseudo action: evmscloneset_post_notify_running_0 * Resource action: evmsclone:1 notify on node2 * Resource action: evmsclone:0 notify on node1 * Pseudo action: evmscloneset_confirmed-post_notify_running_0 * Pseudo action: imagestorecloneset_start_0 * Pseudo action: configstorecloneset_start_0 * Resource action: imagestoreclone:0 start on node1 * Pseudo action: imagestorecloneset_running_0 * Resource action: configstoreclone:0 start on node1 * Pseudo action: configstorecloneset_running_0 * Pseudo action: imagestorecloneset_post_notify_running_0 * Pseudo action: configstorecloneset_post_notify_running_0 * Resource action: imagestoreclone:1 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 * Resource action: configstoreclone:1 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 * Resource action: imagestoreclone:0 monitor=20000 on node1 * Resource action: configstoreclone:0 monitor=20000 on 
node1 Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] diff --git a/pengine/test10/isolation-restart-all.summary b/pengine/test10/isolation-restart-all.summary index a2939f104c..595c254f0f 100644 --- a/pengine/test10/isolation-restart-all.summary +++ b/pengine/test10/isolation-restart-all.summary @@ -1,118 +1,118 @@ Current cluster status: Online: [ rhel7-auto5 ] OFFLINE: [ rhel7-auto4 ] fake (ocf::heartbeat:Dummy): Started rhel7-auto5 Clone Set: replicated-clone [replicated] (unique) replicated:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 replicated:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: group_is_container s1 (ocf::heartbeat:Dummy): Started rhel7-auto5 s2 (ocf::heartbeat:Dummy): Started rhel7-auto5 Clone Set: mygroup-clone [mygroup] (unique) Resource Group: mygroup:0 g1:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 g2:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: mygroup:1 g1:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 g2:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: group_of_containers iso_mem1 (ocf::heartbeat:Dummy): Started rhel7-auto5 iso_mem2 (ocf::heartbeat:Dummy): Started rhel7-auto5 Transition Summary: - * Restart fake (Started rhel7-auto5) - * Restart replicated:0 (Started rhel7-auto5) - * Restart replicated:1 (Started rhel7-auto5) - * Restart s1 (Started rhel7-auto5) - * Restart s2 (Started rhel7-auto5) - * Restart g1:0 (Started rhel7-auto5) - * Restart g2:0 (Started rhel7-auto5) - * Restart g1:1 (Started rhel7-auto5) - * Restart g2:1 (Started rhel7-auto5) - * Restart iso_mem1 (Started rhel7-auto5) - * Restart iso_mem2 (Started rhel7-auto5) + * Restart fake ( rhel7-auto5 ) + * Restart replicated:0 ( rhel7-auto5 ) + * Restart replicated:1 ( rhel7-auto5 ) + * Restart s1 ( rhel7-auto5 ) + * Restart s2 ( rhel7-auto5 ) + * Restart g1:0 ( rhel7-auto5 ) + * Restart g2:0 ( rhel7-auto5 ) + * Restart g1:1 ( rhel7-auto5 ) + * Restart g2:1 ( rhel7-auto5 ) + * Restart iso_mem1 ( rhel7-auto5 ) + * Restart iso_mem2 ( rhel7-auto5 ) Executing cluster transition: * Resource action: fake stop on rhel7-auto5 * Resource action: fake start on rhel7-auto5 * Resource action: fake monitor=60000 on rhel7-auto5 * Pseudo action: replicated-clone_stop_0 * Pseudo action: group_is_container_stop_0 * Resource action: s2 stop on rhel7-auto5 * Pseudo action: mygroup-clone_stop_0 * Pseudo action: group_of_containers_stop_0 * Resource action: iso_mem2 stop on rhel7-auto5 * Resource action: replicated:0 stop on rhel7-auto5 * Resource action: replicated:1 stop on rhel7-auto5 * Pseudo action: replicated-clone_stopped_0 * Pseudo action: replicated-clone_start_0 * Resource action: s1 stop on rhel7-auto5 * Pseudo action: mygroup:0_stop_0 * Resource action: g2:0 stop on rhel7-auto5 * Pseudo action: mygroup:1_stop_0 * Resource action: g2:1 stop on rhel7-auto5 * Resource action: iso_mem1 stop on rhel7-auto5 * Resource action: replicated:0 start on rhel7-auto5 * Resource action: replicated:0 monitor=10000 on rhel7-auto5 * Resource action: replicated:1 start on rhel7-auto5 * Resource action: replicated:1 monitor=10000 on rhel7-auto5 * Pseudo action: replicated-clone_running_0 * Pseudo action: group_is_container_stopped_0 * Pseudo action: group_is_container_start_0 * Resource action: s1 start on 
rhel7-auto5 * Resource action: s1 monitor=10000 on rhel7-auto5 * Resource action: s2 start on rhel7-auto5 * Resource action: s2 monitor=10000 on rhel7-auto5 * Resource action: g1:0 stop on rhel7-auto5 * Resource action: g1:1 stop on rhel7-auto5 * Pseudo action: group_of_containers_stopped_0 * Pseudo action: group_of_containers_start_0 * Resource action: iso_mem1 start on rhel7-auto5 * Resource action: iso_mem1 monitor=60000 on rhel7-auto5 * Resource action: iso_mem2 start on rhel7-auto5 * Resource action: iso_mem2 monitor=60000 on rhel7-auto5 * Pseudo action: all_stopped * Pseudo action: group_is_container_running_0 * Pseudo action: mygroup:0_stopped_0 * Pseudo action: mygroup:1_stopped_0 * Pseudo action: mygroup-clone_stopped_0 * Pseudo action: mygroup-clone_start_0 * Pseudo action: group_of_containers_running_0 * Pseudo action: mygroup:0_start_0 * Resource action: g1:0 start on rhel7-auto5 * Resource action: g1:0 monitor=10000 on rhel7-auto5 * Resource action: g2:0 start on rhel7-auto5 * Resource action: g2:0 monitor=10000 on rhel7-auto5 * Pseudo action: mygroup:1_start_0 * Resource action: g1:1 start on rhel7-auto5 * Resource action: g1:1 monitor=10000 on rhel7-auto5 * Resource action: g2:1 start on rhel7-auto5 * Resource action: g2:1 monitor=10000 on rhel7-auto5 * Pseudo action: mygroup:0_running_0 * Pseudo action: mygroup:1_running_0 * Pseudo action: mygroup-clone_running_0 Revised cluster status: Online: [ rhel7-auto5 ] OFFLINE: [ rhel7-auto4 ] fake (ocf::heartbeat:Dummy): Started rhel7-auto5 Clone Set: replicated-clone [replicated] (unique) replicated:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 replicated:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: group_is_container s1 (ocf::heartbeat:Dummy): Started rhel7-auto5 s2 (ocf::heartbeat:Dummy): Started rhel7-auto5 Clone Set: mygroup-clone [mygroup] (unique) Resource Group: mygroup:0 g1:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 g2:0 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: mygroup:1 g1:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 g2:1 (ocf::heartbeat:Dummy): Started rhel7-auto5 Resource Group: group_of_containers iso_mem1 (ocf::heartbeat:Dummy): Started rhel7-auto5 iso_mem2 (ocf::heartbeat:Dummy): Started rhel7-auto5 diff --git a/pengine/test10/load-stopped-loop-2.summary b/pengine/test10/load-stopped-loop-2.summary index 5da41e4f9c..d4fbf148ef 100644 --- a/pengine/test10/load-stopped-loop-2.summary +++ b/pengine/test10/load-stopped-loop-2.summary @@ -1,112 +1,112 @@ 4 of 25 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ xfc0 xfc1 xfc2 xfc3 ] Clone Set: cl_glusterd [p_glusterd] Started: [ xfc0 xfc1 xfc2 xfc3 ] Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs] Started: [ xfc0 xfc1 xfc2 xfc3 ] xu-test8 (ocf::heartbeat:Xen): Started xfc3 xu-test1 (ocf::heartbeat:Xen): Started xfc3 xu-test10 (ocf::heartbeat:Xen): Started xfc3 xu-test11 (ocf::heartbeat:Xen): Started xfc3 xu-test12 (ocf::heartbeat:Xen): Started xfc2 xu-test13 (ocf::heartbeat:Xen): Stopped xu-test14 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test15 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test16 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test17 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test2 (ocf::heartbeat:Xen): Started xfc3 xu-test3 (ocf::heartbeat:Xen): Started xfc1 xu-test4 (ocf::heartbeat:Xen): Started xfc0 xu-test5 (ocf::heartbeat:Xen): Started xfc2 xu-test6 (ocf::heartbeat:Xen): Started xfc3 xu-test7 (ocf::heartbeat:Xen): Started xfc1 xu-test9 
(ocf::heartbeat:Xen): Started xfc0 Transition Summary: - * Migrate xu-test12 (Started xfc2 -> xfc3) - * Migrate xu-test2 (Started xfc3 -> xfc1) - * Migrate xu-test3 (Started xfc1 -> xfc0) - * Migrate xu-test4 (Started xfc0 -> xfc2) - * Migrate xu-test5 (Started xfc2 -> xfc3) - * Migrate xu-test6 (Started xfc3 -> xfc1) - * Migrate xu-test7 (Started xfc1 -> xfc0) - * Migrate xu-test9 (Started xfc0 -> xfc2) + * Migrate xu-test12 ( xfc2 -> xfc3 ) + * Migrate xu-test2 ( xfc3 -> xfc1 ) + * Migrate xu-test3 ( xfc1 -> xfc0 ) + * Migrate xu-test4 ( xfc0 -> xfc2 ) + * Migrate xu-test5 ( xfc2 -> xfc3 ) + * Migrate xu-test6 ( xfc3 -> xfc1 ) + * Migrate xu-test7 ( xfc1 -> xfc0 ) + * Migrate xu-test9 ( xfc0 -> xfc2 ) * Start xu-test13 (xfc3) Executing cluster transition: * Resource action: xu-test12 migrate_to on xfc2 * Resource action: xu-test2 migrate_to on xfc3 * Resource action: xu-test3 migrate_to on xfc1 * Resource action: xu-test4 migrate_to on xfc0 * Resource action: xu-test5 migrate_to on xfc2 * Resource action: xu-test12 migrate_from on xfc3 * Resource action: xu-test12 stop on xfc2 * Resource action: xu-test2 migrate_from on xfc1 * Resource action: xu-test2 stop on xfc3 * Resource action: xu-test3 migrate_from on xfc0 * Resource action: xu-test3 stop on xfc1 * Resource action: xu-test4 migrate_from on xfc2 * Resource action: xu-test4 stop on xfc0 * Resource action: xu-test5 migrate_from on xfc3 * Resource action: xu-test5 stop on xfc2 * Pseudo action: load_stopped_xfc2 * Pseudo action: xu-test4_start_0 * Resource action: xu-test9 migrate_to on xfc0 * Resource action: xu-test4 monitor=10000 on xfc2 * Resource action: xu-test9 migrate_from on xfc2 * Resource action: xu-test9 stop on xfc0 * Pseudo action: load_stopped_xfc0 * Pseudo action: xu-test3_start_0 * Resource action: xu-test7 migrate_to on xfc1 * Pseudo action: xu-test9_start_0 * Resource action: xu-test3 monitor=10000 on xfc0 * Resource action: xu-test7 migrate_from on xfc0 * Resource action: xu-test7 stop on xfc1 * Resource action: xu-test9 monitor=10000 on xfc2 * Pseudo action: load_stopped_xfc1 * Pseudo action: xu-test2_start_0 * Resource action: xu-test6 migrate_to on xfc3 * Pseudo action: xu-test7_start_0 * Resource action: xu-test2 monitor=10000 on xfc1 * Resource action: xu-test6 migrate_from on xfc1 * Resource action: xu-test6 stop on xfc3 * Resource action: xu-test7 monitor=10000 on xfc0 * Pseudo action: load_stopped_xfc3 * Pseudo action: all_stopped * Pseudo action: xu-test12_start_0 * Pseudo action: xu-test5_start_0 * Pseudo action: xu-test6_start_0 * Resource action: xu-test13 start on xfc3 * Resource action: xu-test12 monitor=10000 on xfc3 * Resource action: xu-test5 monitor=10000 on xfc3 * Resource action: xu-test6 monitor=10000 on xfc1 * Resource action: xu-test13 monitor=10000 on xfc3 Revised cluster status: Online: [ xfc0 xfc1 xfc2 xfc3 ] Clone Set: cl_glusterd [p_glusterd] Started: [ xfc0 xfc1 xfc2 xfc3 ] Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs] Started: [ xfc0 xfc1 xfc2 xfc3 ] xu-test8 (ocf::heartbeat:Xen): Started xfc3 xu-test1 (ocf::heartbeat:Xen): Started xfc3 xu-test10 (ocf::heartbeat:Xen): Started xfc3 xu-test11 (ocf::heartbeat:Xen): Started xfc3 xu-test12 (ocf::heartbeat:Xen): Started xfc3 xu-test13 (ocf::heartbeat:Xen): Started xfc3 xu-test14 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test15 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test16 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test17 (ocf::heartbeat:Xen): Stopped ( disabled ) xu-test2 (ocf::heartbeat:Xen): Started xfc1 xu-test3 
(ocf::heartbeat:Xen): Started xfc0 xu-test4 (ocf::heartbeat:Xen): Started xfc2 xu-test5 (ocf::heartbeat:Xen): Started xfc3 xu-test6 (ocf::heartbeat:Xen): Started xfc1 xu-test7 (ocf::heartbeat:Xen): Started xfc0 xu-test9 (ocf::heartbeat:Xen): Started xfc2 diff --git a/pengine/test10/load-stopped-loop.summary b/pengine/test10/load-stopped-loop.summary index 2ff5bae3a4..bc5a0f91cb 100644 --- a/pengine/test10/load-stopped-loop.summary +++ b/pengine/test10/load-stopped-loop.summary @@ -1,335 +1,335 @@ 32 of 308 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ mgmt01 v03-a v03-b ] stonith-v02-a (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-b (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-d (stonith:fence_ipmilan): Stopped ( disabled ) stonith-mgmt01 (stonith:fence_xvm): Started v03-b stonith-mgmt02 (stonith:meatware): Started mgmt01 stonith-v03-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v03-a (stonith:fence_ipmilan): Started v03-b stonith-v03-b (stonith:fence_ipmilan): Started v03-a stonith-v03-d (stonith:fence_ipmilan): Stopped ( disabled ) Clone Set: cl-clvmd [clvmd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-dlm [dlm] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-iscsid [iscsid] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirtd [libvirtd] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-multipathd [multipathd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-node-params [node-params] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan1-if [vlan1-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan101-if [vlan101-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan102-if [vlan102-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan103-if [vlan103-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan104-if [vlan104-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan3-if [vlan3-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan4-if [vlan4-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan5-if [vlan5-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan900-if [vlan900-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan909-if [vlan909-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-libvirt-images-fs [libvirt-images-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-install-fs [libvirt-install-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-images-pool [libvirt-images-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b 
vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-vlan200-if [vlan200-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b Clone Set: cl-mcast-test-net [mcast-test-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-libvirt-qpid [libvirt-qpid] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) 
gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-mcast-gleb-net [mcast-gleb-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Transition Summary: - * Reload vds-ok-pool-0-iscsi:0 (Started mgmt01) - * Reload vds-ok-pool-0-iscsi:1 (Started v03-b) - * Reload vds-ok-pool-0-iscsi:2 (Started v03-a) - * Reload vds-ok-pool-1-iscsi:0 (Started mgmt01) - * Reload vds-ok-pool-1-iscsi:1 (Started v03-b) - * Reload vds-ok-pool-1-iscsi:2 (Started v03-a) - * Restart stonith-v03-b (Started v03-a) - * Restart stonith-v03-a (Started v03-b) - * Migrate license.anbriz.vds-ok.com-vm (Started v03-b -> v03-a) - * Migrate terminal0.anbriz.vds-ok.com-vm (Started v03-a -> v03-b) + * Reload vds-ok-pool-0-iscsi:0 ( mgmt01 ) + * Reload vds-ok-pool-0-iscsi:1 ( v03-b ) + * Reload vds-ok-pool-0-iscsi:2 ( v03-a ) + * Reload vds-ok-pool-1-iscsi:0 ( mgmt01 ) + * Reload vds-ok-pool-1-iscsi:1 ( v03-b ) + * Reload vds-ok-pool-1-iscsi:2 ( v03-a ) + * Restart stonith-v03-b ( v03-a ) + * Restart stonith-v03-a ( v03-b ) + * Migrate license.anbriz.vds-ok.com-vm ( v03-b -> v03-a ) + * Migrate terminal0.anbriz.vds-ok.com-vm ( v03-a -> v03-b ) * Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (v03-a) Executing cluster transition: * Resource action: vds-ok-pool-0-iscsi:1 reload on mgmt01 * Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01 * Resource action: vds-ok-pool-0-iscsi:0 reload on v03-b * Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b * Resource action: vds-ok-pool-0-iscsi:2 reload on v03-a * Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a * Resource action: vds-ok-pool-1-iscsi:1 reload on mgmt01 * Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01 * Resource action: vds-ok-pool-1-iscsi:0 reload on v03-b * Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b * Resource action: vds-ok-pool-1-iscsi:2 reload on v03-a * Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a * Resource action: stonith-v03-b stop on v03-a * Resource action: stonith-v03-b start on v03-a * Resource action: stonith-v03-b monitor=60000 on v03-a * Resource action: stonith-v03-a stop on v03-b * Resource action: stonith-v03-a start on v03-b * Resource action: stonith-v03-a monitor=60000 on v03-b * Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b * Pseudo action: load_stopped_mgmt01 * Resource action: license.anbriz.vds-ok.com-vm migrate_from on v03-a * Resource action: license.anbriz.vds-ok.com-vm stop on v03-b * Pseudo action: load_stopped_v03-b * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b * Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a * Pseudo action: load_stopped_v03-a * Pseudo action: all_stopped * Pseudo action: license.anbriz.vds-ok.com-vm_start_0 * Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0 * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a * Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a * Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a Revised cluster status: Online: [ mgmt01 v03-a v03-b ] stonith-v02-a (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-b (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-d 
(stonith:fence_ipmilan): Stopped ( disabled ) stonith-mgmt01 (stonith:fence_xvm): Started v03-b stonith-mgmt02 (stonith:meatware): Started mgmt01 stonith-v03-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v03-a (stonith:fence_ipmilan): Started v03-b stonith-v03-b (stonith:fence_ipmilan): Started v03-a stonith-v03-d (stonith:fence_ipmilan): Stopped ( disabled ) Clone Set: cl-clvmd [clvmd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-dlm [dlm] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-iscsid [iscsid] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirtd [libvirtd] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-multipathd [multipathd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-node-params [node-params] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan1-if [vlan1-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan101-if [vlan101-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan102-if [vlan102-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan103-if [vlan103-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan104-if [vlan104-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan3-if [vlan3-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan4-if [vlan4-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan5-if [vlan5-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan900-if [vlan900-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan909-if [vlan909-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-libvirt-images-fs [libvirt-images-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-install-fs [libvirt-install-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-images-pool [libvirt-images-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) 
vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-vlan200-if [vlan200-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b Clone Set: cl-mcast-test-net [mcast-test-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-libvirt-qpid [libvirt-qpid] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b Clone Set: cl-mcast-gleb-net [mcast-gleb-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] diff --git a/pengine/test10/master-1.summary b/pengine/test10/master-1.summary index a45943c376..b0e502585a 100644 --- a/pengine/test10/master-1.summary +++ b/pengine/test10/master-1.summary @@ -1,49 +1,48 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 
(node1) - * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master node2 child_rsc1:2 (ocf::heartbeat:apache): Slave node1 child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-10.summary b/pengine/test10/master-10.summary index cd0efc3123..d736f7c97d 100644 --- a/pengine/test10/master-10.summary +++ b/pengine/test10/master-10.summary @@ -1,73 +1,72 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: - * Start child_rsc1:0 (node1) * Promote child_rsc1:0 (Stopped -> Master node1) * Start child_rsc1:1 (node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc1_pre_notify_promote_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_promote_0 * Pseudo action: 
rsc1_promote_0 * Resource action: child_rsc1:0 promote on node1 * Pseudo action: rsc1_promoted_0 * Pseudo action: rsc1_post_notify_promoted_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_promoted_0 * Resource action: child_rsc1:0 monitor=11000 on node1 * Resource action: child_rsc1:1 monitor=1000 on node2 * Resource action: child_rsc1:2 monitor=1000 on node1 * Resource action: child_rsc1:3 monitor=1000 on node2 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Master node1 child_rsc1:1 (ocf::heartbeat:apache): Slave node2 child_rsc1:2 (ocf::heartbeat:apache): Slave node1 child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-11.summary b/pengine/test10/master-11.summary index a13760f950..b88e5beee2 100644 --- a/pengine/test10/master-11.summary +++ b/pengine/test10/master-11.summary @@ -1,39 +1,38 @@ Current cluster status: Online: [ node1 node2 ] simple-rsc (heartbeat:apache): Stopped Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped Transition Summary: * Start simple-rsc (node2) * Start child_rsc1:0 (node1) - * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) Executing cluster transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node1 node2 ] simple-rsc (heartbeat:apache): Started node2 Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master node2 diff --git a/pengine/test10/master-2.summary b/pengine/test10/master-2.summary index b8f5447c7f..391bf83feb 100644 --- a/pengine/test10/master-2.summary +++ b/pengine/test10/master-2.summary @@ -1,69 +1,68 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: - * Start child_rsc1:0 (node1) * Promote child_rsc1:0 (Stopped -> Master node1) * Start child_rsc1:1 (node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 
* Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc1_pre_notify_promote_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_promote_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:0 promote on node1 * Pseudo action: rsc1_promoted_0 * Pseudo action: rsc1_post_notify_promoted_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_promoted_0 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Master node1 child_rsc1:1 (ocf::heartbeat:apache): Slave node2 child_rsc1:2 (ocf::heartbeat:apache): Slave node1 child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-3.summary b/pengine/test10/master-3.summary index a45943c376..b0e502585a 100644 --- a/pengine/test10/master-3.summary +++ b/pengine/test10/master-3.summary @@ -1,49 +1,48 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Stopped child_rsc1:1 (ocf::heartbeat:apache): Stopped child_rsc1:2 (ocf::heartbeat:apache): Stopped child_rsc1:3 (ocf::heartbeat:apache): Stopped child_rsc1:4 (ocf::heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 (node1) - * Start child_rsc1:1 (node2) * Promote child_rsc1:1 (Stopped -> Master node2) * Start child_rsc1:2 (node1) * Start child_rsc1:3 (node2) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (ocf::heartbeat:apache): Slave node1 child_rsc1:1 (ocf::heartbeat:apache): Master 
node2 child_rsc1:2 (ocf::heartbeat:apache): Slave node1 child_rsc1:3 (ocf::heartbeat:apache): Slave node2 child_rsc1:4 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/master-7.summary b/pengine/test10/master-7.summary index 58ef2758f5..105966faed 100644 --- a/pengine/test10/master-7.summary +++ b/pengine/test10/master-7.summary @@ -1,121 +1,121 @@ Current cluster status: Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 ( UNCLEAN ) ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: * Fence (reboot) c001n01 'peer is no longer part of the cluster' - * Move DcIPaddr (Started c001n01 -> c001n03) - * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) - * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) - * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) - * Move lsb_dummy (Started c001n02 -> c001n08) - * Move rsc_c001n01 (Started c001n01 -> c001n03) + * Move DcIPaddr ( c001n01 -> c001n03 ) + * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) + * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) + * Move ocf_192.168.100.183 ( c001n03 -> c001n02 ) + * Move lsb_dummy ( c001n02 -> c001n08 ) + * Move rsc_c001n01 ( c001n01 -> c001n03 ) * Stop child_DoFencing:0 (c001n01) due to node availability - * Demote ocf_msdummy:0 (Master -> Stopped c001n01) - * Stop ocf_msdummy:4 (c001n01) due to node availability + * Stop ocf_msdummy:0 ( Master c001n01 ) due to node availability + * Stop ocf_msdummy:4 ( Slave c001n01 ) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor 
on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: ocf_msdummy:4_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/master-8.summary b/pengine/test10/master-8.summary index c18e8848a1..5792e16abf 100644 --- a/pengine/test10/master-8.summary +++ b/pengine/test10/master-8.summary @@ -1,125 +1,124 @@ Current cluster status: Node c001n01 
(de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: * Fence (reboot) c001n01 'peer is no longer part of the cluster' - * Move DcIPaddr (Started c001n01 -> c001n03) - * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) - * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) - * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) - * Move lsb_dummy (Started c001n02 -> c001n08) - * Move rsc_c001n01 (Started c001n01 -> c001n03) + * Move DcIPaddr ( c001n01 -> c001n03 ) + * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) + * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) + * Move ocf_192.168.100.183 ( c001n03 -> c001n02 ) + * Move lsb_dummy ( c001n02 -> c001n08 ) + * Move rsc_c001n01 ( c001n01 -> c001n03 ) * Stop child_DoFencing:0 (c001n01) due to node availability - * Demote ocf_msdummy:0 (Master -> Slave c001n01 - blocked) - * Move ocf_msdummy:0 (Slave c001n01 -> c001n03) + * Move ocf_msdummy:0 ( Master c001n01 -> Slave c001n03 ) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: 
heartbeat_192.168.100.182 stop on c001n03 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Resource action: ocf_msdummy:0 start on c001n03 * Pseudo action: master_rsc_1_running_0 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 * Resource action: ocf_msdummy:0 monitor=5000 on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/master-9.summary b/pengine/test10/master-9.summary index ef596022e7..55ccf507e1 100644 --- a/pengine/test10/master-9.summary +++ b/pengine/test10/master-9.summary @@ -1,100 +1,100 @@ Current cluster status: Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline) Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline) Online: [ ibm1 va1 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped 
ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped rsc_va1 (ocf::heartbeat:IPaddr): Stopped rsc_test02 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started va1 child_DoFencing:1 (stonith:ssh): Started ibm1 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped Transition Summary: * Shutdown ibm1 - * Start DcIPaddr (va1 - blocked) due to no quorum - * Start ocf_127.0.0.11 (va1 - blocked) due to no quorum - * Start heartbeat_127.0.0.12 (va1 - blocked) due to no quorum - * Start ocf_127.0.0.13 (va1 - blocked) due to no quorum - * Start lsb_dummy (va1 - blocked) due to no quorum - * Start rsc_sgi2 (va1 - blocked) due to no quorum - * Start rsc_ibm1 (va1 - blocked) due to no quorum - * Start rsc_va1 (va1 - blocked) due to no quorum - * Start rsc_test02 (va1 - blocked) due to no quorum + * Start DcIPaddr ( va1 ) due to no quorum (blocked) + * Start ocf_127.0.0.11 ( va1 ) due to no quorum (blocked) + * Start heartbeat_127.0.0.12 ( va1 ) due to no quorum (blocked) + * Start ocf_127.0.0.13 ( va1 ) due to no quorum (blocked) + * Start lsb_dummy ( va1 ) due to no quorum (blocked) + * Start rsc_sgi2 ( va1 ) due to no quorum (blocked) + * Start rsc_ibm1 ( va1 ) due to no quorum (blocked) + * Start rsc_va1 ( va1 ) due to no quorum (blocked) + * Start rsc_test02 ( va1 ) due to no quorum (blocked) * Stop child_DoFencing:1 (ibm1) due to node availability - * Start ocf_msdummy:0 (va1 - blocked) due to no quorum - * Start ocf_msdummy:1 (va1 - blocked) due to no quorum + * Promote ocf_msdummy:0 ( Stopped -> Master va1 ) due to node availability (blocked) + * Start ocf_msdummy:1 ( va1 ) due to no quorum (blocked) Executing cluster transition: * Resource action: child_DoFencing:1 monitor on va1 * Resource action: child_DoFencing:2 monitor on va1 * Resource action: child_DoFencing:2 monitor on ibm1 * Resource action: child_DoFencing:3 monitor on va1 * Resource action: child_DoFencing:3 monitor on ibm1 * Pseudo action: DoFencing_stop_0 * Resource action: ocf_msdummy:2 monitor on va1 * Resource action: ocf_msdummy:2 monitor on ibm1 * Resource action: ocf_msdummy:3 monitor on va1 * Resource action: ocf_msdummy:3 monitor on ibm1 * Resource action: ocf_msdummy:4 monitor on va1 * Resource action: ocf_msdummy:4 monitor on ibm1 * Resource action: ocf_msdummy:5 monitor on va1 * Resource action: ocf_msdummy:5 monitor on ibm1 * Resource action: ocf_msdummy:6 monitor on va1 * Resource action: ocf_msdummy:6 monitor on ibm1 * Resource action: ocf_msdummy:7 monitor on va1 * Resource action: ocf_msdummy:7 monitor on ibm1 * Resource action: child_DoFencing:1 stop on ibm1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on ibm1 * 
Pseudo action: all_stopped Revised cluster status: Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline) Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline) Online: [ ibm1 va1 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped rsc_va1 (ocf::heartbeat:IPaddr): Stopped rsc_test02 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started va1 child_DoFencing:1 (stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped diff --git a/pengine/test10/master-asymmetrical-order.summary b/pengine/test10/master-asymmetrical-order.summary index d09f62e342..50f717e411 100644 --- a/pengine/test10/master-asymmetrical-order.summary +++ b/pengine/test10/master-asymmetrical-order.summary @@ -1,35 +1,35 @@ 2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Master/Slave Set: ms2 [rsc2] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) due to node availability + * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:1 ( Slave node2 ) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:0 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:0 stop on node1 * Resource action: rsc1:1 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Stopped (disabled): [ node1 node2 ] Master/Slave Set: ms2 [rsc2] Masters: [ node2 ] Slaves: [ node1 ] diff --git a/pengine/test10/master-demote.summary b/pengine/test10/master-demote.summary index 678e20c0a5..b50fb90d2b 100644 --- a/pengine/test10/master-demote.summary +++ b/pengine/test10/master-demote.summary @@ -1,69 +1,69 @@ Current cluster status: Online: [ cxa1 cxb1 ] cyrus_address (ocf::heartbeat:IPaddr2): Started cxa1 cyrus_master (ocf::heartbeat:cyrus-imap): Stopped cyrus_syslogd (ocf::heartbeat:syslogd): Stopped cyrus_filesys (ocf::heartbeat:Filesystem): Stopped cyrus_volgroup (ocf::heartbeat:VolGroup): Stopped Master/Slave Set: cyrus_drbd [cyrus_drbd_node] Masters: [ cxa1 ] Slaves: [ cxb1 ] named_address (ocf::heartbeat:IPaddr2): Started cxa1 named_filesys (ocf::heartbeat:Filesystem): Stopped named_volgroup (ocf::heartbeat:VolGroup): Stopped named_daemon 
(ocf::heartbeat:recursor): Stopped named_syslogd (ocf::heartbeat:syslogd): Stopped Master/Slave Set: named_drbd [named_drbd_node] Slaves: [ cxa1 cxb1 ] Clone Set: pingd_clone [pingd_node] Started: [ cxa1 cxb1 ] Clone Set: fence_clone [fence_node] Started: [ cxa1 cxb1 ] Transition Summary: - * Move named_address (Started cxa1 -> cxb1) + * Move named_address ( cxa1 -> cxb1 ) * Promote named_drbd_node:1 (Slave -> Master cxb1) Executing cluster transition: * Resource action: named_address stop on cxa1 * Pseudo action: named_drbd_pre_notify_promote_0 * Pseudo action: all_stopped * Resource action: named_address start on cxb1 * Resource action: named_drbd_node:1 notify on cxa1 * Resource action: named_drbd_node:0 notify on cxb1 * Pseudo action: named_drbd_confirmed-pre_notify_promote_0 * Pseudo action: named_drbd_promote_0 * Resource action: named_drbd_node:0 promote on cxb1 * Pseudo action: named_drbd_promoted_0 * Pseudo action: named_drbd_post_notify_promoted_0 * Resource action: named_drbd_node:1 notify on cxa1 * Resource action: named_drbd_node:0 notify on cxb1 * Pseudo action: named_drbd_confirmed-post_notify_promoted_0 * Resource action: named_drbd_node:0 monitor=10000 on cxb1 Revised cluster status: Online: [ cxa1 cxb1 ] cyrus_address (ocf::heartbeat:IPaddr2): Started cxa1 cyrus_master (ocf::heartbeat:cyrus-imap): Stopped cyrus_syslogd (ocf::heartbeat:syslogd): Stopped cyrus_filesys (ocf::heartbeat:Filesystem): Stopped cyrus_volgroup (ocf::heartbeat:VolGroup): Stopped Master/Slave Set: cyrus_drbd [cyrus_drbd_node] Masters: [ cxa1 ] Slaves: [ cxb1 ] named_address (ocf::heartbeat:IPaddr2): Started cxb1 named_filesys (ocf::heartbeat:Filesystem): Stopped named_volgroup (ocf::heartbeat:VolGroup): Stopped named_daemon (ocf::heartbeat:recursor): Stopped named_syslogd (ocf::heartbeat:syslogd): Stopped Master/Slave Set: named_drbd [named_drbd_node] Masters: [ cxb1 ] Slaves: [ cxa1 ] Clone Set: pingd_clone [pingd_node] Started: [ cxa1 cxb1 ] Clone Set: fence_clone [fence_node] Started: [ cxa1 cxb1 ] diff --git a/pengine/test10/master-failed-demote-2.summary b/pengine/test10/master-failed-demote-2.summary index 086d02e16b..f5f535c703 100644 --- a/pengine/test10/master-failed-demote-2.summary +++ b/pengine/test10/master-failed-demote-2.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a Transition Summary: - * Stop stateful-1:0 (dl380g5b) due to node availability + * Stop stateful-1:0 ( Slave dl380g5b ) due to node availability * Promote stateful-1:1 (Slave -> Master dl380g5a) * Promote stateful-2:1 (Slave -> Master dl380g5a) Executing cluster transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: all_stopped * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 
monitor=10000 on dl380g5a * Pseudo action: ms-sf_promoted_0 Revised cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Stopped stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a diff --git a/pengine/test10/master-failed-demote.summary b/pengine/test10/master-failed-demote.summary index 0f6c410bf1..ec31e42598 100644 --- a/pengine/test10/master-failed-demote.summary +++ b/pengine/test10/master-failed-demote.summary @@ -1,63 +1,63 @@ Current cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a Transition Summary: - * Stop stateful-1:0 (dl380g5b) due to node availability + * Stop stateful-1:0 ( Slave dl380g5b ) due to node availability * Promote stateful-1:1 (Slave -> Master dl380g5a) * Promote stateful-2:1 (Slave -> Master dl380g5a) Executing cluster transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_pre_notify_stop_0 * Resource action: stateful-1:0 notify on dl380g5b * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_stop_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_post_notify_stopped_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms-sf_pre_notify_promote_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_promote_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 * Pseudo action: ms-sf_post_notify_promoted_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 monitor=10000 on dl380g5a Revised cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Stopped stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a diff --git a/pengine/test10/master-move.summary b/pengine/test10/master-move.summary index 2f7d877718..e42fa27d69 100644 --- a/pengine/test10/master-move.summary +++ b/pengine/test10/master-move.summary @@ -1,71 +1,71 @@ Current cluster status: Online: [ bl460g1n13 bl460g1n14 ] Resource Group: grpDRBD dummy01 (ocf::pacemaker:Dummy): FAILED bl460g1n13 dummy02 
(ocf::pacemaker:Dummy): Started bl460g1n13 dummy03 (ocf::pacemaker:Dummy): Stopped Master/Slave Set: msDRBD [prmDRBD] Masters: [ bl460g1n13 ] Slaves: [ bl460g1n14 ] Transition Summary: - * Recover dummy01 (Started bl460g1n13 -> bl460g1n14) - * Move dummy02 (Started bl460g1n13 -> bl460g1n14) + * Recover dummy01 ( bl460g1n13 -> bl460g1n14 ) + * Move dummy02 ( bl460g1n13 -> bl460g1n14 ) * Start dummy03 (bl460g1n14) * Demote prmDRBD:0 (Master -> Slave bl460g1n13) * Promote prmDRBD:1 (Slave -> Master bl460g1n14) Executing cluster transition: * Pseudo action: grpDRBD_stop_0 * Resource action: dummy02 stop on bl460g1n13 * Resource action: prmDRBD:0 cancel=10000 on bl460g1n13 * Resource action: prmDRBD:1 cancel=20000 on bl460g1n14 * Pseudo action: msDRBD_pre_notify_demote_0 * Resource action: dummy01 stop on bl460g1n13 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-pre_notify_demote_0 * Pseudo action: all_stopped * Pseudo action: grpDRBD_stopped_0 * Pseudo action: msDRBD_demote_0 * Resource action: prmDRBD:0 demote on bl460g1n13 * Pseudo action: msDRBD_demoted_0 * Pseudo action: msDRBD_post_notify_demoted_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-post_notify_demoted_0 * Pseudo action: msDRBD_pre_notify_promote_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-pre_notify_promote_0 * Pseudo action: msDRBD_promote_0 * Resource action: prmDRBD:1 promote on bl460g1n14 * Pseudo action: msDRBD_promoted_0 * Pseudo action: msDRBD_post_notify_promoted_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-post_notify_promoted_0 * Pseudo action: grpDRBD_start_0 * Resource action: dummy01 start on bl460g1n14 * Resource action: dummy02 start on bl460g1n14 * Resource action: dummy03 start on bl460g1n14 * Resource action: prmDRBD:0 monitor=20000 on bl460g1n13 * Resource action: prmDRBD:1 monitor=10000 on bl460g1n14 * Pseudo action: grpDRBD_running_0 * Resource action: dummy01 monitor=10000 on bl460g1n14 * Resource action: dummy02 monitor=10000 on bl460g1n14 * Resource action: dummy03 monitor=10000 on bl460g1n14 Revised cluster status: Online: [ bl460g1n13 bl460g1n14 ] Resource Group: grpDRBD dummy01 (ocf::pacemaker:Dummy): Started bl460g1n14 dummy02 (ocf::pacemaker:Dummy): Started bl460g1n14 dummy03 (ocf::pacemaker:Dummy): Started bl460g1n14 Master/Slave Set: msDRBD [prmDRBD] Masters: [ bl460g1n14 ] Slaves: [ bl460g1n13 ] diff --git a/pengine/test10/master-partially-demoted-group.summary b/pengine/test10/master-partially-demoted-group.summary index 0bda6050d0..0abf07c154 100644 --- a/pengine/test10/master-partially-demoted-group.summary +++ b/pengine/test10/master-partially-demoted-group.summary @@ -1,117 +1,117 @@ Current cluster status: Online: [ sd01-0 sd01-1 ] stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 Resource Group: cdev-pool-0-iscsi-export cdev-pool-0-iscsi-target (ocf::vds-ok:iSCSITarget): Started sd01-1 cdev-pool-0-iscsi-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started sd01-1 Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] Masters: [ sd01-1 ] Slaves: [ sd01-0 ] Clone Set: cl-ietd [ietd] Started: [ sd01-0 sd01-1 ] Clone Set: cl-vlan1-net [vlan1-net] Started: [ sd01-0 sd01-1 ] Resource Group: 
cdev-pool-0-iscsi-vips vip-164 (ocf::heartbeat:IPaddr2): Started sd01-1 vip-165 (ocf::heartbeat:IPaddr2): Started sd01-1 Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] Masters: [ sd01-1 ] Slaves: [ sd01-0 ] Transition Summary: - * Move vip-164 (Started sd01-1 -> sd01-0) - * Move vip-165 (Started sd01-1 -> sd01-0) - * Move cdev-pool-0-iscsi-target (Started sd01-1 -> sd01-0) - * Move cdev-pool-0-iscsi-lun-1 (Started sd01-1 -> sd01-0) - * Demote vip-164-fw:0 (Master -> Slave sd01-1) + * Move vip-164 ( sd01-1 -> sd01-0 ) + * Move vip-165 ( sd01-1 -> sd01-0 ) + * Move cdev-pool-0-iscsi-target ( sd01-1 -> sd01-0 ) + * Move cdev-pool-0-iscsi-lun-1 ( sd01-1 -> sd01-0 ) + * Demote vip-164-fw:0 ( Master -> Slave sd01-1 ) * Promote vip-164-fw:1 (Slave -> Master sd01-0) * Promote vip-165-fw:1 (Slave -> Master sd01-0) - * Demote cdev-pool-0-drbd:0 (Master -> Slave sd01-1) + * Demote cdev-pool-0-drbd:0 ( Master -> Slave sd01-1 ) * Promote cdev-pool-0-drbd:1 (Slave -> Master sd01-0) Executing cluster transition: * Resource action: vip-165-fw monitor=10000 on sd01-1 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demote_0 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_demote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demote_0 * Resource action: vip-164-fw demote on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_demote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demoted_0 * Resource action: vip-164-fw monitor=10000 on sd01-1 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demoted_0 * Pseudo action: cdev-pool-0-iscsi-vips_stop_0 * Resource action: vip-165 stop on sd01-1 * Resource action: vip-164 stop on sd01-1 * Pseudo action: cdev-pool-0-iscsi-vips_stopped_0 * Pseudo action: cdev-pool-0-iscsi-export_stop_0 * Resource action: cdev-pool-0-iscsi-lun-1 stop on sd01-1 * Resource action: cdev-pool-0-iscsi-target stop on sd01-1 * Pseudo action: all_stopped * Pseudo action: cdev-pool-0-iscsi-export_stopped_0 * Pseudo action: ms-cdev-pool-0-drbd_demote_0 * Resource action: cdev-pool-0-drbd demote on sd01-1 * Pseudo action: ms-cdev-pool-0-drbd_demoted_0 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_demoted_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_demoted_0 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_promote_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_promote_0 * Pseudo action: ms-cdev-pool-0-drbd_promote_0 * Resource action: cdev-pool-0-drbd promote on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_promoted_0 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_promoted_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_promoted_0 * Pseudo action: cdev-pool-0-iscsi-export_start_0 * Resource action: cdev-pool-0-iscsi-target start on sd01-0 * Resource action: cdev-pool-0-iscsi-lun-1 start on sd01-0 * Resource action: cdev-pool-0-drbd monitor=20000 on sd01-1 * Resource action: cdev-pool-0-drbd monitor=10000 on sd01-0 * Pseudo action: cdev-pool-0-iscsi-export_running_0 * Resource action: cdev-pool-0-iscsi-target monitor=10000 on sd01-0 * Resource action: cdev-pool-0-iscsi-lun-1 monitor=10000 on sd01-0 * 
Pseudo action: cdev-pool-0-iscsi-vips_start_0 * Resource action: vip-164 start on sd01-0 * Resource action: vip-165 start on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips_running_0 * Resource action: vip-164 monitor=30000 on sd01-0 * Resource action: vip-165 monitor=30000 on sd01-0 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_promote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promote_0 * Resource action: vip-164-fw promote on sd01-0 * Resource action: vip-165-fw promote on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promoted_0 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promoted_0 Revised cluster status: Online: [ sd01-0 sd01-1 ] stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 Resource Group: cdev-pool-0-iscsi-export cdev-pool-0-iscsi-target (ocf::vds-ok:iSCSITarget): Started sd01-0 cdev-pool-0-iscsi-lun-1 (ocf::vds-ok:iSCSILogicalUnit): Started sd01-0 Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] Masters: [ sd01-0 ] Slaves: [ sd01-1 ] Clone Set: cl-ietd [ietd] Started: [ sd01-0 sd01-1 ] Clone Set: cl-vlan1-net [vlan1-net] Started: [ sd01-0 sd01-1 ] Resource Group: cdev-pool-0-iscsi-vips vip-164 (ocf::heartbeat:IPaddr2): Started sd01-0 vip-165 (ocf::heartbeat:IPaddr2): Started sd01-0 Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] Masters: [ sd01-0 ] Slaves: [ sd01-1 ] diff --git a/pengine/test10/master-probed-score.summary b/pengine/test10/master-probed-score.summary index e7f2ba37fb..3c67fe9281 100644 --- a/pengine/test10/master-probed-score.summary +++ b/pengine/test10/master-probed-score.summary @@ -1,328 +1,326 @@ 2 of 60 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Master/Slave Set: AdminClone [AdminDrbd] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] CronAmbientTemperature (ocf::heartbeat:symlink): Stopped StonithHypatia (stonith:fence_nut): Stopped StonithOrestes (stonith:fence_nut): Stopped Resource Group: DhcpGroup SymlinkDhcpdConf (ocf::heartbeat:symlink): Stopped SymlinkSysconfigDhcpd (ocf::heartbeat:symlink): Stopped SymlinkDhcpdLeases (ocf::heartbeat:symlink): Stopped Dhcpd (lsb:dhcpd): Stopped ( disabled ) DhcpIP (ocf::heartbeat:IPaddr2): Stopped Clone Set: CupsClone [CupsGroup] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: IPClone [IPGroup] (unique) Resource Group: IPGroup:0 ClusterIP:0 (ocf::heartbeat:IPaddr2): Stopped ClusterIPLocal:0 (ocf::heartbeat:IPaddr2): Stopped ClusterIPSandbox:0 (ocf::heartbeat:IPaddr2): Stopped Resource Group: IPGroup:1 ClusterIP:1 (ocf::heartbeat:IPaddr2): Stopped ClusterIPLocal:1 (ocf::heartbeat:IPaddr2): Stopped ClusterIPSandbox:1 (ocf::heartbeat:IPaddr2): Stopped Clone Set: LibvirtdClone [LibvirtdGroup] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: TftpClone [TftpGroup] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: ExportsClone [ExportsGroup] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: FilesystemClone [FilesystemGroup] Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] KVM-guest (ocf::heartbeat:VirtualDomain): Stopped Proxy (ocf::heartbeat:VirtualDomain): Stopped Transition 
Summary: - * Start AdminDrbd:0 (hypatia-corosync.nevis.columbia.edu) - * Promote AdminDrbd:0 (Stopped -> Master hypatia-corosync.nevis.columbia.edu) - * Start AdminDrbd:1 (orestes-corosync.nevis.columbia.edu) - * Promote AdminDrbd:1 (Stopped -> Master orestes-corosync.nevis.columbia.edu) + * Promote AdminDrbd:0 ( Stopped -> Master hypatia-corosync.nevis.columbia.edu ) + * Promote AdminDrbd:1 ( Stopped -> Master orestes-corosync.nevis.columbia.edu ) * Start CronAmbientTemperature (hypatia-corosync.nevis.columbia.edu) * Start StonithHypatia (orestes-corosync.nevis.columbia.edu) * Start StonithOrestes (hypatia-corosync.nevis.columbia.edu) * Start SymlinkDhcpdConf (orestes-corosync.nevis.columbia.edu) * Start SymlinkSysconfigDhcpd (orestes-corosync.nevis.columbia.edu) * Start SymlinkDhcpdLeases (orestes-corosync.nevis.columbia.edu) * Start SymlinkUsrShareCups:0 (hypatia-corosync.nevis.columbia.edu) * Start SymlinkCupsdConf:0 (hypatia-corosync.nevis.columbia.edu) * Start Cups:0 (hypatia-corosync.nevis.columbia.edu) * Start SymlinkUsrShareCups:1 (orestes-corosync.nevis.columbia.edu) * Start SymlinkCupsdConf:1 (orestes-corosync.nevis.columbia.edu) * Start Cups:1 (orestes-corosync.nevis.columbia.edu) * Start ClusterIP:0 (hypatia-corosync.nevis.columbia.edu) * Start ClusterIPLocal:0 (hypatia-corosync.nevis.columbia.edu) * Start ClusterIPSandbox:0 (hypatia-corosync.nevis.columbia.edu) * Start ClusterIP:1 (orestes-corosync.nevis.columbia.edu) * Start ClusterIPLocal:1 (orestes-corosync.nevis.columbia.edu) * Start ClusterIPSandbox:1 (orestes-corosync.nevis.columbia.edu) * Start SymlinkEtcLibvirt:0 (hypatia-corosync.nevis.columbia.edu) * Start Libvirtd:0 (hypatia-corosync.nevis.columbia.edu) * Start SymlinkEtcLibvirt:1 (orestes-corosync.nevis.columbia.edu) * Start Libvirtd:1 (orestes-corosync.nevis.columbia.edu) * Start SymlinkTftp:0 (hypatia-corosync.nevis.columbia.edu) * Start Xinetd:0 (hypatia-corosync.nevis.columbia.edu) * Start SymlinkTftp:1 (orestes-corosync.nevis.columbia.edu) * Start Xinetd:1 (orestes-corosync.nevis.columbia.edu) * Start ExportMail:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportMailInbox:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportMailFolders:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportMailForward:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportMailProcmailrc:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportUsrNevis:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportUsrNevisOffsite:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportWWW:0 (hypatia-corosync.nevis.columbia.edu) * Start ExportMail:1 (orestes-corosync.nevis.columbia.edu) * Start ExportMailInbox:1 (orestes-corosync.nevis.columbia.edu) * Start ExportMailFolders:1 (orestes-corosync.nevis.columbia.edu) * Start ExportMailForward:1 (orestes-corosync.nevis.columbia.edu) * Start ExportMailProcmailrc:1 (orestes-corosync.nevis.columbia.edu) * Start ExportUsrNevis:1 (orestes-corosync.nevis.columbia.edu) * Start ExportUsrNevisOffsite:1 (orestes-corosync.nevis.columbia.edu) * Start ExportWWW:1 (orestes-corosync.nevis.columbia.edu) * Start AdminLvm:0 (hypatia-corosync.nevis.columbia.edu) * Start FSUsrNevis:0 (hypatia-corosync.nevis.columbia.edu) * Start FSVarNevis:0 (hypatia-corosync.nevis.columbia.edu) * Start FSVirtualMachines:0 (hypatia-corosync.nevis.columbia.edu) * Start FSMail:0 (hypatia-corosync.nevis.columbia.edu) * Start FSWork:0 (hypatia-corosync.nevis.columbia.edu) * Start AdminLvm:1 (orestes-corosync.nevis.columbia.edu) * Start FSUsrNevis:1 
(orestes-corosync.nevis.columbia.edu) * Start FSVarNevis:1 (orestes-corosync.nevis.columbia.edu) * Start FSVirtualMachines:1 (orestes-corosync.nevis.columbia.edu) * Start FSMail:1 (orestes-corosync.nevis.columbia.edu) * Start FSWork:1 (orestes-corosync.nevis.columbia.edu) * Start KVM-guest (hypatia-corosync.nevis.columbia.edu) * Start Proxy (orestes-corosync.nevis.columbia.edu) Executing cluster transition: * Pseudo action: AdminClone_pre_notify_start_0 * Resource action: StonithHypatia start on orestes-corosync.nevis.columbia.edu * Resource action: StonithOrestes start on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkEtcLibvirt:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkTftp:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Xinetd:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkTftp:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: Xinetd:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMail:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailForward:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportWWW:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMail:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailForward:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportWWW:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: AdminLvm:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: AdminLvm:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: KVM-guest monitor on orestes-corosync.nevis.columbia.edu * Resource action: 
KVM-guest monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Proxy monitor on orestes-corosync.nevis.columbia.edu * Resource action: Proxy monitor on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_start_0 * Pseudo action: AdminClone_start_0 * Resource action: AdminDrbd:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_running_0 * Pseudo action: AdminClone_post_notify_running_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_running_0 * Pseudo action: AdminClone_pre_notify_promote_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_promote_0 * Pseudo action: AdminClone_promote_0 * Resource action: AdminDrbd:0 promote on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 promote on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_promoted_0 * Pseudo action: AdminClone_post_notify_promoted_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_promoted_0 * Pseudo action: FilesystemClone_start_0 * Resource action: AdminDrbd:0 monitor=59000 on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 monitor=59000 on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_start_0 * Resource action: AdminLvm:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:1_start_0 * Resource action: AdminLvm:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_running_0 * Resource action: AdminLvm:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:1_running_0 * Resource action: AdminLvm:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 monitor=20000 on 
orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemClone_running_0 * Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu * Pseudo action: DhcpGroup_start_0 * Resource action: SymlinkDhcpdConf start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkSysconfigDhcpd start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdLeases start on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsClone_start_0 * Pseudo action: IPClone_start_0 * Pseudo action: LibvirtdClone_start_0 * Pseudo action: TftpClone_start_0 * Pseudo action: ExportsClone_start_0 * Resource action: CronAmbientTemperature monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdConf monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkSysconfigDhcpd monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdLeases monitor=60000 on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:0_start_0 * Resource action: SymlinkUsrShareCups:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Cups:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:1_start_0 * Resource action: SymlinkUsrShareCups:1 start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Cups:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: IPGroup:0_start_0 * Resource action: ClusterIP:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: IPGroup:1_start_0 * Resource action: ClusterIP:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:0_start_0 * Resource action: SymlinkEtcLibvirt:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:1_start_0 * Resource action: SymlinkEtcLibvirt:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:0_start_0 * Resource action: SymlinkTftp:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Xinetd:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:1_start_0 * Resource action: SymlinkTftp:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Xinetd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: ExportsGroup:0_start_0 * Resource action: ExportMail:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailForward:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:0 start on 
hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportWWW:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: ExportsGroup:1_start_0 * Resource action: ExportMail:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailForward:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportWWW:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:0_running_0 * Resource action: SymlinkUsrShareCups:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: Cups:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:1_running_0 * Resource action: SymlinkUsrShareCups:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: Cups:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsClone_running_0 * Pseudo action: IPGroup:0_running_0 * Resource action: ClusterIP:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:0 monitor=31000 on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:0 monitor=32000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: IPGroup:1_running_0 * Resource action: ClusterIP:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:1 monitor=31000 on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:1 monitor=32000 on orestes-corosync.nevis.columbia.edu * Pseudo action: IPClone_running_0 * Pseudo action: LibvirtdGroup:0_running_0 * Resource action: SymlinkEtcLibvirt:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:1_running_0 * Resource action: SymlinkEtcLibvirt:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Pseudo action: LibvirtdClone_running_0 * Pseudo action: TftpGroup:0_running_0 * Resource action: SymlinkTftp:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:1_running_0 * Resource action: SymlinkTftp:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Pseudo action: TftpClone_running_0 * Pseudo action: ExportsGroup:0_running_0 * Pseudo action: ExportsGroup:1_running_0 * Pseudo action: ExportsClone_running_0 * Resource action: KVM-guest start on hypatia-corosync.nevis.columbia.edu * Resource action: Proxy start on orestes-corosync.nevis.columbia.edu Revised cluster status: Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Master/Slave Set: AdminClone [AdminDrbd] Masters: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] CronAmbientTemperature 
(ocf::heartbeat:symlink): Started hypatia-corosync.nevis.columbia.edu StonithHypatia (stonith:fence_nut): Started orestes-corosync.nevis.columbia.edu StonithOrestes (stonith:fence_nut): Started hypatia-corosync.nevis.columbia.edu Resource Group: DhcpGroup SymlinkDhcpdConf (ocf::heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu SymlinkSysconfigDhcpd (ocf::heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu SymlinkDhcpdLeases (ocf::heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu Dhcpd (lsb:dhcpd): Stopped ( disabled ) DhcpIP (ocf::heartbeat:IPaddr2): Stopped Clone Set: CupsClone [CupsGroup] Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: IPClone [IPGroup] (unique) Resource Group: IPGroup:0 ClusterIP:0 (ocf::heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu ClusterIPLocal:0 (ocf::heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu ClusterIPSandbox:0 (ocf::heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu Resource Group: IPGroup:1 ClusterIP:1 (ocf::heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu ClusterIPLocal:1 (ocf::heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu ClusterIPSandbox:1 (ocf::heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu Clone Set: LibvirtdClone [LibvirtdGroup] Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: TftpClone [TftpGroup] Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: ExportsClone [ExportsGroup] Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] Clone Set: FilesystemClone [FilesystemGroup] Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] KVM-guest (ocf::heartbeat:VirtualDomain): Started hypatia-corosync.nevis.columbia.edu Proxy (ocf::heartbeat:VirtualDomain): Started orestes-corosync.nevis.columbia.edu diff --git a/pengine/test10/master-pseudo.summary b/pengine/test10/master-pseudo.summary index 2ee2d03b50..8f67a68afb 100644 --- a/pengine/test10/master-pseudo.summary +++ b/pengine/test10/master-pseudo.summary @@ -1,60 +1,59 @@ Current cluster status: Node raki.linbit: standby Online: [ sambuca.linbit ] ip_float_right (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms_drbd_float [drbd_float] Slaves: [ sambuca.linbit ] Resource Group: nfsexport ip_nfs (ocf::heartbeat:IPaddr2): Stopped fs_float (ocf::heartbeat:Filesystem): Stopped Transition Summary: * Start ip_float_right (sambuca.linbit) - * Restart drbd_float:0 (Slave sambuca.linbit) due to required ip_float_right start - * Promote drbd_float:0 (Slave -> Master sambuca.linbit) + * Restart drbd_float:0 ( Slave -> Master sambuca.linbit ) due to required ip_float_right start * Start ip_nfs (sambuca.linbit) Executing cluster transition: * Resource action: ip_float_right start on sambuca.linbit * Pseudo action: ms_drbd_float_pre_notify_stop_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_float_stop_0 * Resource action: drbd_float:0 stop on sambuca.linbit * Pseudo action: ms_drbd_float_stopped_0 * Pseudo action: ms_drbd_float_post_notify_stopped_0 * Pseudo action: ms_drbd_float_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_float_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_float_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_float_start_0 * 
Resource action: drbd_float:0 start on sambuca.linbit * Pseudo action: ms_drbd_float_running_0 * Pseudo action: ms_drbd_float_post_notify_running_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_float_pre_notify_promote_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_float_promote_0 * Resource action: drbd_float:0 promote on sambuca.linbit * Pseudo action: ms_drbd_float_promoted_0 * Pseudo action: ms_drbd_float_post_notify_promoted_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_promoted_0 * Pseudo action: nfsexport_start_0 * Resource action: ip_nfs start on sambuca.linbit Revised cluster status: Node raki.linbit: standby Online: [ sambuca.linbit ] ip_float_right (ocf::heartbeat:IPaddr2): Started sambuca.linbit Master/Slave Set: ms_drbd_float [drbd_float] Masters: [ sambuca.linbit ] Resource Group: nfsexport ip_nfs (ocf::heartbeat:IPaddr2): Started sambuca.linbit fs_float (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/master-stop.summary b/pengine/test10/master-stop.summary index 8b802d4d7a..8b861df811 100644 --- a/pengine/test10/master-stop.summary +++ b/pengine/test10/master-stop.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ node1 node2 node3 ] Master/Slave Set: m [dummy] Slaves: [ node1 node2 node3 ] Transition Summary: - * Stop dummy:2 (node3) due to node availability + * Stop dummy:2 ( Slave node3 ) due to node availability Executing cluster transition: * Pseudo action: m_stop_0 * Resource action: dummy:2 stop on node3 * Pseudo action: m_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 node3 ] Master/Slave Set: m [dummy] Slaves: [ node1 node2 ] Stopped: [ node3 ] diff --git a/pengine/test10/migrate-1.summary b/pengine/test10/migrate-1.summary index 8df4fdb2b4..ee3d352a1b 100644 --- a/pengine/test10/migrate-1.summary +++ b/pengine/test10/migrate-1.summary @@ -1,24 +1,24 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): Started node1 Transition Summary: - * Migrate rsc3 (Started node1 -> node2) + * Migrate rsc3 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc3 monitor on node2 * Resource action: rsc3 migrate_to on node1 * Resource action: rsc3 migrate_from on node2 * Resource action: rsc3 stop on node1 * Pseudo action: all_stopped * Pseudo action: rsc3_start_0 Revised cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/migrate-3.summary b/pengine/test10/migrate-3.summary index 9ccc0628cc..9288be3963 100644 --- a/pengine/test10/migrate-3.summary +++ b/pengine/test10/migrate-3.summary @@ -1,22 +1,22 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): FAILED node1 Transition Summary: - * Recover rsc3 (Started node1 -> node2) + * Recover rsc3 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc3 monitor on node2 * Resource action: rsc3 stop on node1 * Pseudo action: all_stopped * Resource action: rsc3 start on node2 Revised cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/migrate-4.summary b/pengine/test10/migrate-4.summary index 32449a77a8..b95521a535 100644 --- a/pengine/test10/migrate-4.summary +++ 
b/pengine/test10/migrate-4.summary @@ -1,21 +1,21 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): FAILED node2 Transition Summary: - * Recover rsc3 (Started node2) + * Recover rsc3 ( node2 ) Executing cluster transition: * Resource action: rsc3 stop on node2 * Pseudo action: all_stopped * Resource action: rsc3 start on node2 Revised cluster status: Node node1: standby Online: [ node2 ] rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/migrate-5.summary b/pengine/test10/migrate-5.summary index a9d5771ff3..0a939dd5db 100644 --- a/pengine/test10/migrate-5.summary +++ b/pengine/test10/migrate-5.summary @@ -1,34 +1,34 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Transition Summary: - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-begin.summary b/pengine/test10/migrate-begin.summary index b4b58703f1..56e0f335ad 100644 --- a/pengine/test10/migrate-begin.summary +++ b/pengine/test10/migrate-begin.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-14 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Migrate test-vm (Started hex-14 -> hex-13) + * Migrate test-vm ( hex-14 -> hex-13 ) Executing cluster transition: * Pseudo action: load_stopped_hex-13 * Resource action: test-vm migrate_to on hex-14 * Resource action: test-vm migrate_from on hex-13 * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-14 * Pseudo action: all_stopped * Pseudo action: test-vm_start_0 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-both-vms.summary b/pengine/test10/migrate-both-vms.summary index 5b60727bb7..d45d267bd7 100644 --- a/pengine/test10/migrate-both-vms.summary +++ b/pengine/test10/migrate-both-vms.summary @@ -1,101 +1,101 @@ Current cluster status: Node cvmh03: standby Node cvmh04: standby Online: [ cvmh01 cvmh02 ] fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02 fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02 Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-p-libvirtd [p-libvirtd] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-watch-ib0 [p-watch-ib0] Started: [ cvmh01 cvmh02 
cvmh03 cvmh04 ] Clone Set: c-fs-gpfs [p-fs-gpfs] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] vm-compute-test (ocf::ccni:xcatVirtualDomain): Started cvmh03 vm-swbuildsl6 (ocf::ccni:xcatVirtualDomain): Started cvmh04 Transition Summary: * Stop fs-libvirt-VM-xcm:0 (cvmh04) due to node availability * Stop fs-libvirt-VM-xcm:2 (cvmh03) due to node availability * Stop p-watch-ib0:0 (cvmh04) due to node availability * Stop p-watch-ib0:2 (cvmh03) due to node availability * Stop p-fs-gpfs:0 (cvmh04) due to node availability * Stop p-fs-gpfs:2 (cvmh03) due to node availability * Stop p-libvirtd:0 (cvmh04) due to node availability * Stop p-libvirtd:2 (cvmh03) due to node availability * Stop fs-bind-libvirt-VM-cvmh:0 (cvmh04) due to node availability * Stop fs-bind-libvirt-VM-cvmh:2 (cvmh03) due to node availability - * Migrate vm-compute-test (Started cvmh03 -> cvmh01) - * Migrate vm-swbuildsl6 (Started cvmh04 -> cvmh02) + * Migrate vm-compute-test ( cvmh03 -> cvmh01 ) + * Migrate vm-swbuildsl6 ( cvmh04 -> cvmh02 ) Executing cluster transition: * Pseudo action: c-watch-ib0_stop_0 * Pseudo action: load_stopped_cvmh01 * Pseudo action: load_stopped_cvmh02 * Resource action: p-watch-ib0 stop on cvmh03 * Resource action: vm-compute-test migrate_to on cvmh03 * Resource action: p-watch-ib0 stop on cvmh04 * Pseudo action: c-watch-ib0_stopped_0 * Resource action: vm-compute-test migrate_from on cvmh01 * Resource action: vm-swbuildsl6 migrate_to on cvmh04 * Resource action: vm-swbuildsl6 migrate_from on cvmh02 * Resource action: vm-swbuildsl6 stop on cvmh04 * Pseudo action: load_stopped_cvmh04 * Resource action: vm-compute-test stop on cvmh03 * Pseudo action: load_stopped_cvmh03 * Pseudo action: c-p-libvirtd_stop_0 * Pseudo action: vm-compute-test_start_0 * Pseudo action: vm-swbuildsl6_start_0 * Resource action: p-libvirtd stop on cvmh03 * Resource action: vm-compute-test monitor=45000 on cvmh01 * Resource action: vm-swbuildsl6 monitor=45000 on cvmh02 * Resource action: p-libvirtd stop on cvmh04 * Pseudo action: c-p-libvirtd_stopped_0 * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stop_0 * Pseudo action: c-fs-libvirt-VM-xcm_stop_0 * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh03 * Resource action: fs-libvirt-VM-xcm stop on cvmh03 * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh04 * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stopped_0 * Resource action: fs-libvirt-VM-xcm stop on cvmh04 * Pseudo action: c-fs-libvirt-VM-xcm_stopped_0 * Pseudo action: c-fs-gpfs_stop_0 * Resource action: p-fs-gpfs stop on cvmh03 * Resource action: p-fs-gpfs stop on cvmh04 * Pseudo action: c-fs-gpfs_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node cvmh03: standby Node cvmh04: standby Online: [ cvmh01 cvmh02 ] fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02 fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02 Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-p-libvirtd [p-libvirtd] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-watch-ib0 [p-watch-ib0] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-fs-gpfs [p-fs-gpfs] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] vm-compute-test (ocf::ccni:xcatVirtualDomain): Started cvmh01 vm-swbuildsl6 (ocf::ccni:xcatVirtualDomain): 
Started cvmh02 diff --git a/pengine/test10/migrate-fail-2.summary b/pengine/test10/migrate-fail-2.summary index 378653389d..f3709c4d47 100644 --- a/pengine/test10/migrate-fail-2.summary +++ b/pengine/test10/migrate-fail-2.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): FAILED [ hex-13 hex-14 ] Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Recover test-vm (Started hex-13) + * Recover test-vm ( hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-14 * Resource action: test-vm stop on hex-13 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fail-3.summary b/pengine/test10/migrate-fail-3.summary index 547c110765..35ec75f2d9 100644 --- a/pengine/test10/migrate-fail-3.summary +++ b/pengine/test10/migrate-fail-3.summary @@ -1,25 +1,25 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): FAILED hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Recover test-vm (Started hex-13) + * Recover test-vm ( hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-13 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fail-4.summary b/pengine/test10/migrate-fail-4.summary index cda25adede..d36c61bbd2 100644 --- a/pengine/test10/migrate-fail-4.summary +++ b/pengine/test10/migrate-fail-4.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started [ hex-13 hex-14 ] Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Restart test-vm (Started hex-13) + * Restart test-vm ( hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-14 * Resource action: test-vm stop on hex-13 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fail-6.summary b/pengine/test10/migrate-fail-6.summary index dfde8bbe1a..7cabcf3a3f 100644 --- a/pengine/test10/migrate-fail-6.summary +++ b/pengine/test10/migrate-fail-6.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): FAILED [ hex-13 hex-14 ] Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Recover test-vm (Started hex-13) + * Recover test-vm ( hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-13 * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git 
a/pengine/test10/migrate-fail-7.summary b/pengine/test10/migrate-fail-7.summary index 7a417583e6..f05bb75f02 100644 --- a/pengine/test10/migrate-fail-7.summary +++ b/pengine/test10/migrate-fail-7.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Stopped hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Restart test-vm (Started hex-13) + * Restart test-vm ( hex-13 ) Executing cluster transition: * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fail-8.summary b/pengine/test10/migrate-fail-8.summary index 9a651eb52d..018a4666ba 100644 --- a/pengine/test10/migrate-fail-8.summary +++ b/pengine/test10/migrate-fail-8.summary @@ -1,25 +1,25 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): FAILED hex-14 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Recover test-vm (Started hex-14 -> hex-13) + * Recover test-vm ( hex-14 -> hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: load_stopped_hex-14 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fail-9.summary b/pengine/test10/migrate-fail-9.summary index 82ded68be1..0f99e9220a 100644 --- a/pengine/test10/migrate-fail-9.summary +++ b/pengine/test10/migrate-fail-9.summary @@ -1,25 +1,25 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Stopped Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Start test-vm (hex-13) + * Restart test-vm ( hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-fencing.summary b/pengine/test10/migrate-fencing.summary index cfd4eb8f9f..44f3262f82 100644 --- a/pengine/test10/migrate-fencing.summary +++ b/pengine/test10/migrate-fencing.summary @@ -1,108 +1,108 @@ Current cluster status: Node pcmk-4: UNCLEAN (online) Online: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-4 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4 migrator (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-4 ] Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: * 
Fence (reboot) pcmk-4 'termination was requested' * Stop FencingChild:0 (pcmk-4) due to node availability - * Move r192.168.101.181 (Started pcmk-4 -> pcmk-1) - * Move r192.168.101.182 (Started pcmk-4 -> pcmk-1) - * Move r192.168.101.183 (Started pcmk-4 -> pcmk-1) - * Move rsc_pcmk-4 (Started pcmk-4 -> pcmk-2) - * Move lsb-dummy (Started pcmk-4 -> pcmk-1) - * Migrate migrator (Started pcmk-1 -> pcmk-3) + * Move r192.168.101.181 ( pcmk-4 -> pcmk-1 ) + * Move r192.168.101.182 ( pcmk-4 -> pcmk-1 ) + * Move r192.168.101.183 ( pcmk-4 -> pcmk-1 ) + * Move rsc_pcmk-4 ( pcmk-4 -> pcmk-2 ) + * Move lsb-dummy ( pcmk-4 -> pcmk-1 ) + * Migrate migrator ( pcmk-1 -> pcmk-3 ) * Stop ping-1:0 (pcmk-4) due to node availability - * Demote stateful-1:0 (Master -> Stopped pcmk-4) + * Stop stateful-1:0 ( Master pcmk-4 ) due to node availability * Promote stateful-1:1 (Slave -> Master pcmk-1) Executing cluster transition: * Resource action: stateful-1:3 monitor=15000 on pcmk-3 * Resource action: stateful-1:2 monitor=15000 on pcmk-2 * Fencing pcmk-4 (reboot) * Pseudo action: Fencing_stop_0 * Pseudo action: rsc_pcmk-4_stop_0 * Pseudo action: lsb-dummy_stop_0 * Pseudo action: Connectivity_stop_0 * Pseudo action: stonith_complete * Pseudo action: FencingChild:0_stop_0 * Pseudo action: Fencing_stopped_0 * Pseudo action: group-1_stop_0 * Pseudo action: r192.168.101.183_stop_0 * Resource action: rsc_pcmk-4 start on pcmk-2 * Resource action: migrator migrate_to on pcmk-1 * Pseudo action: ping-1:0_stop_0 * Pseudo action: Connectivity_stopped_0 * Pseudo action: r192.168.101.182_stop_0 * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2 * Resource action: migrator migrate_from on pcmk-3 * Resource action: migrator stop on pcmk-1 * Pseudo action: r192.168.101.181_stop_0 * Pseudo action: migrator_start_0 * Pseudo action: group-1_stopped_0 * Resource action: migrator monitor=10000 on pcmk-3 * Pseudo action: master-1_demote_0 * Pseudo action: stateful-1:0_demote_0 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Pseudo action: stateful-1:0_stop_0 * Pseudo action: master-1_stopped_0 * Pseudo action: all_stopped * Pseudo action: master-1_promote_0 * Resource action: stateful-1:1 promote on pcmk-1 * Pseudo action: master-1_promoted_0 * Pseudo action: group-1_start_0 * Resource action: r192.168.101.181 start on pcmk-1 * Resource action: r192.168.101.182 start on pcmk-1 * Resource action: r192.168.101.183 start on pcmk-1 * Resource action: stateful-1:1 monitor=16000 on pcmk-1 * Pseudo action: group-1_running_0 * Resource action: r192.168.101.181 monitor=5000 on pcmk-1 * Resource action: r192.168.101.182 monitor=5000 on pcmk-1 * Resource action: r192.168.101.183 monitor=5000 on pcmk-1 * Resource action: lsb-dummy start on pcmk-1 * Resource action: lsb-dummy monitor=5000 on pcmk-1 Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] OFFLINE: [ pcmk-4 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 migrator (ocf::pacemaker:Dummy): Started pcmk-3 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 
pcmk-3 ] Stopped: [ pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] diff --git a/pengine/test10/migrate-partial-2.summary b/pengine/test10/migrate-partial-2.summary index 0cf3c75265..7ef1a5d328 100644 --- a/pengine/test10/migrate-partial-2.summary +++ b/pengine/test10/migrate-partial-2.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started [ hex-13 hex-14 ] Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: - * Migrate test-vm (Started hex-14 -> hex-13) + * Migrate test-vm ( hex-14 -> hex-13 ) Executing cluster transition: * Resource action: test-vm migrate_from on hex-13 * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-14 * Pseudo action: load_stopped_hex-13 * Pseudo action: all_stopped * Pseudo action: test-vm_start_0 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/migrate-partial-3.summary b/pengine/test10/migrate-partial-3.summary index 516e470250..0dbe46ad38 100644 --- a/pengine/test10/migrate-partial-3.summary +++ b/pengine/test10/migrate-partial-3.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ hex-13 hex-14 ] OFFLINE: [ hex-15 ] test-vm (ocf::heartbeat:Xen): FAILED hex-14 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Stopped: [ hex-15 ] Transition Summary: - * Recover test-vm (Started hex-14 -> hex-13) + * Recover test-vm ( hex-14 -> hex-13 ) Executing cluster transition: * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-15 * Pseudo action: load_stopped_hex-13 * Pseudo action: load_stopped_hex-14 * Pseudo action: all_stopped * Resource action: test-vm start on hex-13 Revised cluster status: Online: [ hex-13 hex-14 ] OFFLINE: [ hex-15 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Stopped: [ hex-15 ] diff --git a/pengine/test10/migrate-partial-4.summary b/pengine/test10/migrate-partial-4.summary index c3f7012212..8fd1d4cfa5 100644 --- a/pengine/test10/migrate-partial-4.summary +++ b/pengine/test10/migrate-partial-4.summary @@ -1,125 +1,125 @@ Current cluster status: Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] drbd-local (ocf::vds-ok:Ticketer): Started lustre01-left drbd-stacked (ocf::vds-ok:Ticketer): Stopped drbd-testfs-local (ocf::vds-ok:Ticketer): Stopped drbd-testfs-stacked (ocf::vds-ok:Ticketer): Stopped ip-testfs-mdt0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0001-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0002-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0003-left (ocf::heartbeat:IPaddr2): Stopped lustre (ocf::vds-ok:Ticketer): Started lustre03-left mgs (ocf::vds-ok:lustre-server): Stopped testfs (ocf::vds-ok:Ticketer): Started lustre02-left testfs-mdt0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0001 (ocf::vds-ok:lustre-server): Stopped testfs-ost0002 (ocf::vds-ok:lustre-server): Stopped testfs-ost0003 (ocf::vds-ok:lustre-server): Stopped Resource Group: booth ip-booth (ocf::heartbeat:IPaddr2): Started lustre02-left boothd (ocf::pacemaker:booth-site): Started lustre02-left Master/Slave Set: ms-drbd-mgs [drbd-mgs] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: 
ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Transition Summary: * Start drbd-stacked (lustre02-left) * Start drbd-testfs-local (lustre03-left) - * Migrate lustre (Started lustre03-left -> lustre04-left) - * Move testfs (Started lustre02-left -> lustre03-left) + * Migrate lustre ( lustre03-left -> lustre04-left ) + * Move testfs ( lustre02-left -> lustre03-left ) * Start drbd-mgs:0 (lustre01-left) * Start drbd-mgs:1 (lustre02-left) Executing cluster transition: * Resource action: drbd-stacked start on lustre02-left * Resource action: drbd-testfs-local start on lustre03-left * Resource action: lustre migrate_to on lustre03-left * Resource action: testfs stop on lustre02-left * Resource action: testfs stop on lustre01-left * Pseudo action: ms-drbd-mgs_pre_notify_start_0 * Resource action: lustre migrate_from on lustre04-left * Resource action: lustre stop on lustre03-left * Resource action: testfs start on lustre03-left * Pseudo action: ms-drbd-mgs_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd-mgs_start_0 * Pseudo action: all_stopped * Pseudo action: lustre_start_0 * Resource action: drbd-mgs:0 start on lustre01-left * Resource action: drbd-mgs:1 start on lustre02-left * Pseudo action: ms-drbd-mgs_running_0 * Pseudo action: ms-drbd-mgs_post_notify_running_0 * Resource action: drbd-mgs:0 notify on lustre01-left * Resource action: drbd-mgs:1 notify on lustre02-left * Pseudo action: ms-drbd-mgs_confirmed-post_notify_running_0 * Resource action: drbd-mgs:0 monitor=30000 on lustre01-left * Resource action: drbd-mgs:1 monitor=30000 on lustre02-left Revised cluster status: Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] drbd-local (ocf::vds-ok:Ticketer): Started lustre01-left drbd-stacked (ocf::vds-ok:Ticketer): Started lustre02-left drbd-testfs-local (ocf::vds-ok:Ticketer): Started lustre03-left drbd-testfs-stacked (ocf::vds-ok:Ticketer): Stopped ip-testfs-mdt0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0000-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0001-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0002-left (ocf::heartbeat:IPaddr2): Stopped ip-testfs-ost0003-left (ocf::heartbeat:IPaddr2): Stopped lustre (ocf::vds-ok:Ticketer): Started lustre04-left mgs 
(ocf::vds-ok:lustre-server): Stopped testfs (ocf::vds-ok:Ticketer): Started lustre03-left testfs-mdt0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0000 (ocf::vds-ok:lustre-server): Stopped testfs-ost0001 (ocf::vds-ok:lustre-server): Stopped testfs-ost0002 (ocf::vds-ok:lustre-server): Stopped testfs-ost0003 (ocf::vds-ok:lustre-server): Stopped Resource Group: booth ip-booth (ocf::heartbeat:IPaddr2): Started lustre02-left boothd (ocf::pacemaker:booth-site): Started lustre02-left Master/Slave Set: ms-drbd-mgs [drbd-mgs] Slaves: [ lustre01-left lustre02-left ] Master/Slave Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] diff --git a/pengine/test10/migrate-shutdown.summary b/pengine/test10/migrate-shutdown.summary index b9aa5b2d88..508c399f44 100644 --- a/pengine/test10/migrate-shutdown.summary +++ b/pengine/test10/migrate-shutdown.summary @@ -1,95 +1,95 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Fencing (stonith:fence_xvm): Started pcmk-1 Resource Group: group-1 r192.168.122.105 (ocf::heartbeat:IPaddr): Started pcmk-2 r192.168.122.106 (ocf::heartbeat:IPaddr): Started pcmk-2 r192.168.122.107 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 migrator (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-4 ] Stopped: [ pcmk-3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-2 ] Slaves: [ pcmk-1 pcmk-4 ] Stopped: [ pcmk-3 ] Transition Summary: * Shutdown pcmk-4 * Shutdown pcmk-3 * Shutdown pcmk-2 * Shutdown pcmk-1 * Stop Fencing (pcmk-1) * Stop r192.168.122.105 (pcmk-2) due to node availability * Stop r192.168.122.106 (pcmk-2) due to node availability * Stop r192.168.122.107 (pcmk-2) due to node availability * Stop rsc_pcmk-1 (pcmk-1) * Stop rsc_pcmk-2 (pcmk-2) * Stop rsc_pcmk-4 (pcmk-4) * Stop lsb-dummy (pcmk-2) * Stop migrator (pcmk-1) * Stop ping-1:0 (pcmk-1) due to node availability * Stop ping-1:1 (pcmk-2) due to node availability * Stop ping-1:2 (pcmk-4) due to node 
availability - * Stop stateful-1:0 (pcmk-1) due to node availability - * Demote stateful-1:1 (Master -> Stopped pcmk-2) - * Stop stateful-1:2 (pcmk-4) due to node availability + * Stop stateful-1:0 ( Slave pcmk-1 ) due to node availability + * Stop stateful-1:1 ( Master pcmk-2 ) due to node availability + * Stop stateful-1:2 ( Slave pcmk-4 ) due to node availability Executing cluster transition: * Resource action: Fencing stop on pcmk-1 * Resource action: rsc_pcmk-1 stop on pcmk-1 * Resource action: rsc_pcmk-2 stop on pcmk-2 * Resource action: rsc_pcmk-4 stop on pcmk-4 * Resource action: lsb-dummy stop on pcmk-2 * Resource action: migrator stop on pcmk-1 * Resource action: migrator stop on pcmk-3 * Pseudo action: Connectivity_stop_0 * Cluster action: do_shutdown on pcmk-3 * Pseudo action: group-1_stop_0 * Resource action: r192.168.122.107 stop on pcmk-2 * Resource action: ping-1:0 stop on pcmk-1 * Resource action: ping-1:1 stop on pcmk-2 * Resource action: ping-1:3 stop on pcmk-4 * Pseudo action: Connectivity_stopped_0 * Resource action: r192.168.122.106 stop on pcmk-2 * Resource action: r192.168.122.105 stop on pcmk-2 * Pseudo action: group-1_stopped_0 * Pseudo action: master-1_demote_0 * Resource action: stateful-1:0 demote on pcmk-2 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Resource action: stateful-1:2 stop on pcmk-1 * Resource action: stateful-1:0 stop on pcmk-2 * Resource action: stateful-1:3 stop on pcmk-4 * Pseudo action: master-1_stopped_0 * Cluster action: do_shutdown on pcmk-4 * Cluster action: do_shutdown on pcmk-2 * Cluster action: do_shutdown on pcmk-1 * Pseudo action: all_stopped Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Fencing (stonith:fence_xvm): Stopped Resource Group: group-1 r192.168.122.105 (ocf::heartbeat:IPaddr): Stopped r192.168.122.106 (ocf::heartbeat:IPaddr): Stopped r192.168.122.107 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-1 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-2 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-4 (ocf::heartbeat:IPaddr): Stopped lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped migrator (ocf::pacemaker:Dummy): Stopped Clone Set: Connectivity [ping-1] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] diff --git a/pengine/test10/migrate-start-complex.summary b/pengine/test10/migrate-start-complex.summary index ed88398a33..9dd0924a9a 100644 --- a/pengine/test10/migrate-start-complex.summary +++ b/pengine/test10/migrate-start-complex.summary @@ -1,49 +1,49 @@ Current cluster status: Online: [ dom0-01 dom0-02 ] top (ocf::heartbeat:Dummy): Started dom0-02 domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-02 ] Stopped: [ dom0-01 ] Clone Set: clone-bottom [bottom] Stopped: [ dom0-01 dom0-02 ] Transition Summary: - * Move top (Started dom0-02 -> dom0-01) - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Move top ( dom0-02 -> dom0-01 ) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Start dom0-iscsi1-cnx1:1 (dom0-01) * Start bottom:0 (dom0-01) * Start bottom:1 (dom0-02) Executing cluster transition: * Resource action: top stop on dom0-02 * Pseudo action: clone-dom0-iscsi1_start_0 * Resource action: bottom:0 monitor on dom0-01 * Resource action: bottom:1 monitor on dom0-02 * Pseudo action: clone-bottom_start_0 * Pseudo action: dom0-iscsi1:1_start_0 * Resource action: dom0-iscsi1-cnx1:1 start on dom0-01 * Resource 
action: bottom:0 start on dom0-01 * Resource action: bottom:1 start on dom0-02 * Pseudo action: clone-bottom_running_0 * Pseudo action: dom0-iscsi1:1_running_0 * Pseudo action: clone-dom0-iscsi1_running_0 * Resource action: domU-test01 migrate_to on dom0-02 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Resource action: top start on dom0-01 Revised cluster status: Online: [ dom0-01 dom0-02 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 dom0-02 ] diff --git a/pengine/test10/migrate-start.summary b/pengine/test10/migrate-start.summary index 8bbe3e5729..fb6fdef15f 100644 --- a/pengine/test10/migrate-start.summary +++ b/pengine/test10/migrate-start.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ dom0-01 dom0-02 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-02 ] Stopped: [ dom0-01 ] Transition Summary: - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Start dom0-iscsi1-cnx1:1 (dom0-01) Executing cluster transition: * Pseudo action: clone-dom0-iscsi1_start_0 * Pseudo action: dom0-iscsi1:1_start_0 * Resource action: dom0-iscsi1-cnx1:1 start on dom0-01 * Pseudo action: dom0-iscsi1:1_running_0 * Pseudo action: clone-dom0-iscsi1_running_0 * Resource action: domU-test01 migrate_to on dom0-02 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 Revised cluster status: Online: [ dom0-01 dom0-02 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] diff --git a/pengine/test10/migrate-stop-complex.summary b/pengine/test10/migrate-stop-complex.summary index 04dce1a4fb..4aa6cf797a 100644 --- a/pengine/test10/migrate-stop-complex.summary +++ b/pengine/test10/migrate-stop-complex.summary @@ -1,48 +1,48 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-02 domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 dom0-02 ] Transition Summary: - * Move top (Started dom0-02 -> dom0-01) - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Move top ( dom0-02 -> dom0-01 ) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability * Stop bottom:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: top stop on dom0-02 * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Pseudo action: clone-bottom_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Resource action: bottom:0 stop on dom0-02 * Pseudo action: clone-bottom_stopped_0 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 * Resource action: top start on dom0-01 Revised cluster status: Node dom0-02: standby Online: [ 
dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-stop-start-complex.summary b/pengine/test10/migrate-stop-start-complex.summary index 5408bb6117..f0f58439fe 100644 --- a/pengine/test10/migrate-stop-start-complex.summary +++ b/pengine/test10/migrate-stop-start-complex.summary @@ -1,49 +1,49 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-02 ] Stopped: [ dom0-01 ] Transition Summary: - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability - * Move bottom:0 (Started dom0-02 -> dom0-01) + * Move bottom:0 ( dom0-02 -> dom0-01 ) Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 * Pseudo action: clone-bottom_stop_0 * Resource action: bottom:0 stop on dom0-02 * Pseudo action: clone-bottom_stopped_0 * Pseudo action: clone-bottom_start_0 * Pseudo action: all_stopped * Resource action: bottom:0 start on dom0-01 * Pseudo action: clone-bottom_running_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-stop.summary b/pengine/test10/migrate-stop.summary index a9d5771ff3..0a939dd5db 100644 --- a/pengine/test10/migrate-stop.summary +++ b/pengine/test10/migrate-stop.summary @@ -1,34 +1,34 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Transition Summary: - * Migrate domU-test01 (Started dom0-02 -> dom0-01) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-stop_start.summary b/pengine/test10/migrate-stop_start.summary index cadb24f261..de89651e3b 
100644 --- a/pengine/test10/migrate-stop_start.summary +++ b/pengine/test10/migrate-stop_start.summary @@ -1,40 +1,40 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-02 ] Stopped: [ dom0-01 ] Transition Summary: - * Migrate domU-test01 (Started dom0-02 -> dom0-01) - * Move dom0-iscsi1-cnx1:0 (Started dom0-02 -> dom0-01) + * Migrate domU-test01 ( dom0-02 -> dom0-01 ) + * Move dom0-iscsi1-cnx1:0 ( dom0-02 -> dom0-01 ) Executing cluster transition: * Pseudo action: clone-dom0-iscsi1_stop_0 * Pseudo action: dom0-iscsi1:0_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: dom0-iscsi1:0_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 * Pseudo action: clone-dom0-iscsi1_start_0 * Pseudo action: dom0-iscsi1:0_start_0 * Resource action: dom0-iscsi1-cnx1:0 start on dom0-01 * Pseudo action: dom0-iscsi1:0_running_0 * Pseudo action: clone-dom0-iscsi1_running_0 * Resource action: domU-test01 migrate_to on dom0-02 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-versioned.summary b/pengine/test10/migrate-versioned.summary index 44fe25a5b1..d9e40bb1ce 100644 --- a/pengine/test10/migrate-versioned.summary +++ b/pengine/test10/migrate-versioned.summary @@ -1,19 +1,19 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move A (Started node1 -> node2) + * Move A ( node1 -> node2 ) Executing cluster transition: * Resource action: A stop on node1 * Resource action: A start on node2 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/mon-rsc-2.summary b/pengine/test10/mon-rsc-2.summary index 3f649e5d52..6c219b5055 100644 --- a/pengine/test10/mon-rsc-2.summary +++ b/pengine/test10/mon-rsc-2.summary @@ -1,23 +1,23 @@ Current cluster status: Node node2 (uuid2): standby Online: [ node1 ] rsc1 (heartbeat:apache): Started node2 Transition Summary: - * Move rsc1 (Started node2 -> node1) + * Move rsc1 ( node2 -> node1 ) Executing cluster transition: * Resource action: rsc1 monitor on node1 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=5000 on node1 Revised cluster status: Node node2 (uuid2): standby Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 diff --git a/pengine/test10/mon-rsc-4.summary b/pengine/test10/mon-rsc-4.summary index 92b0f68946..d4debf3829 100644 --- a/pengine/test10/mon-rsc-4.summary +++ b/pengine/test10/mon-rsc-4.summary @@ -1,23 +1,23 @@ Current cluster status: Node node2 (uuid2): standby Online: [ node1 ] rsc1 (heartbeat:apache): Starting node2 Transition Summary: - * Move rsc1 (Started node2 -> node1) + * Move rsc1 ( node2 -> node1 ) Executing cluster transition: * Resource action: rsc1 monitor on node1 * Resource action: rsc1 stop on node2 * Resource action: rsc1 start on node1 * Pseudo action: all_stopped * Resource action: rsc1 monitor=5000 on node1 Revised cluster status: Node node2 (uuid2): standby Online: [ node1 ] rsc1 
(heartbeat:apache): Started [ node1 node2 ] diff --git a/pengine/test10/monitor-onfail-restart.summary b/pengine/test10/monitor-onfail-restart.summary index 34bc76aa64..b15c321e92 100644 --- a/pengine/test10/monitor-onfail-restart.summary +++ b/pengine/test10/monitor-onfail-restart.summary @@ -1,22 +1,22 @@ Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): FAILED fc16-builder Transition Summary: - * Recover A (Started fc16-builder) + * Recover A ( fc16-builder ) Executing cluster transition: * Resource action: A stop on fc16-builder * Pseudo action: all_stopped * Resource action: A start on fc16-builder * Resource action: A monitor=20000 on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder diff --git a/pengine/test10/multi1.summary b/pengine/test10/multi1.summary index c782e98efc..40222fc89c 100644 --- a/pengine/test10/multi1.summary +++ b/pengine/test10/multi1.summary @@ -1,20 +1,20 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started [ node1 node2 ] Transition Summary: - * Restart rsc1 (Started node1) + * Restart rsc1 ( node1 ) Executing cluster transition: * Resource action: rsc1 stop on node2 * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 diff --git a/pengine/test10/multiple-monitor-one-failed.summary b/pengine/test10/multiple-monitor-one-failed.summary index b10abbe784..0691ebf462 100644 --- a/pengine/test10/multiple-monitor-one-failed.summary +++ b/pengine/test10/multiple-monitor-one-failed.summary @@ -1,21 +1,21 @@ Current cluster status: Online: [ dhcp180 dhcp69 ] Dummy-test2 (ocf::test:Dummy): FAILED dhcp180 Transition Summary: - * Recover Dummy-test2 (Started dhcp180) + * Recover Dummy-test2 ( dhcp180 ) Executing cluster transition: * Resource action: Dummy-test2 stop on dhcp180 * Pseudo action: all_stopped * Resource action: Dummy-test2 start on dhcp180 * Resource action: Dummy-test2 monitor=30000 on dhcp180 * Resource action: Dummy-test2 monitor=10000 on dhcp180 Revised cluster status: Online: [ dhcp180 dhcp69 ] Dummy-test2 (ocf::test:Dummy): Started dhcp180 diff --git a/pengine/test10/not-installed-agent.summary b/pengine/test10/not-installed-agent.summary index 5d0b311ca3..b32cdd466b 100644 --- a/pengine/test10/not-installed-agent.summary +++ b/pengine/test10/not-installed-agent.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ sles11-1 sles11-2 ] st_sbd (stonith:external/sbd): Started sles11-1 rsc1 (ocf::pacemaker:Dummy): FAILED sles11-1 rsc2 (ocf::pacemaker:Dummy): FAILED sles11-1 Transition Summary: - * Recover rsc1 (Started sles11-1 -> sles11-2) - * Recover rsc2 (Started sles11-1 -> sles11-2) + * Recover rsc1 ( sles11-1 -> sles11-2 ) + * Recover rsc2 ( sles11-1 -> sles11-2 ) Executing cluster transition: * Resource action: rsc1 stop on sles11-1 * Resource action: rsc2 stop on sles11-1 * Pseudo action: all_stopped * Resource action: rsc1 start on sles11-2 * Resource action: rsc2 start on sles11-2 * Resource action: rsc1 monitor=10000 on sles11-2 * Resource action: rsc2 monitor=10000 on sles11-2 Revised cluster status: Online: [ sles11-1 sles11-2 ] st_sbd (stonith:external/sbd): Started sles11-1 rsc1 (ocf::pacemaker:Dummy): Started sles11-2 rsc2 (ocf::pacemaker:Dummy): Started sles11-2 diff --git a/pengine/test10/not-installed-tools.summary 
b/pengine/test10/not-installed-tools.summary index e429333eb7..01ae7f787b 100644 --- a/pengine/test10/not-installed-tools.summary +++ b/pengine/test10/not-installed-tools.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ sles11-1 sles11-2 ] st_sbd (stonith:external/sbd): Started sles11-1 rsc1 (ocf::pacemaker:Dummy): FAILED sles11-1 rsc2 (ocf::pacemaker:Dummy): Started sles11-1 (failure ignored) Transition Summary: - * Recover rsc1 (Started sles11-1 -> sles11-2) + * Recover rsc1 ( sles11-1 -> sles11-2 ) Executing cluster transition: * Resource action: rsc1 stop on sles11-1 * Pseudo action: all_stopped * Resource action: rsc1 start on sles11-2 * Resource action: rsc1 monitor=10000 on sles11-2 Revised cluster status: Online: [ sles11-1 sles11-2 ] st_sbd (stonith:external/sbd): Started sles11-1 rsc1 (ocf::pacemaker:Dummy): Started sles11-2 rsc2 (ocf::pacemaker:Dummy): Started sles11-1 (failure ignored) diff --git a/pengine/test10/not-reschedule-unneeded-monitor.summary b/pengine/test10/not-reschedule-unneeded-monitor.summary index 2a872d5598..429ea24cf1 100644 --- a/pengine/test10/not-reschedule-unneeded-monitor.summary +++ b/pengine/test10/not-reschedule-unneeded-monitor.summary @@ -1,37 +1,37 @@ 1 of 11 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ castor kimball ] sbd (stonith:external/sbd): Started kimball Clone Set: base-clone [dlm] Started: [ castor kimball ] Clone Set: c-vm-fs [vm1] Started: [ castor kimball ] xen-f (ocf::heartbeat:VirtualDomain): Stopped ( disabled ) sle12-kvm (ocf::heartbeat:VirtualDomain): FAILED castor Clone Set: cl_sgdisk [sgdisk] Started: [ castor kimball ] Transition Summary: - * Recover sle12-kvm (Started castor -> kimball) + * Recover sle12-kvm ( castor -> kimball ) Executing cluster transition: * Resource action: sle12-kvm stop on castor * Pseudo action: all_stopped * Resource action: sle12-kvm start on kimball * Resource action: sle12-kvm monitor=10000 on kimball Revised cluster status: Online: [ castor kimball ] sbd (stonith:external/sbd): Started kimball Clone Set: base-clone [dlm] Started: [ castor kimball ] Clone Set: c-vm-fs [vm1] Started: [ castor kimball ] xen-f (ocf::heartbeat:VirtualDomain): Stopped ( disabled ) sle12-kvm (ocf::heartbeat:VirtualDomain): Started kimball Clone Set: cl_sgdisk [sgdisk] Started: [ castor kimball ] diff --git a/pengine/test10/notify-3.summary b/pengine/test10/notify-3.summary index 03a3d4e3ce..6c3367e1ef 100644 --- a/pengine/test10/notify-3.summary +++ b/pengine/test10/notify-3.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node2 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Stopped Transition Summary: - * Move child_rsc1:1 (Started node2 -> node1) + * Move child_rsc1:1 ( node2 -> node1 ) * Stop child_rsc2:0 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_pre_notify_stop_0 * Resource action: child_rsc2:0 monitor on node2 * Resource action: child_rsc2:1 monitor on node2 * Resource action: child_rsc2:1 monitor on node1 * Pseudo action: rsc2_pre_notify_stop_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_stop_0 * 
Pseudo action: rsc1_stop_0 * Resource action: child_rsc2:0 notify on node1 * Pseudo action: rsc2_confirmed-pre_notify_stop_0 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc1:1 stop on node2 * Pseudo action: rsc1_stopped_0 * Resource action: child_rsc2:0 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_post_notify_stopped_0 * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_stopped_0 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc2_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:1 start on node1 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Stopped child_rsc2:1 (heartbeat:apache): Stopped diff --git a/pengine/test10/novell-239082.summary b/pengine/test10/novell-239082.summary index b596de485b..2bafd1b380 100644 --- a/pengine/test10/novell-239082.summary +++ b/pengine/test10/novell-239082.summary @@ -1,59 +1,59 @@ Current cluster status: Online: [ xen-1 xen-2 ] fs_1 (ocf::heartbeat:Filesystem): Started xen-1 Master/Slave Set: ms-drbd0 [drbd0] Masters: [ xen-1 ] Slaves: [ xen-2 ] Transition Summary: * Shutdown xen-1 - * Move fs_1 (Started xen-1 -> xen-2) + * Move fs_1 ( xen-1 -> xen-2 ) * Promote drbd0:0 (Slave -> Master xen-2) - * Demote drbd0:1 (Master -> Stopped xen-1) + * Stop drbd0:1 ( Master xen-1 ) due to node availability Executing cluster transition: * Resource action: fs_1 stop on xen-1 * Pseudo action: ms-drbd0_pre_notify_demote_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-pre_notify_demote_0 * Pseudo action: ms-drbd0_demote_0 * Resource action: drbd0:1 demote on xen-1 * Pseudo action: ms-drbd0_demoted_0 * Pseudo action: ms-drbd0_post_notify_demoted_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-post_notify_demoted_0 * Pseudo action: ms-drbd0_pre_notify_stop_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-pre_notify_stop_0 * Pseudo action: ms-drbd0_stop_0 * Resource action: drbd0:1 stop on xen-1 * Pseudo action: ms-drbd0_stopped_0 * Cluster action: do_shutdown on xen-1 * Pseudo action: ms-drbd0_post_notify_stopped_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms-drbd0_pre_notify_promote_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-pre_notify_promote_0 * Pseudo action: ms-drbd0_promote_0 * Resource action: drbd0:0 promote on xen-2 * Pseudo action: ms-drbd0_promoted_0 * Pseudo action: ms-drbd0_post_notify_promoted_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0 * Resource action: fs_1 start on xen-2 Revised cluster status: Online: [ xen-1 xen-2 ] 
fs_1 (ocf::heartbeat:Filesystem): Started xen-2 Master/Slave Set: ms-drbd0 [drbd0] Masters: [ xen-2 ] Stopped: [ xen-1 ] diff --git a/pengine/test10/novell-252693-2.summary b/pengine/test10/novell-252693-2.summary index 2e8e4b5c37..392d26ef0e 100644 --- a/pengine/test10/novell-252693-2.summary +++ b/pengine/test10/novell-252693-2.summary @@ -1,98 +1,98 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] sles10 (ocf::heartbeat:Xen): Started node2 Transition Summary: * Start stonithclone:1 (node1) * Start evmsdclone:1 (node1) * Start evmsclone:1 (node1) * Start imagestoreclone:1 (node1) * Start configstoreclone:1 (node1) - * Migrate sles10 (Started node2 -> node1) + * Migrate sles10 ( node2 -> node1 ) Executing cluster transition: * Resource action: stonithclone:1 monitor on node1 * Pseudo action: stonithcloneset_start_0 * Resource action: evmsdclone:1 monitor on node1 * Pseudo action: evmsdcloneset_start_0 * Resource action: evmsclone:1 monitor on node1 * Pseudo action: evmscloneset_pre_notify_start_0 * Resource action: imagestoreclone:1 monitor on node1 * Pseudo action: imagestorecloneset_pre_notify_start_0 * Resource action: configstoreclone:1 monitor on node1 * Pseudo action: configstorecloneset_pre_notify_start_0 * Resource action: sles10 monitor on node1 * Resource action: stonithclone:1 start on node1 * Pseudo action: stonithcloneset_running_0 * Resource action: evmsdclone:1 start on node1 * Pseudo action: evmsdcloneset_running_0 * Resource action: evmsclone:0 notify on node2 * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 * Pseudo action: evmscloneset_start_0 * Resource action: imagestoreclone:0 notify on node2 * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 * Resource action: configstoreclone:0 notify on node2 * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 * Resource action: stonithclone:1 monitor=5000 on node1 * Resource action: evmsdclone:1 monitor=5000 on node1 * Resource action: evmsclone:1 start on node1 * Pseudo action: evmscloneset_running_0 * Pseudo action: evmscloneset_post_notify_running_0 * Resource action: evmsclone:0 notify on node2 * Resource action: evmsclone:1 notify on node1 * Pseudo action: evmscloneset_confirmed-post_notify_running_0 * Pseudo action: imagestorecloneset_start_0 * Pseudo action: configstorecloneset_start_0 * Resource action: imagestoreclone:1 start on node1 * Pseudo action: imagestorecloneset_running_0 * Resource action: configstoreclone:1 start on node1 * Pseudo action: configstorecloneset_running_0 * Pseudo action: imagestorecloneset_post_notify_running_0 * Pseudo action: configstorecloneset_post_notify_running_0 * Resource action: imagestoreclone:0 notify on node2 * Resource action: imagestoreclone:1 notify on node1 * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 * Resource action: configstoreclone:0 notify on node2 * Resource action: configstoreclone:1 notify on node1 * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 * Resource action: sles10 migrate_to on node2 * Resource action: imagestoreclone:1 monitor=20000 on node1 * Resource action: configstoreclone:1 
monitor=20000 on node1 * Resource action: sles10 migrate_from on node1 * Resource action: sles10 stop on node2 * Pseudo action: all_stopped * Pseudo action: sles10_start_0 * Resource action: sles10 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] sles10 (ocf::heartbeat:Xen): Started node1 diff --git a/pengine/test10/novell-252693-3.summary b/pengine/test10/novell-252693-3.summary index 741c9573f1..5c3b0caaa0 100644 --- a/pengine/test10/novell-252693-3.summary +++ b/pengine/test10/novell-252693-3.summary @@ -1,108 +1,108 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] imagestoreclone (ocf::heartbeat:Filesystem): FAILED node2 Stopped: [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] sles10 (ocf::heartbeat:Xen): Started node2 Transition Summary: * Start stonithclone:1 (node1) * Start evmsdclone:1 (node1) * Start evmsclone:1 (node1) - * Recover imagestoreclone:0 (Started node2 -> node1) + * Recover imagestoreclone:0 ( node2 -> node1 ) * Start imagestoreclone:1 (node2) * Start configstoreclone:1 (node1) - * Migrate sles10 (Started node2 -> node1) + * Migrate sles10 ( node2 -> node1 ) Executing cluster transition: * Resource action: stonithclone:1 monitor on node1 * Pseudo action: stonithcloneset_start_0 * Resource action: evmsdclone:1 monitor on node1 * Pseudo action: evmsdcloneset_start_0 * Resource action: evmsclone:1 monitor on node1 * Pseudo action: evmscloneset_pre_notify_start_0 * Resource action: imagestoreclone:0 monitor on node1 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Resource action: configstoreclone:1 monitor on node1 * Pseudo action: configstorecloneset_pre_notify_start_0 * Resource action: sles10 monitor on node1 * Resource action: stonithclone:1 start on node1 * Pseudo action: stonithcloneset_running_0 * Resource action: evmsdclone:1 start on node1 * Pseudo action: evmsdcloneset_running_0 * Resource action: evmsclone:0 notify on node2 * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 * Pseudo action: evmscloneset_start_0 * Resource action: imagestoreclone:0 notify on node2 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:0 notify on node2 * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 * Resource action: stonithclone:1 monitor=5000 on node1 * Resource action: evmsdclone:1 monitor=5000 on node1 * Resource action: evmsclone:1 start on node1 * Pseudo action: evmscloneset_running_0 * Resource action: imagestoreclone:0 stop on node2 * Pseudo action: imagestorecloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_running_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Resource action: evmsclone:0 notify on node2 * Resource action: evmsclone:1 notify on node1 * Pseudo action: evmscloneset_confirmed-post_notify_running_0 * Pseudo action: 
imagestorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: imagestorecloneset_pre_notify_start_0 * Pseudo action: configstorecloneset_start_0 * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 * Pseudo action: imagestorecloneset_start_0 * Resource action: configstoreclone:1 start on node1 * Pseudo action: configstorecloneset_running_0 * Resource action: imagestoreclone:0 start on node1 * Resource action: imagestoreclone:1 start on node2 * Pseudo action: imagestorecloneset_running_0 * Pseudo action: configstorecloneset_post_notify_running_0 * Pseudo action: imagestorecloneset_post_notify_running_0 * Resource action: configstoreclone:0 notify on node2 * Resource action: configstoreclone:1 notify on node1 * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 * Resource action: imagestoreclone:0 notify on node1 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 * Resource action: configstoreclone:1 monitor=20000 on node1 * Resource action: sles10 migrate_to on node2 * Resource action: imagestoreclone:0 monitor=20000 on node1 * Resource action: imagestoreclone:1 monitor=20000 on node2 * Resource action: sles10 migrate_from on node1 * Resource action: sles10 stop on node2 * Pseudo action: all_stopped * Pseudo action: sles10_start_0 * Resource action: sles10 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] sles10 (ocf::heartbeat:Xen): Started node1 diff --git a/pengine/test10/novell-252693.summary b/pengine/test10/novell-252693.summary index 0682119d01..27cf3154e1 100644 --- a/pengine/test10/novell-252693.summary +++ b/pengine/test10/novell-252693.summary @@ -1,90 +1,90 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] sles10 (ocf::heartbeat:Xen): Started node1 Transition Summary: * Shutdown node1 * Stop stonithclone:1 (node1) due to node availability * Stop evmsdclone:1 (node1) due to node availability * Stop evmsclone:1 (node1) due to node availability * Stop imagestoreclone:1 (node1) due to node availability * Stop configstoreclone:1 (node1) due to node availability - * Migrate sles10 (Started node1 -> node2) + * Migrate sles10 ( node1 -> node2 ) Executing cluster transition: * Pseudo action: stonithcloneset_stop_0 * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Resource action: sles10 migrate_to on node1 * Resource action: stonithclone:1 stop on node1 * Pseudo action: stonithcloneset_stopped_0 * Resource action: evmsclone:0 notify on node2 * Resource action: evmsclone:1 notify on node1 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:0 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: 
imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:0 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Resource action: sles10 migrate_from on node2 * Resource action: sles10 stop on node1 * Resource action: imagestoreclone:0 stop on node1 * Pseudo action: imagestorecloneset_stopped_0 * Resource action: configstoreclone:0 stop on node1 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: sles10_start_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: sles10 monitor=10000 on node2 * Resource action: imagestoreclone:0 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Resource action: configstoreclone:0 notify on node2 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_stop_0 * Resource action: evmsclone:1 stop on node1 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:0 notify on node2 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmsdcloneset_stop_0 * Resource action: evmsdclone:1 stop on node1 * Pseudo action: evmsdcloneset_stopped_0 * Cluster action: do_shutdown on node1 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] sles10 (ocf::heartbeat:Xen): Started node2 diff --git a/pengine/test10/one-or-more-1.summary b/pengine/test10/one-or-more-1.summary index af31a4656f..d2e79bc522 100644 --- a/pengine/test10/one-or-more-1.summary +++ b/pengine/test10/one-or-more-1.summary @@ -1,31 +1,31 @@ 1 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped ( disabled ) B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped D (ocf::pacemaker:Dummy): Stopped Transition Summary: - * Start B (fc16-builder - blocked) due to unrunnable A start - * Start C (fc16-builder - blocked) due to unrunnable A start - * Start D (fc16-builder - blocked) due to unrunnable one-or-more:require-all-set-1 + * Start B ( fc16-builder ) due to unrunnable A start (blocked) + * Start C ( fc16-builder ) due to unrunnable A start (blocked) + * Start D ( fc16-builder ) due to unrunnable one-or-more:require-all-set-1 (blocked) Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped ( disabled ) B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped D (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/one-or-more-3.summary b/pengine/test10/one-or-more-3.summary index abf4081c74..97511e83ab 100644 --- 
a/pengine/test10/one-or-more-3.summary +++ b/pengine/test10/one-or-more-3.summary @@ -1,31 +1,31 @@ 2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Stopped ( disabled ) C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped Transition Summary: * Start A (fc16-builder) - * Start D (fc16-builder - blocked) due to unrunnable one-or-more:require-all-set-1 + * Start D ( fc16-builder ) due to unrunnable one-or-more:require-all-set-1 (blocked) Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder * Resource action: A start on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Stopped ( disabled ) C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/order-mandatory.summary b/pengine/test10/order-mandatory.summary index 021d8fe1f7..955a2814b8 100644 --- a/pengine/test10/order-mandatory.summary +++ b/pengine/test10/order-mandatory.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) due to required rsc1 start - * Stop rsc4 (Started node1) due to unrunnable rsc3 start + * Restart rsc2 ( node1 ) due to required rsc1 start + * Stop rsc4 ( node1 ) due to unrunnable rsc3 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Stopped diff --git a/pengine/test10/order-required.summary b/pengine/test10/order-required.summary index 021d8fe1f7..955a2814b8 100644 --- a/pengine/test10/order-required.summary +++ b/pengine/test10/order-required.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) due to required rsc1 start - * Stop rsc4 (Started node1) due to unrunnable rsc3 start + * Restart rsc2 ( node1 ) due to required rsc1 start + * Stop rsc4 ( node1 ) due to unrunnable rsc3 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Stopped diff --git a/pengine/test10/order-serialize-set.summary b/pengine/test10/order-serialize-set.summary index 5483cca8ae..a9f97a3ad8 100644 --- a/pengine/test10/order-serialize-set.summary +++ b/pengine/test10/order-serialize-set.summary @@ -1,72 
+1,72 @@ Current cluster status: Node xen-a (445a93d5-655e-430b-b45d-47d79a2f78c7): standby Online: [ xen-b ] xen-a-fencing (stonith:external/ipmi): Started xen-b xen-b-fencing (stonith:external/ipmi): Started xen-a db (ocf::heartbeat:Xen): Started xen-a dbreplica (ocf::heartbeat:Xen): Started xen-b core-101 (ocf::heartbeat:Xen): Started xen-a core-200 (ocf::heartbeat:Xen): Started xen-a sysadmin (ocf::heartbeat:Xen): Started xen-b edge (ocf::heartbeat:Xen): Started xen-a base (ocf::heartbeat:Xen): Started xen-a Email_Alerting (ocf::heartbeat:MailTo): Started xen-b Transition Summary: - * Restart xen-a-fencing (Started xen-b) + * Restart xen-a-fencing ( xen-b ) * Stop xen-b-fencing (xen-a) - * Migrate db (Started xen-a -> xen-b) - * Migrate core-101 (Started xen-a -> xen-b) - * Migrate core-200 (Started xen-a -> xen-b) - * Migrate edge (Started xen-a -> xen-b) - * Migrate base (Started xen-a -> xen-b) + * Migrate db ( xen-a -> xen-b ) + * Migrate core-101 ( xen-a -> xen-b ) + * Migrate core-200 ( xen-a -> xen-b ) + * Migrate edge ( xen-a -> xen-b ) + * Migrate base ( xen-a -> xen-b ) Executing cluster transition: * Resource action: xen-a-fencing stop on xen-b * Resource action: xen-a-fencing start on xen-b * Resource action: xen-a-fencing monitor=60000 on xen-b * Resource action: xen-b-fencing stop on xen-a * Resource action: db migrate_to on xen-a * Resource action: db migrate_from on xen-b * Resource action: db stop on xen-a * Resource action: core-101 migrate_to on xen-a * Pseudo action: db_start_0 * Resource action: core-101 migrate_from on xen-b * Resource action: core-101 stop on xen-a * Resource action: core-200 migrate_to on xen-a * Resource action: db monitor=10000 on xen-b * Pseudo action: core-101_start_0 * Resource action: core-200 migrate_from on xen-b * Resource action: core-200 stop on xen-a * Resource action: edge migrate_to on xen-a * Resource action: core-101 monitor=10000 on xen-b * Pseudo action: core-200_start_0 * Resource action: edge migrate_from on xen-b * Resource action: edge stop on xen-a * Resource action: base migrate_to on xen-a * Resource action: core-200 monitor=10000 on xen-b * Pseudo action: edge_start_0 * Resource action: base migrate_from on xen-b * Resource action: base stop on xen-a * Pseudo action: all_stopped * Resource action: edge monitor=10000 on xen-b * Pseudo action: base_start_0 * Resource action: base monitor=10000 on xen-b Revised cluster status: Node xen-a (445a93d5-655e-430b-b45d-47d79a2f78c7): standby Online: [ xen-b ] xen-a-fencing (stonith:external/ipmi): Started xen-b xen-b-fencing (stonith:external/ipmi): Stopped db (ocf::heartbeat:Xen): Started xen-b dbreplica (ocf::heartbeat:Xen): Started xen-b core-101 (ocf::heartbeat:Xen): Started xen-b core-200 (ocf::heartbeat:Xen): Started xen-b sysadmin (ocf::heartbeat:Xen): Started xen-b edge (ocf::heartbeat:Xen): Started xen-b base (ocf::heartbeat:Xen): Started xen-b Email_Alerting (ocf::heartbeat:MailTo): Started xen-b diff --git a/pengine/test10/order-serialize.summary b/pengine/test10/order-serialize.summary index b2114ebceb..f58a6bc804 100644 --- a/pengine/test10/order-serialize.summary +++ b/pengine/test10/order-serialize.summary @@ -1,72 +1,72 @@ Current cluster status: Node xen-a (445a93d5-655e-430b-b45d-47d79a2f78c7): standby Online: [ xen-b ] xen-a-fencing (stonith:external/ipmi): Started xen-b xen-b-fencing (stonith:external/ipmi): Started xen-a db (ocf::heartbeat:Xen): Started xen-a dbreplica (ocf::heartbeat:Xen): Started xen-b core-101 (ocf::heartbeat:Xen): Started xen-a core-200 
(ocf::heartbeat:Xen): Started xen-a sysadmin (ocf::heartbeat:Xen): Started xen-b edge (ocf::heartbeat:Xen): Started xen-a base (ocf::heartbeat:Xen): Started xen-a Email_Alerting (ocf::heartbeat:MailTo): Started xen-b Transition Summary: - * Restart xen-a-fencing (Started xen-b) + * Restart xen-a-fencing ( xen-b ) * Stop xen-b-fencing (xen-a) - * Migrate db (Started xen-a -> xen-b) - * Migrate core-101 (Started xen-a -> xen-b) - * Migrate core-200 (Started xen-a -> xen-b) - * Migrate edge (Started xen-a -> xen-b) - * Migrate base (Started xen-a -> xen-b) + * Migrate db ( xen-a -> xen-b ) + * Migrate core-101 ( xen-a -> xen-b ) + * Migrate core-200 ( xen-a -> xen-b ) + * Migrate edge ( xen-a -> xen-b ) + * Migrate base ( xen-a -> xen-b ) Executing cluster transition: * Resource action: xen-a-fencing stop on xen-b * Resource action: xen-a-fencing start on xen-b * Resource action: xen-a-fencing monitor=60000 on xen-b * Resource action: xen-b-fencing stop on xen-a * Resource action: db migrate_to on xen-a * Resource action: core-101 migrate_to on xen-a * Resource action: edge migrate_to on xen-a * Resource action: db migrate_from on xen-b * Resource action: db stop on xen-a * Resource action: core-101 migrate_from on xen-b * Resource action: core-101 stop on xen-a * Resource action: core-200 migrate_to on xen-a * Resource action: edge migrate_from on xen-b * Resource action: edge stop on xen-a * Resource action: base migrate_to on xen-a * Pseudo action: db_start_0 * Pseudo action: core-101_start_0 * Resource action: core-200 migrate_from on xen-b * Resource action: core-200 stop on xen-a * Pseudo action: edge_start_0 * Resource action: base migrate_from on xen-b * Resource action: base stop on xen-a * Pseudo action: all_stopped * Resource action: db monitor=10000 on xen-b * Resource action: core-101 monitor=10000 on xen-b * Pseudo action: core-200_start_0 * Resource action: edge monitor=10000 on xen-b * Pseudo action: base_start_0 * Resource action: core-200 monitor=10000 on xen-b * Resource action: base monitor=10000 on xen-b Revised cluster status: Node xen-a (445a93d5-655e-430b-b45d-47d79a2f78c7): standby Online: [ xen-b ] xen-a-fencing (stonith:external/ipmi): Started xen-b xen-b-fencing (stonith:external/ipmi): Stopped db (ocf::heartbeat:Xen): Started xen-b dbreplica (ocf::heartbeat:Xen): Started xen-b core-101 (ocf::heartbeat:Xen): Started xen-b core-200 (ocf::heartbeat:Xen): Started xen-b sysadmin (ocf::heartbeat:Xen): Started xen-b edge (ocf::heartbeat:Xen): Started xen-b base (ocf::heartbeat:Xen): Started xen-b Email_Alerting (ocf::heartbeat:MailTo): Started xen-b diff --git a/pengine/test10/order-sets.summary b/pengine/test10/order-sets.summary index 0d5de5e9f3..e16e1f42c0 100644 --- a/pengine/test10/order-sets.summary +++ b/pengine/test10/order-sets.summary @@ -1,40 +1,40 @@ Current cluster status: Node ubuntu_2: standby Online: [ ubuntu_1 ] world1 (ocf::bbnd:world1test): Started ubuntu_2 world2 (ocf::bbnd:world2test): Started ubuntu_2 world3 (ocf::bbnd:world3test): Started ubuntu_2 world4 (ocf::bbnd:world4test): Started ubuntu_2 Transition Summary: - * Move world1 (Started ubuntu_2 -> ubuntu_1) - * Move world2 (Started ubuntu_2 -> ubuntu_1) - * Move world3 (Started ubuntu_2 -> ubuntu_1) - * Move world4 (Started ubuntu_2 -> ubuntu_1) + * Move world1 ( ubuntu_2 -> ubuntu_1 ) + * Move world2 ( ubuntu_2 -> ubuntu_1 ) + * Move world3 ( ubuntu_2 -> ubuntu_1 ) + * Move world4 ( ubuntu_2 -> ubuntu_1 ) Executing cluster transition: * Resource action: world4 stop on ubuntu_2 * Resource 
action: world3 stop on ubuntu_2 * Resource action: world2 stop on ubuntu_2 * Resource action: world1 stop on ubuntu_2 * Pseudo action: all_stopped * Resource action: world1 start on ubuntu_1 * Resource action: world2 start on ubuntu_1 * Resource action: world3 start on ubuntu_1 * Resource action: world4 start on ubuntu_1 * Resource action: world1 monitor=10000 on ubuntu_1 * Resource action: world2 monitor=10000 on ubuntu_1 * Resource action: world3 monitor=10000 on ubuntu_1 * Resource action: world4 monitor=10000 on ubuntu_1 Revised cluster status: Node ubuntu_2: standby Online: [ ubuntu_1 ] world1 (ocf::bbnd:world1test): Started ubuntu_1 world2 (ocf::bbnd:world2test): Started ubuntu_1 world3 (ocf::bbnd:world3test): Started ubuntu_1 world4 (ocf::bbnd:world4test): Started ubuntu_1 diff --git a/pengine/test10/order-wrong-kind.summary b/pengine/test10/order-wrong-kind.summary index c498cafd9b..df3bd80fac 100644 --- a/pengine/test10/order-wrong-kind.summary +++ b/pengine/test10/order-wrong-kind.summary @@ -1,28 +1,28 @@ Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues. Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) due to required rsc1 start + * Restart rsc2 ( node1 ) due to required rsc1 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/order3.summary b/pengine/test10/order3.summary index 4289a47e03..6694c0ee43 100644 --- a/pengine/test10/order3.summary +++ b/pengine/test10/order3.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) - * Move rsc4 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) + * Move rsc4 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc1 stop on node1 * Resource action: rsc4 start on node2 * Pseudo action: all_stopped * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node2 rsc4 (heartbeat:apache): Started node2 diff --git a/pengine/test10/order5.summary b/pengine/test10/order5.summary index 0c3e0bcc69..db222c47d9 100644 --- a/pengine/test10/order5.summary +++ b/pengine/test10/order5.summary @@ -1,50 +1,50 @@ Current cluster status: Online: [ node1 node2 ] rsc1 
(heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 rsc5 (heartbeat:apache): Started node2 rsc6 (heartbeat:apache): Started node2 rsc7 (heartbeat:apache): Started node2 rsc8 (heartbeat:apache): Started node2 Transition Summary: - * Move rsc2 (Started node1 -> node2) - * Move rsc4 (Started node1 -> node2) - * Move rsc6 (Started node2 -> node1) - * Move rsc8 (Started node2 -> node1) + * Move rsc2 ( node1 -> node2 ) + * Move rsc4 ( node1 -> node2 ) + * Move rsc6 ( node2 -> node1 ) + * Move rsc8 ( node2 -> node1 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Resource action: rsc5 monitor on node1 * Resource action: rsc6 monitor on node1 * Resource action: rsc7 monitor on node1 * Resource action: rsc8 monitor on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc6 stop on node2 * Resource action: rsc8 stop on node2 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 * Resource action: rsc4 start on node2 * Resource action: rsc6 start on node1 * Resource action: rsc8 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node2 rsc5 (heartbeat:apache): Started node2 rsc6 (heartbeat:apache): Started node1 rsc7 (heartbeat:apache): Started node2 rsc8 (heartbeat:apache): Started node1 diff --git a/pengine/test10/order6.summary b/pengine/test10/order6.summary index 0c3e0bcc69..db222c47d9 100644 --- a/pengine/test10/order6.summary +++ b/pengine/test10/order6.summary @@ -1,50 +1,50 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 rsc5 (heartbeat:apache): Started node2 rsc6 (heartbeat:apache): Started node2 rsc7 (heartbeat:apache): Started node2 rsc8 (heartbeat:apache): Started node2 Transition Summary: - * Move rsc2 (Started node1 -> node2) - * Move rsc4 (Started node1 -> node2) - * Move rsc6 (Started node2 -> node1) - * Move rsc8 (Started node2 -> node1) + * Move rsc2 ( node1 -> node2 ) + * Move rsc4 ( node1 -> node2 ) + * Move rsc6 ( node2 -> node1 ) + * Move rsc8 ( node2 -> node1 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Resource action: rsc5 monitor on node1 * Resource action: rsc6 monitor on node1 * Resource action: rsc7 monitor on node1 * Resource action: rsc8 monitor on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc6 stop on node2 * Resource action: rsc8 stop on node2 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 * Resource action: rsc4 start on node2 * Resource action: rsc6 start on node1 * Resource action: rsc8 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node2 rsc5 (heartbeat:apache): Started node2 rsc6 (heartbeat:apache): Started node1 rsc7 (heartbeat:apache): Started node2 
rsc8 (heartbeat:apache): Started node1 diff --git a/pengine/test10/order7.summary b/pengine/test10/order7.summary index 6648bff2ea..a8610e9a06 100644 --- a/pengine/test10/order7.summary +++ b/pengine/test10/order7.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Stopped rsc3 (heartbeat:apache): Stopped rscA (heartbeat:apache): FAILED node1 ( blocked ) rscB (heartbeat:apache): Stopped rscC (heartbeat:apache): Stopped Transition Summary: * Start rsc2 (node1) * Start rsc3 (node1) * Start rscB (node1) - * Start rscC (node1 - blocked) due to unrunnable rscA start + * Start rscC ( node1 ) due to unrunnable rscA start (blocked) Executing cluster transition: * Resource action: rsc2 monitor on node1 * Resource action: rsc3 monitor on node1 * Resource action: rscB monitor on node1 * Resource action: rscC monitor on node1 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Resource action: rscB start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rscA (heartbeat:apache): FAILED node1 ( blocked ) rscB (heartbeat:apache): Started node1 rscC (heartbeat:apache): Stopped diff --git a/pengine/test10/ordered-set-basic-startup.summary b/pengine/test10/ordered-set-basic-startup.summary index cfa8f8b3de..ef546ed37b 100644 --- a/pengine/test10/ordered-set-basic-startup.summary +++ b/pengine/test10/ordered-set-basic-startup.summary @@ -1,39 +1,39 @@ 2 of 6 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped ( disabled ) E (ocf::pacemaker:Dummy): Stopped F (ocf::pacemaker:Dummy): Stopped Transition Summary: - * Start A (fc16-builder - blocked) due to unrunnable C start + * Start A ( fc16-builder ) due to unrunnable C start (blocked) * Start B (fc16-builder) - * Start E (fc16-builder - blocked) due to unrunnable A start - * Start F (fc16-builder - blocked) due to unrunnable D start + * Start E ( fc16-builder ) due to unrunnable A start (blocked) + * Start F ( fc16-builder ) due to unrunnable D start (blocked) Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder * Resource action: E monitor on fc16-builder * Resource action: F monitor on fc16-builder * Resource action: B start on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped ( disabled ) E (ocf::pacemaker:Dummy): Stopped F (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ordered-set-natural.summary b/pengine/test10/ordered-set-natural.summary index 1888d66e31..e9667fdf21 100644 --- a/pengine/test10/ordered-set-natural.summary +++ b/pengine/test10/ordered-set-natural.summary @@ -1,52 +1,52 @@ 4 of 15 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ node1 node2 ] Resource Group: rgroup dummy1-1 (ocf::heartbeat:Dummy): Stopped dummy1-2 (ocf::heartbeat:Dummy): Stopped dummy1-3 
(ocf::heartbeat:Dummy): Stopped ( disabled ) dummy1-4 (ocf::heartbeat:Dummy): Stopped dummy1-5 (ocf::heartbeat:Dummy): Stopped dummy2-1 (ocf::heartbeat:Dummy): Stopped dummy2-2 (ocf::heartbeat:Dummy): Stopped dummy2-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-1 (ocf::heartbeat:Dummy): Stopped dummy3-2 (ocf::heartbeat:Dummy): Stopped dummy3-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-4 (ocf::heartbeat:Dummy): Stopped dummy3-5 (ocf::heartbeat:Dummy): Stopped dummy2-4 (ocf::heartbeat:Dummy): Stopped dummy2-5 (ocf::heartbeat:Dummy): Stopped Transition Summary: - * Start dummy1-1 (node1 - blocked) due to no quorum - * Start dummy1-2 (node1 - blocked) due to no quorum - * Start dummy2-1 (node2 - blocked) due to no quorum - * Start dummy2-2 (node2 - blocked) due to no quorum - * Start dummy3-4 (node1 - blocked) due to no quorum - * Start dummy3-5 (node1 - blocked) due to no quorum + * Start dummy1-1 ( node1 ) due to no quorum (blocked) + * Start dummy1-2 ( node1 ) due to no quorum (blocked) + * Start dummy2-1 ( node2 ) due to no quorum (blocked) + * Start dummy2-2 ( node2 ) due to no quorum (blocked) + * Start dummy3-4 ( node1 ) due to no quorum (blocked) + * Start dummy3-5 ( node1 ) due to no quorum (blocked) Executing cluster transition: Revised cluster status: Online: [ node1 node2 ] Resource Group: rgroup dummy1-1 (ocf::heartbeat:Dummy): Stopped dummy1-2 (ocf::heartbeat:Dummy): Stopped dummy1-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy1-4 (ocf::heartbeat:Dummy): Stopped dummy1-5 (ocf::heartbeat:Dummy): Stopped dummy2-1 (ocf::heartbeat:Dummy): Stopped dummy2-2 (ocf::heartbeat:Dummy): Stopped dummy2-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-1 (ocf::heartbeat:Dummy): Stopped dummy3-2 (ocf::heartbeat:Dummy): Stopped dummy3-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-4 (ocf::heartbeat:Dummy): Stopped dummy3-5 (ocf::heartbeat:Dummy): Stopped dummy2-4 (ocf::heartbeat:Dummy): Stopped dummy2-5 (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/params-1.summary b/pengine/test10/params-1.summary index 357c2acf93..26fdecf673 100644 --- a/pengine/test10/params-1.summary +++ b/pengine/test10/params-1.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Transition Summary: - * Restart DcIPaddr (Started c001n02) + * Restart DcIPaddr ( c001n02 ) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n08 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n02 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n08 monitor=5000 on c001n08 * Resource action: rsc_c001n02 monitor=6000 on c001n02 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n02 cancel=5000 on c001n02 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: rsc_c001n01 monitor 
on c001n02 * Resource action: DcIPaddr stop on c001n02 * Resource action: DcIPaddr start on c001n02 * Resource action: DcIPaddr monitor=5000 on c001n02 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 diff --git a/pengine/test10/params-2.summary b/pengine/test10/params-2.summary index f4169f2d80..1e73e77f79 100644 --- a/pengine/test10/params-2.summary +++ b/pengine/test10/params-2.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ node1 node2 node3 ] rsc1 (lsb:apache): Started node1 rsc2 (lsb:apache): Started node2 rsc3 (lsb:apache): Stopped Transition Summary: * Shutdown node1 * Stop rsc1 (node1) - * Restart rsc2 (Started node2) + * Restart rsc2 ( node2 ) * Start rsc3 (node3) Executing cluster transition: * Resource action: rsc1 monitor on node3 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node3 * Resource action: rsc2 monitor on node1 * Resource action: rsc2 stop on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc3 delete on node3 * Resource action: rsc1 stop on node1 * Resource action: rsc2 delete on node2 * Resource action: rsc3 start on node3 * Cluster action: do_shutdown on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node1 node2 node3 ] rsc1 (lsb:apache): Stopped rsc2 (lsb:apache): Started node2 rsc3 (lsb:apache): Started node3 diff --git a/pengine/test10/params-4.summary b/pengine/test10/params-4.summary index 67b04260cb..dfd3122fc4 100644 --- a/pengine/test10/params-4.summary +++ b/pengine/test10/params-4.summary @@ -1,44 +1,44 @@ Current cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Transition Summary: - * Reload DcIPaddr (Started c001n02) + * Reload DcIPaddr ( c001n02 ) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n08 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: DcIPaddr reload on c001n02 * Resource action: DcIPaddr monitor=5000 on c001n02 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n02 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n08 monitor=5000 on c001n08 * Resource action: rsc_c001n02 monitor=6000 on c001n02 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n02 cancel=5000 on c001n02 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n02 Revised cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started 
c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 diff --git a/pengine/test10/params-5.summary b/pengine/test10/params-5.summary index 357c2acf93..26fdecf673 100644 --- a/pengine/test10/params-5.summary +++ b/pengine/test10/params-5.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Transition Summary: - * Restart DcIPaddr (Started c001n02) + * Restart DcIPaddr ( c001n02 ) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n08 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n02 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n08 monitor=5000 on c001n08 * Resource action: rsc_c001n02 monitor=6000 on c001n02 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n02 cancel=5000 on c001n02 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n02 * Resource action: DcIPaddr stop on c001n02 * Resource action: DcIPaddr start on c001n02 * Resource action: DcIPaddr monitor=5000 on c001n02 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 diff --git a/pengine/test10/params-6.summary b/pengine/test10/params-6.summary index b7c9ff433b..20ba7453ed 100644 --- a/pengine/test10/params-6.summary +++ b/pengine/test10/params-6.summary @@ -1,376 +1,376 @@ 90 of 337 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ mgmt01 v03-a v03-b ] stonith-v02-a (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-b (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-d (stonith:fence_ipmilan): Stopped ( disabled ) stonith-mgmt01 (stonith:fence_xvm): Started v03-a stonith-mgmt02 (stonith:meatware): Started v03-a stonith-v03-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v03-a (stonith:fence_ipmilan): Started v03-b stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 stonith-v03-d (stonith:fence_ipmilan): Stopped ( disabled ) Clone Set: cl-clvmd [clvmd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-dlm [dlm] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-iscsid [iscsid] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirtd [libvirtd] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-multipathd [multipathd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-node-params [node-params] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] 
Clone Set: cl-vlan1-if [vlan1-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan101-if [vlan101-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan102-if [vlan102-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan103-if [vlan103-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan104-if [vlan104-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan3-if [vlan3-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan4-if [vlan4-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan5-if [vlan5-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan900-if [vlan900-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan909-if [vlan909-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-libvirt-images-fs [libvirt-images-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-install-fs [libvirt-install-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-images-pool [libvirt-images-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-libvirt-qpid [libvirt-qpid] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-vlan200-if [vlan200-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] anbriz-gw-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) 
anbriz-work-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a vptest1.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest2.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest3.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest4.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest5.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest6.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest7.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest8.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest9.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest10.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest11.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest12.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest13.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest14.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest15.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest16.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest17.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest18.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest19.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest20.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest21.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest22.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest23.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest24.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest25.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest26.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest27.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest28.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest29.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest30.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest31.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest32.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest33.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest34.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest35.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest36.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest37.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest38.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest39.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest40.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest41.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest42.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest43.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest44.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest45.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest46.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest47.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) 
vptest48.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest49.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest50.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest51.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest52.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest53.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest54.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest55.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest56.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest57.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest58.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest59.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest60.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) sl6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-mcast-test-net [mcast-test-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Transition Summary: - * Reload c5-x64-devel.vds-ok.com-vm (Started v03-a) + * Reload c5-x64-devel.vds-ok.com-vm ( v03-a ) Executing cluster transition: * Resource action: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b * Resource action: c5-x64-devel.vds-ok.com-vm reload on v03-a * Resource action: c5-x64-devel.vds-ok.com-vm monitor=10000 on v03-a * Pseudo action: load_stopped_mgmt01 * Pseudo action: load_stopped_v03-b * Pseudo action: load_stopped_v03-a Revised cluster status: Online: [ mgmt01 v03-a v03-b ] stonith-v02-a (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-b (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v02-d (stonith:fence_ipmilan): Stopped ( disabled ) stonith-mgmt01 (stonith:fence_xvm): Started v03-a stonith-mgmt02 (stonith:meatware): Started v03-a stonith-v03-c (stonith:fence_ipmilan): Stopped ( disabled ) stonith-v03-a (stonith:fence_ipmilan): Started v03-b stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 stonith-v03-d (stonith:fence_ipmilan): Stopped ( disabled ) Clone Set: cl-clvmd [clvmd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-dlm [dlm] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-iscsid [iscsid] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirtd [libvirtd] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-multipathd [multipathd] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-node-params [node-params] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan1-if [vlan1-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan101-if [vlan101-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan102-if [vlan102-if] Started: [ v03-a v03-b ] Stopped: [ 
mgmt01 ] Clone Set: cl-vlan103-if [vlan103-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan104-if [vlan104-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan3-if [vlan3-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan4-if [vlan4-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan5-if [vlan5-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan900-if [vlan900-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vlan909-if [vlan909-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-libvirt-images-fs [libvirt-images-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-install-fs [libvirt-install-fs] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] Started: [ mgmt01 v03-a v03-b ] Clone Set: cl-libvirt-images-pool [libvirt-images-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-libvirt-qpid [libvirt-qpid] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) Clone Set: cl-vlan200-if [vlan200-if] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] anbriz-gw-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) anbriz-work-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a vptest1.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest2.vds-ok.com-vm 
(ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest3.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest4.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest5.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest6.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest7.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest8.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest9.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest10.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest11.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest12.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest13.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest14.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest15.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest16.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest17.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest18.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest19.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest20.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest21.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest22.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest23.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest24.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest25.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest26.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest27.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest28.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest29.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest30.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest31.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest32.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest33.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest34.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest35.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest36.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest37.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest38.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest39.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest40.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest41.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest42.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest43.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest44.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest45.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest46.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest47.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest48.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest49.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest50.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) 
vptest51.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest52.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest53.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest54.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest55.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest56.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest57.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest58.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest59.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) vptest60.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) sl6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a Clone Set: cl-mcast-test-net [mcast-test-net] Started: [ v03-a v03-b ] Stopped: [ mgmt01 ] dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped ( disabled ) diff --git a/pengine/test10/per-op-failcount.summary b/pengine/test10/per-op-failcount.summary index 6decb36a12..110d6b0d41 100644 --- a/pengine/test10/per-op-failcount.summary +++ b/pengine/test10/per-op-failcount.summary @@ -1,34 +1,34 @@ Using the original execution date of: 2017-04-06 09:04:22Z Current cluster status: Node rh73-01-snmp (3232238265): UNCLEAN (online) Online: [ rh73-02-snmp ] prmDummy (ocf::pacemaker:Dummy): FAILED rh73-01-snmp prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp prmStonith2-1 (stonith:external/ssh): Started rh73-01-snmp Transition Summary: * Fence (reboot) rh73-01-snmp 'prmDummy failed there' - * Recover prmDummy (Started rh73-01-snmp -> rh73-02-snmp) - * Move prmStonith2-1 (Started rh73-01-snmp -> rh73-02-snmp) + * Recover prmDummy ( rh73-01-snmp -> rh73-02-snmp ) + * Move prmStonith2-1 ( rh73-01-snmp -> rh73-02-snmp ) Executing cluster transition: * Pseudo action: prmStonith2-1_stop_0 * Fencing rh73-01-snmp (reboot) * Pseudo action: prmDummy_stop_0 * Resource action: prmStonith2-1 start on rh73-02-snmp * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: prmDummy start on rh73-02-snmp * Resource action: prmDummy monitor=10000 on rh73-02-snmp Using the original execution date of: 2017-04-06 09:04:22Z Revised cluster status: Online: [ rh73-02-snmp ] OFFLINE: [ rh73-01-snmp ] prmDummy (ocf::pacemaker:Dummy): Started rh73-02-snmp prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp prmStonith2-1 (stonith:external/ssh): Started rh73-02-snmp diff --git a/pengine/test10/probe-2.summary b/pengine/test10/probe-2.summary index 05588c845c..7e74efcea1 100644 --- a/pengine/test10/probe-2.summary +++ b/pengine/test10/probe-2.summary @@ -1,162 +1,162 @@ Current cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 
Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc02 ] Slaves: [ wc01 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc02 intip_sql (ocf::heartbeat:IPaddr2): Started wc02 mysql-server (ocf::heartbeat:mysql): Started wc02 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] Slaves: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Started wc02 Transition Summary: * Promote drbd_mysql:0 (Slave -> Master wc01) - * Demote drbd_mysql:1 (Master -> Stopped wc02) - * Move fs_mysql (Started wc02 -> wc01) - * Move intip_sql (Started wc02 -> wc01) - * Move mysql-server (Started wc02 -> wc01) - * Stop drbd_www:1 (wc02) due to node availability + * Stop drbd_mysql:1 ( Master wc02 ) due to node availability + * Move fs_mysql ( wc02 -> wc01 ) + * Move intip_sql ( wc02 -> wc01 ) + * Move mysql-server ( wc02 -> wc01 ) + * Stop drbd_www:1 ( Slave wc02 ) due to node availability * Stop nfs-common:1 (wc02) due to node availability * Stop mysql-proxy:1 (wc02) due to node availability * Stop fs_www:1 (wc02) due to node availability * Stop apache2:1 (wc02) due to node availability - * Restart stonith_rackpdu:0 (Started wc01) + * Restart stonith_rackpdu:0 ( wc01 ) * Stop stonith_rackpdu:1 (wc02) due to node availability Executing cluster transition: * Resource action: drbd_mysql:0 cancel=10000 on wc01 * Pseudo action: ms_drbd_mysql_pre_notify_demote_0 * Pseudo action: group_mysql_stop_0 * Resource action: mysql-server stop on wc02 * Pseudo action: ms_drbd_www_pre_notify_stop_0 * Pseudo action: clone_mysql-proxy_stop_0 * Pseudo action: clone_webservice_stop_0 * Pseudo action: DoFencing_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_demote_0 * Resource action: intip_sql stop on wc02 * Resource action: drbd_www:0 notify on wc01 * Resource action: drbd_www:1 notify on wc02 * Pseudo action: ms_drbd_www_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_www_stop_0 * Pseudo action: group_mysql-proxy:1_stop_0 * Resource action: mysql-proxy:1 stop on wc02 * Pseudo action: group_webservice:1_stop_0 * Resource action: apache2:1 stop on wc02 * Resource action: stonith_rackpdu:0 stop on wc01 * Resource action: stonith_rackpdu:1 stop on wc02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Resource action: fs_mysql stop on wc02 * Resource action: drbd_www:1 stop on wc02 * Pseudo action: ms_drbd_www_stopped_0 * Pseudo action: group_mysql-proxy:1_stopped_0 * Pseudo action: clone_mysql-proxy_stopped_0 * Resource action: fs_www:1 stop on wc02 * Resource action: stonith_rackpdu:0 start on wc01 * Pseudo action: DoFencing_running_0 * Pseudo action: group_mysql_stopped_0 * Pseudo action: ms_drbd_www_post_notify_stopped_0 * Pseudo action: group_webservice:1_stopped_0 * Pseudo action: clone_webservice_stopped_0 * Resource action: stonith_rackpdu:0 monitor=5000 on wc01 * Pseudo action: ms_drbd_mysql_demote_0 * Resource action: drbd_www:0 notify on wc01 * Pseudo action: ms_drbd_www_confirmed-post_notify_stopped_0 * Pseudo 
action: clone_nfs-common_stop_0 * Resource action: drbd_mysql:1 demote on wc02 * Pseudo action: ms_drbd_mysql_demoted_0 * Pseudo action: group_nfs-common:1_stop_0 * Resource action: nfs-common:1 stop on wc02 * Pseudo action: ms_drbd_mysql_post_notify_demoted_0 * Pseudo action: group_nfs-common:1_stopped_0 * Pseudo action: clone_nfs-common_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_mysql_pre_notify_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_mysql_stop_0 * Resource action: drbd_mysql:1 stop on wc02 * Pseudo action: ms_drbd_mysql_stopped_0 * Pseudo action: ms_drbd_mysql_post_notify_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_mysql_pre_notify_promote_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_mysql_promote_0 * Resource action: drbd_mysql:0 promote on wc01 * Pseudo action: ms_drbd_mysql_promoted_0 * Pseudo action: ms_drbd_mysql_post_notify_promoted_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_promoted_0 * Pseudo action: group_mysql_start_0 * Resource action: fs_mysql start on wc01 * Resource action: intip_sql start on wc01 * Resource action: mysql-server start on wc01 * Resource action: drbd_mysql:0 monitor=5000 on wc01 * Pseudo action: group_mysql_running_0 * Resource action: fs_mysql monitor=30000 on wc01 * Resource action: intip_sql monitor=30000 on wc01 * Resource action: mysql-server monitor=30000 on wc01 Revised cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc01 intip_sql (ocf::heartbeat:IPaddr2): Started wc01 mysql-server (ocf::heartbeat:mysql): Started wc01 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Stopped diff --git a/pengine/test10/probe-4.summary b/pengine/test10/probe-4.summary index 00ecbc2d8b..691c4b64cf 100644 --- a/pengine/test10/probe-4.summary +++ b/pengine/test10/probe-4.summary @@ -1,56 +1,56 @@ Current cluster status: Node pcmk-4: pending Online: [ pcmk-1 pcmk-2 pcmk-3 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.182 (heartbeat:IPaddr): Started pcmk-1 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-1 
(ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 migrator (ocf::pacemaker:Dummy): Stopped Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Transition Summary: - * Start migrator (pcmk-3 - blocked) + * Start migrator ( pcmk-3 ) blocked Executing cluster transition: Revised cluster status: Node pcmk-4: pending Online: [ pcmk-1 pcmk-2 pcmk-3 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.182 (heartbeat:IPaddr): Started pcmk-1 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 migrator (ocf::pacemaker:Dummy): Stopped Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] diff --git a/pengine/test10/quorum-1.summary b/pengine/test10/quorum-1.summary index 83428d4edb..fd01941060 100644 --- a/pengine/test10/quorum-1.summary +++ b/pengine/test10/quorum-1.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: - * Move rsc2 (Started node1 -> node2) + * Move rsc2 ( node1 -> node2 ) * Start rsc3 (node1) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc3 start on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node1 diff --git a/pengine/test10/quorum-2.summary b/pengine/test10/quorum-2.summary index 1dce96cebf..f603bfa003 100644 --- a/pengine/test10/quorum-2.summary +++ b/pengine/test10/quorum-2.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: - * Move rsc2 (Started node1 -> node2) - * Start rsc3 (node1 - blocked) due to quorum freeze + * Move rsc2 ( node1 -> node2 ) + * Start rsc3 ( node1 ) due to quorum freeze (blocked) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started 
node2 rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/quorum-3.summary b/pengine/test10/quorum-3.summary index e8e4bf3662..cda0278347 100644 --- a/pengine/test10/quorum-3.summary +++ b/pengine/test10/quorum-3.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: - * Stop rsc1 (Started node1) due to no quorum - * Stop rsc2 (Started node1) due to no quorum - * Start rsc3 (node1 - blocked) due to no quorum + * Stop rsc1 ( node1 ) due to no quorum + * Stop rsc2 ( node1 ) due to no quorum + * Start rsc3 ( node1 ) due to no quorum (blocked) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-node-10.summary b/pengine/test10/rec-node-10.summary index 503dd0d13e..296a01867b 100644 --- a/pengine/test10/rec-node-10.summary +++ b/pengine/test10/rec-node-10.summary @@ -1,27 +1,27 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: - * Start stonith-1 (node2 - blocked) due to no quorum - * Stop rsc1 (Started node1 - blocked) due to no quorum - * Stop rsc2 (Started node1 - blocked) due to no quorum + * Start stonith-1 ( node2 ) due to no quorum (blocked) + * Stop rsc1 ( node1 ) due to no quorum (blocked) + * Stop rsc2 ( node1 ) due to no quorum (blocked) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 Revised cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) diff --git a/pengine/test10/rec-node-11.summary b/pengine/test10/rec-node-11.summary index e884ffa4c2..a354e49361 100644 --- a/pengine/test10/rec-node-11.summary +++ b/pengine/test10/rec-node-11.summary @@ -1,47 +1,47 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (online) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node2 Transition Summary: * Fence (reboot) node1 'peer process is no longer available' * Start stonith-1 (node2) - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) - * Restart rsc3 (Started node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) + * Restart rsc3 ( node2 ) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: group1_stopped_0 * Resource action: rsc3 stop on node2 * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource 
action: rsc3 start on node2 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Pseudo action: group1_running_0 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 Resource Group: group1 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-13.summary b/pengine/test10/rec-node-13.summary index de2fa28a26..8bab0dbcaf 100644 --- a/pengine/test10/rec-node-13.summary +++ b/pengine/test10/rec-node-13.summary @@ -1,80 +1,80 @@ Current cluster status: Node c001n04 (9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online) Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 Transition Summary: * Fence (reboot) c001n04 'ocf_msdummy:6 failed there' - * Stop ocf_msdummy:6 (c001n04) due to node availability + * Stop ocf_msdummy:6 ( Slave c001n04 ) due to node availability Executing cluster transition: * Fencing c001n04 (reboot) * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Pseudo action: ocf_msdummy:6_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n04 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 
(ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 diff --git a/pengine/test10/rec-node-15.summary b/pengine/test10/rec-node-15.summary index 760942c16f..07355c7484 100644 --- a/pengine/test10/rec-node-15.summary +++ b/pengine/test10/rec-node-15.summary @@ -1,88 +1,88 @@ Current cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Node sapcl03 (0bfb78a2-fcd2-4f52-8a06-2d17437a6750): UNCLEAN (offline) Online: [ sapcl01 ] stonith-1 (stonith:dummy): Stopped Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl02 LVM_12 (ocf::heartbeat:LVM): Started sapcl02 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl02 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Stopped LVM_22 (ocf::heartbeat:LVM): Stopped Filesystem_23 (ocf::heartbeat:Filesystem): Stopped oracle_24 (ocf::heartbeat:oracle): Stopped oralsnr_25 (ocf::heartbeat:oralsnr): Stopped Transition Summary: * Fence (reboot) sapcl03 'peer is no longer part of the cluster' * Start stonith-1 (sapcl01) - * Move IPaddr_192_168_1_102 (Started sapcl02 -> sapcl01) - * Move LVM_12 (Started sapcl02 -> sapcl01) - * Move Filesystem_13 (Started sapcl02 -> sapcl01) + * Move IPaddr_192_168_1_102 ( sapcl02 -> sapcl01 ) + * Move LVM_12 ( sapcl02 -> sapcl01 ) + * Move Filesystem_13 ( sapcl02 -> sapcl01 ) * Start IPaddr_192_168_1_104 (sapcl01) * Start LVM_22 (sapcl01) * Start Filesystem_23 (sapcl01) * Start oracle_24 (sapcl01) * Start oralsnr_25 (sapcl01) Executing cluster transition: * Resource action: stonith-1 monitor on sapcl02 * Resource action: stonith-1 monitor on sapcl01 * Pseudo action: app02_stop_0 * Resource action: Filesystem_13 stop on sapcl02 * Pseudo action: oracle_start_0 * Fencing sapcl03 (reboot) * Resource action: LVM_12 stop on sapcl02 * Pseudo action: stonith_complete * Resource action: IPaddr_192_168_1_102 stop on sapcl02 * Resource action: IPaddr_192_168_1_104 start on sapcl01 * Resource action: LVM_22 start on sapcl01 * Resource action: Filesystem_23 start on sapcl01 * Resource action: oracle_24 start on sapcl01 * Resource action: oralsnr_25 start on sapcl01 * Pseudo action: all_stopped * Resource action: stonith-1 
start on sapcl01 * Pseudo action: app02_stopped_0 * Pseudo action: app02_start_0 * Resource action: IPaddr_192_168_1_102 start on sapcl01 * Resource action: LVM_12 start on sapcl01 * Resource action: Filesystem_13 start on sapcl01 * Pseudo action: oracle_running_0 * Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01 * Resource action: LVM_22 monitor=120000 on sapcl01 * Resource action: Filesystem_23 monitor=120000 on sapcl01 * Resource action: oracle_24 monitor=120000 on sapcl01 * Resource action: oralsnr_25 monitor=120000 on sapcl01 * Pseudo action: app02_running_0 * Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01 * Resource action: LVM_12 monitor=120000 on sapcl01 * Resource action: Filesystem_13 monitor=120000 on sapcl01 Revised cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Online: [ sapcl01 ] OFFLINE: [ sapcl03 ] stonith-1 (stonith:dummy): Started sapcl01 Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_12 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_22 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_23 (ocf::heartbeat:Filesystem): Started sapcl01 oracle_24 (ocf::heartbeat:oracle): Started sapcl01 oralsnr_25 (ocf::heartbeat:oralsnr): Started sapcl01 diff --git a/pengine/test10/rec-node-4.summary b/pengine/test10/rec-node-4.summary index 4a39615d83..30c9667a45 100644 --- a/pengine/test10/rec-node-4.summary +++ b/pengine/test10/rec-node-4.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: * Fence (reboot) node1 'peer is no longer part of the cluster' * Start stonith-1 (node2) - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-6.summary b/pengine/test10/rec-node-6.summary index 6cb8d0194d..9a453604c8 100644 --- a/pengine/test10/rec-node-6.summary +++ b/pengine/test10/rec-node-6.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (online) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Transition Summary: * Fence (reboot) node1 'peer process is no longer available' * Start stonith-1 (node2) - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) Executing 
cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-7.summary b/pengine/test10/rec-node-7.summary index 4a39615d83..30c9667a45 100644 --- a/pengine/test10/rec-node-7.summary +++ b/pengine/test10/rec-node-7.summary @@ -1,36 +1,36 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: * Fence (reboot) node1 'peer is no longer part of the cluster' * Start stonith-1 (node2) - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Fencing node1 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on node2 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] stonith-1 (stonith:dummy): Started node2 rsc1 (heartbeat:apache): Started node2 rsc2 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-node-8.summary b/pengine/test10/rec-node-8.summary index 1a7232e605..bc23fb4d91 100644 --- a/pengine/test10/rec-node-8.summary +++ b/pengine/test10/rec-node-8.summary @@ -1,31 +1,31 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) rsc3 (heartbeat:apache): Stopped Transition Summary: - * Start stonith-1 (node2 - blocked) due to quorum freeze - * Stop rsc1 (Started node1 - blocked) - * Stop rsc2 (Started node1 - blocked) - * Start rsc3 (node2 - blocked) due to quorum freeze + * Start stonith-1 ( node2 ) due to quorum freeze (blocked) + * Stop rsc1 ( node1 ) blocked + * Stop rsc2 ( node1 ) blocked + * Start rsc3 ( node2 ) due to quorum freeze (blocked) Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 Revised cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-node-9.summary b/pengine/test10/rec-node-9.summary index 7f6d8aea7e..545d16c784 100644 --- a/pengine/test10/rec-node-9.summary +++ b/pengine/test10/rec-node-9.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped Transition Summary: - * 
Start rsc1 (node2 - blocked) due to no quorum - * Start rsc2 (node2 - blocked) due to no quorum + * Start rsc1 ( node2 ) due to no quorum (blocked) + * Start rsc2 ( node2 ) due to no quorum (blocked) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-rsc-1.summary b/pengine/test10/rec-rsc-1.summary index 13e8f6088e..c8f27d660d 100644 --- a/pengine/test10/rec-rsc-1.summary +++ b/pengine/test10/rec-rsc-1.summary @@ -1,20 +1,20 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): FAILED node1 Transition Summary: - * Recover rsc1 (Started node1 -> node2) + * Recover rsc1 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node2 diff --git a/pengine/test10/rec-rsc-2.summary b/pengine/test10/rec-rsc-2.summary index b639599d21..8dd3a5a989 100644 --- a/pengine/test10/rec-rsc-2.summary +++ b/pengine/test10/rec-rsc-2.summary @@ -1,21 +1,21 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): FAILED node1 Transition Summary: - * Recover rsc1 (Started node1) + * Recover rsc1 ( node1 ) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc1 stop on node1 * Resource action: rsc1 cancel=1 on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 diff --git a/pengine/test10/rec-rsc-5.summary b/pengine/test10/rec-rsc-5.summary index 28e29b56b7..4717750caf 100644 --- a/pengine/test10/rec-rsc-5.summary +++ b/pengine/test10/rec-rsc-5.summary @@ -1,36 +1,36 @@ Current cluster status: Node node2 (uuid2): UNCLEAN (online) Online: [ node1 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): FAILED node2 rsc2 (heartbeat:apache): Started node2 Transition Summary: * Fence (reboot) node2 'rsc1 failed there' * Start stonith-1 (node1) - * Recover rsc1 (Started node2 -> node1) - * Move rsc2 (Started node2 -> node1) + * Recover rsc1 ( node2 -> node1 ) + * Move rsc2 ( node2 -> node1 ) Executing cluster transition: * Resource action: stonith-1 monitor on node1 * Resource action: rsc1 monitor on node1 * Resource action: rsc2 monitor on node1 * Fencing node2 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: stonith-1 start on node1 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] stonith-1 (stonith:dummy): Started node1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 diff --git a/pengine/test10/rec-rsc-6.summary b/pengine/test10/rec-rsc-6.summary index c782e98efc..40222fc89c 100644 --- a/pengine/test10/rec-rsc-6.summary +++ b/pengine/test10/rec-rsc-6.summary @@ -1,20 +1,20 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started [ node1 node2 ] Transition Summary: - * Restart rsc1 (Started node1) + * Restart rsc1 ( node1 ) Executing cluster transition: * Resource action: rsc1 stop on node2 * Resource action: rsc1 stop on node1 * Pseudo action: 
all_stopped * Resource action: rsc1 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 diff --git a/pengine/test10/rec-rsc-9.summary b/pengine/test10/rec-rsc-9.summary index 02a2fe5f43..f7beb147a5 100644 --- a/pengine/test10/rec-rsc-9.summary +++ b/pengine/test10/rec-rsc-9.summary @@ -1,41 +1,41 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 Resource Group: foo rsc2 (heartbeat:apache): Started node1 Resource Group: bar rsc3 (heartbeat:apache): FAILED node1 Transition Summary: - * Restart rsc1 (Started node1) due to required bar running - * Restart rsc2 (Started node1) due to required bar running - * Recover rsc3 (Started node1) + * Restart rsc1 ( node1 ) due to required bar running + * Restart rsc2 ( node1 ) due to required bar running + * Recover rsc3 ( node1 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Pseudo action: foo_stop_0 * Resource action: rsc2 stop on node1 * Pseudo action: foo_stopped_0 * Pseudo action: bar_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: all_stopped * Pseudo action: bar_stopped_0 * Pseudo action: bar_start_0 * Resource action: rsc3 start on node1 * Pseudo action: bar_running_0 * Resource action: rsc1 start on node1 * Pseudo action: foo_start_0 * Resource action: rsc2 start on node1 * Pseudo action: foo_running_0 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 Resource Group: foo rsc2 (heartbeat:apache): Started node1 Resource Group: bar rsc3 (heartbeat:apache): Started node1 diff --git a/pengine/test10/reload-versioned.summary b/pengine/test10/reload-versioned.summary index 5b9181b17a..5d087f57c3 100644 --- a/pengine/test10/reload-versioned.summary +++ b/pengine/test10/reload-versioned.summary @@ -1,17 +1,17 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Reload A (Started node1) + * Reload A ( node1 ) Executing cluster transition: * Resource action: A reload on node1 Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/remote-fence-before-reconnect.summary b/pengine/test10/remote-fence-before-reconnect.summary index 7a225517ee..3aee89d990 100644 --- a/pengine/test10/remote-fence-before-reconnect.summary +++ b/pengine/test10/remote-fence-before-reconnect.summary @@ -1,39 +1,39 @@ Current cluster status: RemoteNode c7auto4: UNCLEAN (offline) Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto2 c7auto4 (ocf::pacemaker:remote): FAILED c7auto1 fake1 (ocf::heartbeat:Dummy): Started c7auto3 fake2 (ocf::heartbeat:Dummy): Started c7auto4 (UNCLEAN) fake3 (ocf::heartbeat:Dummy): Started c7auto1 fake4 (ocf::heartbeat:Dummy): Started c7auto2 fake5 (ocf::heartbeat:Dummy): Started c7auto3 Transition Summary: * Fence (reboot) c7auto4 'remote connection is unrecoverable' * Stop c7auto4 (c7auto1) - * Move fake2 (Started c7auto4 -> c7auto1) + * Move fake2 ( c7auto4 -> c7auto1 ) Executing cluster transition: * Fencing c7auto4 (reboot) * Pseudo action: fake2_stop_0 * Pseudo action: stonith_complete * Resource action: c7auto4 stop on c7auto1 * Resource action: fake2 start on c7auto1 * Pseudo action: all_stopped * Resource action: fake2 monitor=10000 on c7auto1 Revised cluster status: RemoteNode c7auto4: UNCLEAN (offline) Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto2 c7auto4 (ocf::pacemaker:remote): FAILED fake1 
(ocf::heartbeat:Dummy): Started c7auto3 fake2 (ocf::heartbeat:Dummy): Started c7auto1 fake3 (ocf::heartbeat:Dummy): Started c7auto1 fake4 (ocf::heartbeat:Dummy): Started c7auto2 fake5 (ocf::heartbeat:Dummy): Started c7auto3 diff --git a/pengine/test10/remote-fence-unclean.summary b/pengine/test10/remote-fence-unclean.summary index 9830b965b3..adfe730505 100644 --- a/pengine/test10/remote-fence-unclean.summary +++ b/pengine/test10/remote-fence-unclean.summary @@ -1,47 +1,47 @@ Current cluster status: RemoteNode remote1: UNCLEAN (offline) Online: [ 18builder 18node1 18node2 ] shooter (stonith:fence_xvm): Started 18builder remote1 (ocf::pacemaker:remote): FAILED 18node1 FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 (UNCLEAN) FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: * Fence (reboot) remote1 'remote connection is unrecoverable' - * Recover remote1 (Started 18node1) - * Move FAKE2 (Started remote1 -> 18builder) - * Move FAKE3 (Started 18builder -> 18node1) - * Move FAKE4 (Started 18node1 -> 18node2) + * Recover remote1 ( 18node1 ) + * Move FAKE2 ( remote1 -> 18builder ) + * Move FAKE3 ( 18builder -> 18node1 ) + * Move FAKE4 ( 18node1 -> 18node2 ) Executing cluster transition: * Resource action: FAKE3 stop on 18builder * Resource action: FAKE4 stop on 18node1 * Fencing remote1 (reboot) * Pseudo action: FAKE2_stop_0 * Pseudo action: stonith_complete * Resource action: remote1 stop on 18node1 * Resource action: FAKE2 start on 18builder * Resource action: FAKE3 start on 18node1 * Resource action: FAKE4 start on 18node2 * Pseudo action: all_stopped * Resource action: remote1 start on 18node1 * Resource action: remote1 monitor=60000 on 18node1 * Resource action: FAKE2 monitor=60000 on 18builder * Resource action: FAKE3 monitor=60000 on 18node1 * Resource action: FAKE4 monitor=60000 on 18node2 Revised cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18builder remote1 (ocf::pacemaker:remote): Started 18node1 FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started 18builder FAKE3 (ocf::heartbeat:Dummy): Started 18node1 FAKE4 (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/remote-move.summary b/pengine/test10/remote-move.summary index de223fd0be..d876a3f062 100644 --- a/pengine/test10/remote-move.summary +++ b/pengine/test10/remote-move.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Started 18builder FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: - * Move shooter (Started 18node1 -> 18builder) - * Migrate remote1 (Started 18builder -> 18node1) + * Move shooter ( 18node1 -> 18builder ) + * Migrate remote1 ( 18builder -> 18node1 ) Executing cluster transition: * Resource action: shooter stop on 18node1 * Resource action: remote1 migrate_to on 18builder * Resource action: shooter start on 18builder * Resource action: remote1 migrate_from on 18node1 * Resource action: remote1 stop on 18builder * Pseudo action: all_stopped * Resource action: shooter monitor=60000 on 18builder * Pseudo action: remote1_start_0 * Resource action: remote1 monitor=60000 on 18node1 Revised cluster 
status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18builder remote1 (ocf::pacemaker:remote): Started 18node1 FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/remote-orphaned.summary b/pengine/test10/remote-orphaned.summary index 2b9add7618..f2050070f0 100644 --- a/pengine/test10/remote-orphaned.summary +++ b/pengine/test10/remote-orphaned.summary @@ -1,68 +1,68 @@ Current cluster status: Online: [ 18node1 18node3 ] OFFLINE: [ 18node2 ] RemoteOnline: [ remote1 ] Fencing (stonith:fence_xvm): Started 18node3 FencingPass (stonith:fence_dummy): Started 18node1 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started remote1 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node3 remote1 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node3 ] Stopped: [ 18node2 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 remote1 (ocf::pacemaker:remote): ORPHANED Started 18node1 Transition Summary: - * Move rsc_18node2 (Started remote1 -> 18node1) + * Move rsc_18node2 ( remote1 -> 18node1 ) * Stop ping-1:2 (remote1) due to node availability * Stop remote1 (18node1) due to node availability Executing cluster transition: * Resource action: rsc_18node2 stop on remote1 * Pseudo action: Connectivity_stop_0 * Resource action: rsc_18node2 start on 18node1 * Resource action: ping-1 stop on remote1 * Pseudo action: Connectivity_stopped_0 * Resource action: remote1 stop on 18node1 * Resource action: remote1 delete on 18node3 * Resource action: remote1 delete on 18node1 * Pseudo action: all_stopped * Resource action: rsc_18node2 monitor=5000 on 18node1 Revised cluster status: Online: [ 18node1 18node3 ] OFFLINE: [ 18node2 ] RemoteOFFLINE: [ remote1 ] Fencing (stonith:fence_xvm): Started 18node3 FencingPass (stonith:fence_dummy): Started 18node1 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node3 ] Stopped: [ 18node2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node3 ] Stopped: [ 18node2 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 diff --git a/pengine/test10/remote-partial-migrate.summary b/pengine/test10/remote-partial-migrate.summary index e8abf92a1d..7d28db47f8 100644 --- a/pengine/test10/remote-partial-migrate.summary +++ b/pengine/test10/remote-partial-migrate.summary @@ -1,189 +1,189 @@ Current cluster status: Online: [ pcmk1 pcmk2 pcmk3 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 ] RemoteOFFLINE: [ 
pcmk_remote5 ] shooter (stonith:fence_docker_cts): Started pcmk2 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote3 (ocf::pacemaker:remote): Started [ pcmk2 pcmk1 ] pcmk_remote4 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote5 (ocf::pacemaker:remote): Stopped FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE4 (ocf::heartbeat:Dummy): Stopped FAKE5 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE9 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE10 (ocf::heartbeat:Dummy): Stopped FAKE11 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE13 (ocf::heartbeat:Dummy): Stopped FAKE14 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE15 (ocf::heartbeat:Dummy): Stopped FAKE16 (ocf::heartbeat:Dummy): Started pcmk1 FAKE17 (ocf::heartbeat:Dummy): Started pcmk3 FAKE18 (ocf::heartbeat:Dummy): Started pcmk2 FAKE19 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE20 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE21 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE22 (ocf::heartbeat:Dummy): Stopped FAKE23 (ocf::heartbeat:Dummy): Started pcmk1 FAKE24 (ocf::heartbeat:Dummy): Started pcmk3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE26 (ocf::heartbeat:Dummy): Stopped FAKE27 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE29 (ocf::heartbeat:Dummy): Stopped FAKE30 (ocf::heartbeat:Dummy): Started pcmk1 FAKE31 (ocf::heartbeat:Dummy): Started pcmk3 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE35 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE36 (ocf::heartbeat:Dummy): Stopped FAKE37 (ocf::heartbeat:Dummy): Started pcmk1 FAKE38 (ocf::heartbeat:Dummy): Started pcmk3 FAKE39 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE43 (ocf::heartbeat:Dummy): Stopped FAKE44 (ocf::heartbeat:Dummy): Started pcmk1 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE48 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE50 (ocf::heartbeat:Dummy): Stopped Transition Summary: - * Migrate pcmk_remote3 (Started pcmk1 -> pcmk2) + * Migrate pcmk_remote3 ( pcmk1 -> pcmk2 ) * Start FAKE4 (pcmk_remote3) - * Move FAKE9 (Started pcmk_remote3 -> pcmk1) + * Move FAKE9 ( pcmk_remote3 -> pcmk1 ) * Start FAKE10 (pcmk1) * Start FAKE13 (pcmk2) * Start FAKE15 (pcmk3) - * Move FAKE16 (Started pcmk1 -> pcmk_remote3) + * Move FAKE16 ( pcmk1 -> pcmk_remote3 ) * Start FAKE22 (pcmk1) - * Move FAKE23 (Started pcmk1 -> pcmk_remote1) + * Move FAKE23 ( pcmk1 -> pcmk_remote1 ) * Start FAKE26 (pcmk1) * Start FAKE29 (pcmk2) - * Move FAKE30 (Started pcmk1 -> pcmk_remote2) + * Move FAKE30 ( pcmk1 -> pcmk_remote2 ) * Start FAKE36 (pcmk1) - * Move FAKE37 (Started pcmk1 -> pcmk2) + * Move FAKE37 ( pcmk1 -> pcmk2 ) * Start FAKE43 (pcmk1) - * Move FAKE44 (Started pcmk1 -> pcmk2) + * Move FAKE44 ( pcmk1 -> pcmk2 ) * 
Start FAKE50 (pcmk1) Executing cluster transition: * Resource action: pcmk_remote3 migrate_from on pcmk2 * Resource action: pcmk_remote3 stop on pcmk1 * Resource action: FAKE10 start on pcmk1 * Resource action: FAKE13 start on pcmk2 * Resource action: FAKE15 start on pcmk3 * Resource action: FAKE16 stop on pcmk1 * Resource action: FAKE22 start on pcmk1 * Resource action: FAKE23 stop on pcmk1 * Resource action: FAKE26 start on pcmk1 * Resource action: FAKE29 start on pcmk2 * Resource action: FAKE30 stop on pcmk1 * Resource action: FAKE36 start on pcmk1 * Resource action: FAKE37 stop on pcmk1 * Resource action: FAKE43 start on pcmk1 * Resource action: FAKE44 stop on pcmk1 * Resource action: FAKE50 start on pcmk1 * Pseudo action: pcmk_remote3_start_0 * Resource action: FAKE4 start on pcmk_remote3 * Resource action: FAKE9 stop on pcmk_remote3 * Resource action: FAKE10 monitor=10000 on pcmk1 * Resource action: FAKE13 monitor=10000 on pcmk2 * Resource action: FAKE15 monitor=10000 on pcmk3 * Resource action: FAKE16 start on pcmk_remote3 * Resource action: FAKE22 monitor=10000 on pcmk1 * Resource action: FAKE23 start on pcmk_remote1 * Resource action: FAKE26 monitor=10000 on pcmk1 * Resource action: FAKE29 monitor=10000 on pcmk2 * Resource action: FAKE30 start on pcmk_remote2 * Resource action: FAKE36 monitor=10000 on pcmk1 * Resource action: FAKE37 start on pcmk2 * Resource action: FAKE43 monitor=10000 on pcmk1 * Resource action: FAKE44 start on pcmk2 * Resource action: FAKE50 monitor=10000 on pcmk1 * Pseudo action: all_stopped * Resource action: pcmk_remote3 monitor=60000 on pcmk2 * Resource action: FAKE4 monitor=10000 on pcmk_remote3 * Resource action: FAKE9 start on pcmk1 * Resource action: FAKE16 monitor=10000 on pcmk_remote3 * Resource action: FAKE23 monitor=10000 on pcmk_remote1 * Resource action: FAKE30 monitor=10000 on pcmk_remote2 * Resource action: FAKE37 monitor=10000 on pcmk2 * Resource action: FAKE44 monitor=10000 on pcmk2 * Resource action: FAKE9 monitor=10000 on pcmk1 Revised cluster status: Online: [ pcmk1 pcmk2 pcmk3 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 ] RemoteOFFLINE: [ pcmk_remote5 ] shooter (stonith:fence_docker_cts): Started pcmk2 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote3 (ocf::pacemaker:remote): Started pcmk2 pcmk_remote4 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote5 (ocf::pacemaker:remote): Stopped FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE5 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE9 (ocf::heartbeat:Dummy): Started pcmk1 FAKE10 (ocf::heartbeat:Dummy): Started pcmk1 FAKE11 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE13 (ocf::heartbeat:Dummy): Started pcmk2 FAKE14 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE15 (ocf::heartbeat:Dummy): Started pcmk3 FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE17 (ocf::heartbeat:Dummy): Started pcmk3 FAKE18 (ocf::heartbeat:Dummy): Started pcmk2 FAKE19 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE20 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE21 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE22 (ocf::heartbeat:Dummy): Started 
pcmk1 FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE24 (ocf::heartbeat:Dummy): Started pcmk3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE26 (ocf::heartbeat:Dummy): Started pcmk1 FAKE27 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE29 (ocf::heartbeat:Dummy): Started pcmk2 FAKE30 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE31 (ocf::heartbeat:Dummy): Started pcmk3 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE35 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE36 (ocf::heartbeat:Dummy): Started pcmk1 FAKE37 (ocf::heartbeat:Dummy): Started pcmk2 FAKE38 (ocf::heartbeat:Dummy): Started pcmk3 FAKE39 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE43 (ocf::heartbeat:Dummy): Started pcmk1 FAKE44 (ocf::heartbeat:Dummy): Started pcmk2 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE48 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE50 (ocf::heartbeat:Dummy): Started pcmk1 diff --git a/pengine/test10/remote-partial-migrate2.summary b/pengine/test10/remote-partial-migrate2.summary index 17fee27dcf..2a242bdfe9 100644 --- a/pengine/test10/remote-partial-migrate2.summary +++ b/pengine/test10/remote-partial-migrate2.summary @@ -1,208 +1,208 @@ Current cluster status: Node pcmk4 (4): UNCLEAN (offline) Online: [ pcmk1 pcmk2 pcmk3 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote5 ] RemoteOFFLINE: [ pcmk_remote4 ] shooter (stonith:fence_docker_cts): Started pcmk3 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started [ pcmk1 pcmk3 ] pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote4 (ocf::pacemaker:remote): Stopped pcmk_remote5 (ocf::pacemaker:remote): Started pcmk1 FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE5 (ocf::heartbeat:Dummy): Started pcmk1 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE9 (ocf::heartbeat:Dummy): Started pcmk2 FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE11 (ocf::heartbeat:Dummy): Started pcmk1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk1 FAKE13 (ocf::heartbeat:Dummy): Started pcmk3 FAKE14 (ocf::heartbeat:Dummy): Started pcmk2 FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE19 (ocf::heartbeat:Dummy): Started pcmk3 FAKE20 (ocf::heartbeat:Dummy): Started pcmk2 FAKE21 (ocf::heartbeat:Dummy): Started pcmk1 FAKE22 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE27 (ocf::heartbeat:Dummy): Started pcmk3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk3 FAKE29 
(ocf::heartbeat:Dummy): Started pcmk2 FAKE30 (ocf::heartbeat:Dummy): Started pcmk1 FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE35 (ocf::heartbeat:Dummy): Started pcmk1 FAKE36 (ocf::heartbeat:Dummy): Started pcmk3 FAKE37 (ocf::heartbeat:Dummy): Started pcmk2 FAKE38 (ocf::heartbeat:Dummy): Started pcmk2 FAKE39 (ocf::heartbeat:Dummy): Started pcmk1 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE44 (ocf::heartbeat:Dummy): Started pcmk2 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE48 (ocf::heartbeat:Dummy): Started pcmk1 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5 Transition Summary: * Fence (reboot) pcmk4 'peer is no longer part of the cluster' - * Migrate pcmk_remote2 (Started pcmk3 -> pcmk1) + * Migrate pcmk_remote2 ( pcmk3 -> pcmk1 ) * Start pcmk_remote4 (pcmk2) - * Migrate pcmk_remote5 (Started pcmk1 -> pcmk2) - * Move FAKE5 (Started pcmk1 -> pcmk_remote4) - * Move FAKE9 (Started pcmk2 -> pcmk_remote4) - * Move FAKE12 (Started pcmk1 -> pcmk2) - * Move FAKE14 (Started pcmk2 -> pcmk_remote1) - * Move FAKE17 (Started pcmk_remote1 -> pcmk_remote4) - * Move FAKE25 (Started pcmk_remote1 -> pcmk_remote4) - * Move FAKE28 (Started pcmk3 -> pcmk1) - * Move FAKE30 (Started pcmk1 -> pcmk_remote1) - * Move FAKE33 (Started pcmk_remote1 -> pcmk_remote4) - * Move FAKE38 (Started pcmk2 -> pcmk_remote1) - * Move FAKE39 (Started pcmk1 -> pcmk_remote2) - * Move FAKE41 (Started pcmk_remote2 -> pcmk_remote4) - * Move FAKE47 (Started pcmk_remote1 -> pcmk_remote2) - * Move FAKE48 (Started pcmk1 -> pcmk_remote3) - * Move FAKE49 (Started pcmk_remote3 -> pcmk_remote4) + * Migrate pcmk_remote5 ( pcmk1 -> pcmk2 ) + * Move FAKE5 ( pcmk1 -> pcmk_remote4 ) + * Move FAKE9 ( pcmk2 -> pcmk_remote4 ) + * Move FAKE12 ( pcmk1 -> pcmk2 ) + * Move FAKE14 ( pcmk2 -> pcmk_remote1 ) + * Move FAKE17 ( pcmk_remote1 -> pcmk_remote4 ) + * Move FAKE25 ( pcmk_remote1 -> pcmk_remote4 ) + * Move FAKE28 ( pcmk3 -> pcmk1 ) + * Move FAKE30 ( pcmk1 -> pcmk_remote1 ) + * Move FAKE33 ( pcmk_remote1 -> pcmk_remote4 ) + * Move FAKE38 ( pcmk2 -> pcmk_remote1 ) + * Move FAKE39 ( pcmk1 -> pcmk_remote2 ) + * Move FAKE41 ( pcmk_remote2 -> pcmk_remote4 ) + * Move FAKE47 ( pcmk_remote1 -> pcmk_remote2 ) + * Move FAKE48 ( pcmk1 -> pcmk_remote3 ) + * Move FAKE49 ( pcmk_remote3 -> pcmk_remote4 ) Executing cluster transition: * Resource action: FAKE5 stop on pcmk1 * Resource action: FAKE9 stop on pcmk2 * Resource action: FAKE12 stop on pcmk1 * Resource action: FAKE14 stop on pcmk2 * Resource action: FAKE17 stop on pcmk_remote1 * Resource action: FAKE25 stop on pcmk_remote1 * Resource action: FAKE28 stop on pcmk3 * Resource action: FAKE30 stop on pcmk1 * Resource action: FAKE33 stop on pcmk_remote1 * Resource action: FAKE38 stop on pcmk2 * Resource action: FAKE39 stop on pcmk1 * Resource action: FAKE47 stop on pcmk_remote1 * Resource action: FAKE48 stop on pcmk1 * Resource action: FAKE49 stop on pcmk_remote3 * Fencing pcmk4 (reboot) * Pseudo action: stonith_complete * Resource action: pcmk_remote2 migrate_from on pcmk1 * Resource action: pcmk_remote2 stop on pcmk3 * Resource 
action: pcmk_remote4 start on pcmk2 * Resource action: pcmk_remote5 migrate_to on pcmk1 * Resource action: FAKE5 start on pcmk_remote4 * Resource action: FAKE9 start on pcmk_remote4 * Resource action: FAKE12 start on pcmk2 * Resource action: FAKE14 start on pcmk_remote1 * Resource action: FAKE17 start on pcmk_remote4 * Resource action: FAKE25 start on pcmk_remote4 * Resource action: FAKE28 start on pcmk1 * Resource action: FAKE30 start on pcmk_remote1 * Resource action: FAKE33 start on pcmk_remote4 * Resource action: FAKE38 start on pcmk_remote1 * Resource action: FAKE48 start on pcmk_remote3 * Resource action: FAKE49 start on pcmk_remote4 * Pseudo action: pcmk_remote2_start_0 * Resource action: pcmk_remote4 monitor=60000 on pcmk2 * Resource action: pcmk_remote5 migrate_from on pcmk2 * Resource action: pcmk_remote5 stop on pcmk1 * Resource action: FAKE5 monitor=10000 on pcmk_remote4 * Resource action: FAKE9 monitor=10000 on pcmk_remote4 * Resource action: FAKE12 monitor=10000 on pcmk2 * Resource action: FAKE14 monitor=10000 on pcmk_remote1 * Resource action: FAKE17 monitor=10000 on pcmk_remote4 * Resource action: FAKE25 monitor=10000 on pcmk_remote4 * Resource action: FAKE28 monitor=10000 on pcmk1 * Resource action: FAKE30 monitor=10000 on pcmk_remote1 * Resource action: FAKE33 monitor=10000 on pcmk_remote4 * Resource action: FAKE38 monitor=10000 on pcmk_remote1 * Resource action: FAKE39 start on pcmk_remote2 * Resource action: FAKE41 stop on pcmk_remote2 * Resource action: FAKE47 start on pcmk_remote2 * Resource action: FAKE48 monitor=10000 on pcmk_remote3 * Resource action: FAKE49 monitor=10000 on pcmk_remote4 * Pseudo action: all_stopped * Resource action: pcmk_remote2 monitor=60000 on pcmk1 * Pseudo action: pcmk_remote5_start_0 * Resource action: FAKE39 monitor=10000 on pcmk_remote2 * Resource action: FAKE41 start on pcmk_remote4 * Resource action: FAKE47 monitor=10000 on pcmk_remote2 * Resource action: pcmk_remote5 monitor=60000 on pcmk2 * Resource action: FAKE41 monitor=10000 on pcmk_remote4 Revised cluster status: Online: [ pcmk1 pcmk2 pcmk3 ] OFFLINE: [ pcmk4 ] RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 pcmk_remote5 ] shooter (stonith:fence_docker_cts): Started pcmk3 pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote2 (ocf::pacemaker:remote): Started pcmk1 pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3 pcmk_remote4 (ocf::pacemaker:remote): Started pcmk2 pcmk_remote5 (ocf::pacemaker:remote): Started pcmk2 FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE5 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE9 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE11 (ocf::heartbeat:Dummy): Started pcmk1 FAKE12 (ocf::heartbeat:Dummy): Started pcmk2 FAKE13 (ocf::heartbeat:Dummy): Started pcmk3 FAKE14 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE19 (ocf::heartbeat:Dummy): Started pcmk3 FAKE20 (ocf::heartbeat:Dummy): Started pcmk2 FAKE21 (ocf::heartbeat:Dummy): Started pcmk1 FAKE22 
(ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE27 (ocf::heartbeat:Dummy): Started pcmk3 FAKE28 (ocf::heartbeat:Dummy): Started pcmk1 FAKE29 (ocf::heartbeat:Dummy): Started pcmk2 FAKE30 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE35 (ocf::heartbeat:Dummy): Started pcmk1 FAKE36 (ocf::heartbeat:Dummy): Started pcmk3 FAKE37 (ocf::heartbeat:Dummy): Started pcmk2 FAKE38 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE39 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5 FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1 FAKE44 (ocf::heartbeat:Dummy): Started pcmk2 FAKE45 (ocf::heartbeat:Dummy): Started pcmk3 FAKE46 (ocf::heartbeat:Dummy): Started pcmk1 FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote2 FAKE48 (ocf::heartbeat:Dummy): Started pcmk_remote3 FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote4 FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5 diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary index a8dac38be0..de312128d8 100644 --- a/pengine/test10/remote-recover-all.summary +++ b/pengine/test10/remote-recover-all.summary @@ -1,154 +1,154 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) galera-2 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop messaging-1 (controller-1) due to node availability - * Move galera-0 (Started controller-1 -> controller-2) + * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 (controller-1) due to node availability * Stop rabbitmq:2 (messaging-1) due to node availability - * Demote galera:1 (Master -> Stopped galera-2) - * Stop redis:0 (controller-1) due to node availability - * Move ip-172.17.1.14 (Started controller-1 -> controller-2) - * Move ip-172.17.1.17 (Started controller-1 -> controller-2) - * Move ip-172.17.4.11 (Started controller-1 -> controller-2) + * Stop galera:1 ( Master galera-2 ) due to node availability + * Stop redis:0 ( Slave controller-1 ) due to node availability + * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) + * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) + * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 (controller-1) due to node availability - * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) - * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) - * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) + * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) + * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing cluster transition: * Pseudo action: galera-master_demote_0 * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Pseudo action: galera_demote_0 * Pseudo action: galera-master_demoted_0 * Pseudo action: galera-master_stop_0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Pseudo action: galera_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: galera-master_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: galera-0 monitor=20000 on controller-2 * Pseudo action: galera-2_stop_0 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo 
action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: messaging-1_stop_0 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-connection.summary b/pengine/test10/remote-recover-connection.summary index 7b5b5fc378..8e91068529 100644 --- a/pengine/test10/remote-recover-connection.summary +++ b/pengine/test10/remote-recover-connection.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ 
galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Move messaging-1 (Started controller-1 -> controller-2) - * Move galera-0 (Started controller-1 -> controller-2) - * Move galera-2 (Started controller-1 -> controller-2) - * Stop redis:0 (controller-1) due to node availability - * Move ip-172.17.1.14 (Started controller-1 -> controller-2) - * Move ip-172.17.1.17 (Started controller-1 -> controller-2) - * Move ip-172.17.4.11 (Started controller-1 -> controller-2) + * Move messaging-1 ( controller-1 -> controller-2 ) + * Move galera-0 ( controller-1 -> controller-2 ) + * Move galera-2 ( controller-1 -> controller-2 ) + * Stop redis:0 ( Slave controller-1 ) due to node availability + * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) + * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) + * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 (controller-1) due to node availability - * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) - * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) - * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) + * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) + * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * 
Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Pseudo action: stonith_complete * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started 
controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-fail.summary b/pengine/test10/remote-recover-fail.summary index 302b716db4..d4e1aab2e3 100644 --- a/pengine/test10/remote-recover-fail.summary +++ b/pengine/test10/remote-recover-fail.summary @@ -1,54 +1,54 @@ Current cluster status: RemoteNode rhel7-auto4: UNCLEAN (offline) Online: [ rhel7-auto2 rhel7-auto3 ] OFFLINE: [ rhel7-auto1 ] shooter (stonith:fence_xvm): Started rhel7-auto3 rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto2 FAKE1 (ocf::heartbeat:Dummy): Stopped FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN) FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN) Transition Summary: * Fence (reboot) rhel7-auto4 'FAKE2 is thought to be active there' - * Recover rhel7-auto4 (Started rhel7-auto2) + * Recover rhel7-auto4 ( rhel7-auto2 ) * Start FAKE1 (rhel7-auto2) - * Move FAKE2 (Started rhel7-auto4 -> rhel7-auto3) - * Move FAKE6 (Started rhel7-auto4 -> rhel7-auto2) + * Move FAKE2 ( rhel7-auto4 -> rhel7-auto3 ) + * Move FAKE6 ( rhel7-auto4 -> rhel7-auto2 ) Executing cluster transition: * Resource action: FAKE3 monitor=10000 on rhel7-auto2 * Resource action: FAKE4 monitor=10000 on rhel7-auto3 * Fencing rhel7-auto4 (reboot) * Pseudo action: FAKE2_stop_0 * Pseudo action: FAKE6_stop_0 * Pseudo action: stonith_complete * Resource action: rhel7-auto4 stop on rhel7-auto2 * Resource action: FAKE1 start on rhel7-auto2 * Resource action: FAKE2 start on rhel7-auto3 * Resource action: FAKE6 start on rhel7-auto2 * Pseudo action: all_stopped * Resource action: rhel7-auto4 start on rhel7-auto2 * Resource action: FAKE1 monitor=10000 on rhel7-auto2 * Resource action: FAKE2 monitor=10000 on rhel7-auto3 * Resource action: FAKE6 monitor=10000 on rhel7-auto2 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto2 Revised cluster status: Online: [ rhel7-auto2 rhel7-auto3 ] OFFLINE: [ rhel7-auto1 ] RemoteOnline: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto2 FAKE1 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto2 diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary index 4383e8123f..c05e3552bd 100644 --- a/pengine/test10/remote-recover-no-resources.summary +++ b/pengine/test10/remote-recover-no-resources.summary @@ -1,145 +1,145 @@ Using the original execution date of: 2017-05-03 
13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop messaging-1 (controller-1) due to node availability - * Move galera-0 (Started controller-1 -> controller-2) + * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 (controller-1) * Stop rabbitmq:2 (messaging-1) due to node availability - * Stop redis:0 (controller-1) due to node availability - * Move ip-172.17.1.14 (Started controller-1 -> controller-2) - * Move ip-172.17.1.17 (Started controller-1 -> controller-2) - * Move ip-172.17.4.11 (Started controller-1 -> controller-2) + * Stop redis:0 ( Slave controller-1 ) due to node availability + * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) + * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) + * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 (controller-1) due to node availability - * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) - * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) - * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) + * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) + * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource 
action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Pseudo action: messaging-1_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 
messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary index 3ffb5f4b3d..bfbe7d367a 100644 --- a/pengine/test10/remote-recover-unknown.summary +++ b/pengine/test10/remote-recover-unknown.summary @@ -1,147 +1,147 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) galera-2 'resources are in an unknown state and the connection is unrecoverable' * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop 
messaging-1 (controller-1) due to node availability - * Move galera-0 (Started controller-1 -> controller-2) + * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 (controller-1) due to node availability * Stop rabbitmq:2 (messaging-1) due to node availability - * Stop redis:0 (controller-1) due to node availability - * Move ip-172.17.1.14 (Started controller-1 -> controller-2) - * Move ip-172.17.1.17 (Started controller-1 -> controller-2) - * Move ip-172.17.4.11 (Started controller-1 -> controller-2) + * Stop redis:0 ( Slave controller-1 ) due to node availability + * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) + * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) + * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 (controller-1) due to node availability - * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) - * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) - * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) + * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) + * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Pseudo action: messaging-1_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: 
stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recovery.summary b/pengine/test10/remote-recovery.summary index 7b5b5fc378..8e91068529 100644 --- a/pengine/test10/remote-recovery.summary +++ b/pengine/test10/remote-recovery.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ 
controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Move messaging-1 (Started controller-1 -> controller-2) - * Move galera-0 (Started controller-1 -> controller-2) - * Move galera-2 (Started controller-1 -> controller-2) - * Stop redis:0 (controller-1) due to node availability - * Move ip-172.17.1.14 (Started controller-1 -> controller-2) - * Move ip-172.17.1.17 (Started controller-1 -> controller-2) - * Move ip-172.17.4.11 (Started controller-1 -> controller-2) + * Move messaging-1 ( controller-1 -> controller-2 ) + * Move galera-0 ( controller-1 -> controller-2 ) + * Move galera-2 ( controller-1 -> controller-2 ) + * Stop redis:0 ( Slave controller-1 ) due to node availability + * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) + * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) + * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 (controller-1) due to node availability - * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) - * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) - * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) + * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) + * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Pseudo action: stonith_complete * Resource 
action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-start-fail.summary b/pengine/test10/remote-start-fail.summary index 4f64c7520e..aa08600e2a 100644 --- 
a/pengine/test10/remote-start-fail.summary +++ b/pengine/test10/remote-start-fail.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] RemoteOFFLINE: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto2 Transition Summary: - * Recover rhel7-auto4 (Started rhel7-auto2 -> rhel7-auto3) + * Recover rhel7-auto4 ( rhel7-auto2 -> rhel7-auto3 ) Executing cluster transition: * Resource action: rhel7-auto4 stop on rhel7-auto2 * Pseudo action: all_stopped * Resource action: rhel7-auto4 start on rhel7-auto3 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto3 Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] RemoteOnline: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto3 diff --git a/pengine/test10/remote-startup-probes.summary b/pengine/test10/remote-startup-probes.summary index d7a382ea1e..f47bf47352 100644 --- a/pengine/test10/remote-startup-probes.summary +++ b/pengine/test10/remote-startup-probes.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOFFLINE: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Stopped FAKE1 (ocf::heartbeat:Dummy): Started 18builder FAKE2 (ocf::heartbeat:Dummy): Started 18node2 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: * Start remote1 (18builder) - * Move FAKE1 (Started 18builder -> 18node2) - * Move FAKE2 (Started 18node2 -> remote1) + * Move FAKE1 ( 18builder -> 18node2 ) + * Move FAKE2 ( 18node2 -> remote1 ) Executing cluster transition: * Resource action: remote1 start on 18builder * Resource action: FAKE1 monitor on remote1 * Resource action: FAKE2 monitor on remote1 * Resource action: FAKE3 monitor on remote1 * Resource action: FAKE4 monitor on remote1 * Resource action: remote1 monitor=60000 on 18builder * Resource action: FAKE1 stop on 18builder * Resource action: FAKE2 stop on 18node2 * Pseudo action: all_stopped * Resource action: FAKE1 start on 18node2 * Resource action: FAKE2 start on remote1 * Resource action: FAKE1 monitor=60000 on 18node2 * Resource action: FAKE2 monitor=60000 on remote1 Revised cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Started 18builder FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/remote-startup.summary b/pengine/test10/remote-startup.summary index 56c9a957d8..4eb688731a 100644 --- a/pengine/test10/remote-startup.summary +++ b/pengine/test10/remote-startup.summary @@ -1,38 +1,38 @@ Current cluster status: RemoteNode remote1: UNCLEAN (offline) Online: [ 18builder 18node1 18node2 ] shooter (stonith:fence_xvm): Started 18builder fake (ocf::pacemaker:Dummy): Stopped remote1 (ocf::pacemaker:remote): Stopped Transition Summary: - * Move shooter (Started 18builder -> 18node1) + * Move shooter ( 18builder -> 18node1 ) * Start fake (18node2) * Start remote1 (18builder) Executing cluster transition: * Resource action: shooter stop on 18builder * Resource action: fake monitor on 18node2 * Resource action: fake monitor on 18node1 * Resource action: fake monitor on 18builder * Resource action: remote1 monitor 
on 18node2 * Resource action: remote1 monitor on 18node1 * Resource action: remote1 monitor on 18builder * Pseudo action: all_stopped * Resource action: shooter start on 18node1 * Resource action: remote1 start on 18builder * Resource action: shooter monitor=60000 on 18node1 * Resource action: fake monitor on remote1 * Resource action: remote1 monitor=60000 on 18builder * Resource action: fake start on 18node2 Revised cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 fake (ocf::pacemaker:Dummy): Started 18node2 remote1 (ocf::pacemaker:remote): Started 18builder diff --git a/pengine/test10/remote-unclean2.summary b/pengine/test10/remote-unclean2.summary index 1408c090ad..e1666b590b 100644 --- a/pengine/test10/remote-unclean2.summary +++ b/pengine/test10/remote-unclean2.summary @@ -1,27 +1,27 @@ Current cluster status: RemoteNode rhel7-auto4: UNCLEAN (offline) Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto1 Transition Summary: * Fence (reboot) rhel7-auto4 'remote connection is unrecoverable' - * Recover rhel7-auto4 (Started rhel7-auto1) + * Recover rhel7-auto4 ( rhel7-auto1 ) Executing cluster transition: * Resource action: rhel7-auto4 stop on rhel7-auto1 * Fencing rhel7-auto4 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: rhel7-auto4 start on rhel7-auto1 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto1 Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] RemoteOnline: [ rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto2 rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto1 diff --git a/pengine/test10/restart-versioned.summary b/pengine/test10/restart-versioned.summary index 9ae5c55ca3..14c6f17ca8 100644 --- a/pengine/test10/restart-versioned.summary +++ b/pengine/test10/restart-versioned.summary @@ -1,19 +1,19 @@ Current cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Restart A (Started node1) + * Restart A ( node1 ) Executing cluster transition: * Resource action: A stop on node1 * Resource action: A start on node1 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] A (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/rsc-discovery-per-node.summary b/pengine/test10/rsc-discovery-per-node.summary index ea82c3e4f2..13dff85ff5 100644 --- a/pengine/test10/rsc-discovery-per-node.summary +++ b/pengine/test10/rsc-discovery-per-node.summary @@ -1,129 +1,129 @@ Current cluster status: Online: [ 18builder 18node1 18node2 18node3 18node4 ] RemoteOFFLINE: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Stopped FAKE1 (ocf::heartbeat:Dummy): Stopped FAKE2 (ocf::heartbeat:Dummy): Started 18node2 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 FAKE5 (ocf::heartbeat:Dummy): Stopped Clone Set: FAKECLONE1-clone [FAKECLONE1] Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ] Transition Summary: * Start remote1 (18builder) * Start FAKE1 (18node2) - * Move FAKE2 (Started 18node2 -> 18node3) - * Move FAKE3 (Started 18builder -> 18node4) - * Move FAKE4 (Started 18node1 -> remote1) + * Move FAKE2 ( 18node2 -> 18node3 ) + * Move FAKE3 ( 18builder -> 18node4 
) + * Move FAKE4 ( 18node1 -> remote1 ) * Start FAKE5 (18builder) * Start FAKECLONE1:0 (18node1) * Start FAKECLONE1:1 (18node2) * Start FAKECLONE1:2 (18node3) * Start FAKECLONE1:3 (18node4) * Start FAKECLONE1:4 (remote1) * Start FAKECLONE1:5 (18builder) * Start FAKECLONE2:0 (18node1) * Start FAKECLONE2:1 (18node2) * Start FAKECLONE2:2 (18node3) * Start FAKECLONE2:3 (18node4) * Start FAKECLONE2:4 (remote1) * Start FAKECLONE2:5 (18builder) Executing cluster transition: * Resource action: shooter monitor on 18node4 * Resource action: shooter monitor on 18node3 * Resource action: remote1 monitor on 18node4 * Resource action: remote1 monitor on 18node3 * Resource action: FAKE1 monitor on 18node4 * Resource action: FAKE1 monitor on 18node3 * Resource action: FAKE1 monitor on 18node2 * Resource action: FAKE1 monitor on 18node1 * Resource action: FAKE1 monitor on 18builder * Resource action: FAKE2 monitor on 18node4 * Resource action: FAKE2 monitor on 18node3 * Resource action: FAKE3 monitor on 18node4 * Resource action: FAKE3 monitor on 18node3 * Resource action: FAKE4 monitor on 18node4 * Resource action: FAKE4 monitor on 18node3 * Resource action: FAKE5 monitor on 18node4 * Resource action: FAKE5 monitor on 18node3 * Resource action: FAKE5 monitor on 18node2 * Resource action: FAKE5 monitor on 18node1 * Resource action: FAKE5 monitor on 18builder * Resource action: FAKECLONE1:0 monitor on 18node1 * Resource action: FAKECLONE1:1 monitor on 18node2 * Resource action: FAKECLONE1:2 monitor on 18node3 * Resource action: FAKECLONE1:3 monitor on 18node4 * Resource action: FAKECLONE1:5 monitor on 18builder * Pseudo action: FAKECLONE1-clone_start_0 * Resource action: FAKECLONE2:0 monitor on 18node1 * Resource action: FAKECLONE2:1 monitor on 18node2 * Resource action: FAKECLONE2:2 monitor on 18node3 * Resource action: FAKECLONE2:3 monitor on 18node4 * Resource action: FAKECLONE2:5 monitor on 18builder * Pseudo action: FAKECLONE2-clone_start_0 * Resource action: remote1 start on 18builder * Resource action: FAKE1 start on 18node2 * Resource action: FAKE2 stop on 18node2 * Resource action: FAKE3 stop on 18builder * Resource action: FAKE4 stop on 18node1 * Resource action: FAKE5 start on 18builder * Resource action: FAKECLONE1:0 start on 18node1 * Resource action: FAKECLONE1:1 start on 18node2 * Resource action: FAKECLONE1:2 start on 18node3 * Resource action: FAKECLONE1:3 start on 18node4 * Resource action: FAKECLONE1:4 start on remote1 * Resource action: FAKECLONE1:5 start on 18builder * Pseudo action: FAKECLONE1-clone_running_0 * Resource action: FAKECLONE2:0 start on 18node1 * Resource action: FAKECLONE2:1 start on 18node2 * Resource action: FAKECLONE2:2 start on 18node3 * Resource action: FAKECLONE2:3 start on 18node4 * Resource action: FAKECLONE2:4 start on remote1 * Resource action: FAKECLONE2:5 start on 18builder * Pseudo action: FAKECLONE2-clone_running_0 * Pseudo action: all_stopped * Resource action: remote1 monitor=60000 on 18builder * Resource action: FAKE1 monitor=60000 on 18node2 * Resource action: FAKE2 start on 18node3 * Resource action: FAKE3 start on 18node4 * Resource action: FAKE4 start on remote1 * Resource action: FAKE5 monitor=60000 on 18builder * Resource action: FAKECLONE1:0 monitor=60000 on 18node1 * Resource action: FAKECLONE1:1 monitor=60000 on 18node2 * Resource action: FAKECLONE1:2 monitor=60000 on 18node3 * Resource action: FAKECLONE1:3 monitor=60000 on 18node4 * Resource action: FAKECLONE1:4 monitor=60000 on remote1 * Resource action: FAKECLONE1:5 monitor=60000 on 18builder 
* Resource action: FAKECLONE2:0 monitor=60000 on 18node1 * Resource action: FAKECLONE2:1 monitor=60000 on 18node2 * Resource action: FAKECLONE2:2 monitor=60000 on 18node3 * Resource action: FAKECLONE2:3 monitor=60000 on 18node4 * Resource action: FAKECLONE2:4 monitor=60000 on remote1 * Resource action: FAKECLONE2:5 monitor=60000 on 18builder * Resource action: FAKE2 monitor=60000 on 18node3 * Resource action: FAKE3 monitor=60000 on 18node4 * Resource action: FAKE4 monitor=60000 on remote1 Revised cluster status: Online: [ 18builder 18node1 18node2 18node3 18node4 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Started 18builder FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started 18node3 FAKE3 (ocf::heartbeat:Dummy): Started 18node4 FAKE4 (ocf::heartbeat:Dummy): Started remote1 FAKE5 (ocf::heartbeat:Dummy): Started 18builder Clone Set: FAKECLONE1-clone [FAKECLONE1] Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ] diff --git a/pengine/test10/rsc-sets-clone-1.summary b/pengine/test10/rsc-sets-clone-1.summary index afafc555ca..211ad9724a 100644 --- a/pengine/test10/rsc-sets-clone-1.summary +++ b/pengine/test10/rsc-sets-clone-1.summary @@ -1,84 +1,84 @@ 5 of 24 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ sys2 sys3 ] vm1 (ocf::heartbeat:Xen): Started sys2 vm2 (ocf::heartbeat:Xen): Stopped ( disabled ) vm3 (ocf::heartbeat:Xen): Stopped ( disabled ) vm4 (ocf::heartbeat:Xen): Stopped ( disabled ) stonithsys2 (stonith:external/ipmi): Stopped stonithsys3 (stonith:external/ipmi): Started sys2 Clone Set: baseclone [basegrp] Started: [ sys2 ] Stopped: [ sys3 ] Clone Set: fs1 [nfs1] Stopped (disabled): [ sys2 sys3 ] Transition Summary: - * Restart stonithsys3 (Started sys2) + * Restart stonithsys3 ( sys2 ) * Start controld:1 (sys3) * Start clvmd:1 (sys3) * Start o2cb:1 (sys3) * Start iscsi1:1 (sys3) * Start iscsi2:1 (sys3) * Start vg1:1 (sys3) * Start vg2:1 (sys3) * Start fs2:1 (sys3) * Start stonithsys2 (sys3) Executing cluster transition: * Resource action: vm1 monitor on sys3 * Resource action: vm2 monitor on sys3 * Resource action: vm3 monitor on sys3 * Resource action: vm4 monitor on sys3 * Resource action: stonithsys3 monitor on sys3 * Resource action: controld:1 monitor on sys3 * Resource action: clvmd:1 monitor on sys3 * Resource action: o2cb:1 monitor on sys3 * Resource action: iscsi1:1 monitor on sys3 * Resource action: iscsi2:1 monitor on sys3 * Resource action: vg1:1 monitor on sys3 * Resource action: vg2:1 monitor on sys3 * Resource action: fs2:1 monitor on sys3 * Pseudo action: baseclone_start_0 * Resource action: nfs1:0 monitor on sys3 * Resource action: stonithsys2 monitor on sys3 * Pseudo action: load_stopped_sys3 * Pseudo action: load_stopped_sys2 * Resource action: stonithsys3 stop on sys2 * Resource action: stonithsys3 start on sys2 * Resource action: stonithsys3 monitor=15000 on sys2 * Pseudo action: basegrp:1_start_0 * Resource action: controld:1 start on sys3 * Resource action: clvmd:1 start on sys3 * Resource action: o2cb:1 start on sys3 * Resource action: iscsi1:1 start on sys3 * Resource action: iscsi2:1 start on sys3 * Resource action: vg1:1 start on sys3 * Resource action: vg2:1 start on sys3 * Resource action: fs2:1 start on sys3 * Resource action: stonithsys2 start on sys3 * Pseudo action: all_stopped * Pseudo 
action: basegrp:1_running_0 * Resource action: controld:1 monitor=10000 on sys3 * Resource action: iscsi1:1 monitor=120000 on sys3 * Resource action: iscsi2:1 monitor=120000 on sys3 * Resource action: fs2:1 monitor=20000 on sys3 * Pseudo action: baseclone_running_0 * Resource action: stonithsys2 monitor=15000 on sys3 Revised cluster status: Online: [ sys2 sys3 ] vm1 (ocf::heartbeat:Xen): Started sys2 vm2 (ocf::heartbeat:Xen): Stopped ( disabled ) vm3 (ocf::heartbeat:Xen): Stopped ( disabled ) vm4 (ocf::heartbeat:Xen): Stopped ( disabled ) stonithsys2 (stonith:external/ipmi): Started sys3 stonithsys3 (stonith:external/ipmi): Started sys2 Clone Set: baseclone [basegrp] Started: [ sys2 sys3 ] Clone Set: fs1 [nfs1] Stopped (disabled): [ sys2 sys3 ] diff --git a/pengine/test10/rsc-sets-clone.summary b/pengine/test10/rsc-sets-clone.summary index 70570e6803..43f9e0f6c1 100644 --- a/pengine/test10/rsc-sets-clone.summary +++ b/pengine/test10/rsc-sets-clone.summary @@ -1,37 +1,37 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone-rsc [rsc] Started: [ node1 node2 ] Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) * Stop rsc:0 (node1) due to node availability Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc3 stop on node1 * Pseudo action: clone-rsc_stop_0 * Resource action: rsc1 start on node2 * Resource action: rsc3 start on node2 * Resource action: rsc:0 stop on node1 * Pseudo action: clone-rsc_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node2 Clone Set: clone-rsc [rsc] Started: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/rsc-sets-master.summary b/pengine/test10/rsc-sets-master.summary index 5415cda4c2..126edc7cdc 100644 --- a/pengine/test10/rsc-sets-master.summary +++ b/pengine/test10/rsc-sets-master.summary @@ -1,48 +1,48 @@ Current cluster status: Node node1: standby Online: [ node2 ] Master/Slave Set: ms-rsc [rsc] Masters: [ node1 ] Slaves: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Demote rsc:0 (Master -> Stopped node1) + * Stop rsc:0 ( Master node1 ) due to node availability * Promote rsc:1 (Slave -> Master node2) - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc3 stop on node1 * Pseudo action: ms-rsc_demote_0 * Resource action: rsc:0 demote on node1 * Pseudo action: ms-rsc_demoted_0 * Pseudo action: ms-rsc_stop_0 * Resource action: rsc:0 stop on node1 * Pseudo action: ms-rsc_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms-rsc_promote_0 * Resource action: rsc:1 promote on node2 * Pseudo action: ms-rsc_promoted_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 Revised cluster status: Node node1: standby 
Online: [ node2 ] Master/Slave Set: ms-rsc [rsc] Masters: [ node2 ] Stopped: [ node1 ] rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/rsc-sets-seq-false.summary b/pengine/test10/rsc-sets-seq-false.summary index 2d6dccffdd..5774f69f14 100644 --- a/pengine/test10/rsc-sets-seq-false.summary +++ b/pengine/test10/rsc-sets-seq-false.summary @@ -1,46 +1,46 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 rsc4 (ocf::pacemaker:Dummy): Started node1 rsc5 (ocf::pacemaker:Dummy): Started node1 rsc6 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) - * Move rsc4 (Started node1 -> node2) - * Move rsc5 (Started node1 -> node2) - * Move rsc6 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) + * Move rsc4 ( node1 -> node2 ) + * Move rsc5 ( node1 -> node2 ) + * Move rsc6 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc4 stop on node1 * Resource action: rsc5 stop on node1 * Resource action: rsc6 stop on node1 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 * Resource action: rsc4 start on node2 * Resource action: rsc5 start on node2 * Resource action: rsc6 start on node2 Revised cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node2 rsc4 (ocf::pacemaker:Dummy): Started node2 rsc5 (ocf::pacemaker:Dummy): Started node2 rsc6 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/rsc-sets-seq-true.summary b/pengine/test10/rsc-sets-seq-true.summary index e4fca51a2e..aba86c6bf4 100644 --- a/pengine/test10/rsc-sets-seq-true.summary +++ b/pengine/test10/rsc-sets-seq-true.summary @@ -1,46 +1,46 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 rsc4 (ocf::pacemaker:Dummy): Started node1 rsc5 (ocf::pacemaker:Dummy): Started node1 rsc6 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move rsc1 (Started node1 -> node2) - * Move rsc2 (Started node1 -> node2) - * Move rsc3 (Started node1 -> node2) - * Move rsc4 (Started node1 -> node2) - * Move rsc5 (Started node1 -> node2) - * Move rsc6 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) + * Move rsc2 ( node1 -> node2 ) + * Move rsc3 ( node1 -> node2 ) + * Move rsc4 ( node1 -> node2 ) + * Move rsc5 ( node1 -> node2 ) + * Move rsc6 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc6 stop on node1 * Resource action: rsc5 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 * Resource action: rsc4 start on node2 * Resource 
action: rsc5 start on node2 * Resource action: rsc6 start on node2 Revised cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node2 rsc4 (ocf::pacemaker:Dummy): Started node2 rsc5 (ocf::pacemaker:Dummy): Started node2 rsc6 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/rsc_dep4.summary b/pengine/test10/rsc_dep4.summary index 2fe21cf92c..b911b460ce 100644 --- a/pengine/test10/rsc_dep4.summary +++ b/pengine/test10/rsc_dep4.summary @@ -1,35 +1,35 @@ Current cluster status: Online: [ node1 node2 ] rsc2 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 rsc1 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: * Start rsc2 (node1) - * Move rsc4 (Started node1 -> node2) + * Move rsc4 ( node1 -> node2 ) * Start rsc3 (node2) Executing cluster transition: * Resource action: rsc2 monitor on node2 * Resource action: rsc2 monitor on node1 * Resource action: rsc4 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc2 start on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc3 start on node2 * Pseudo action: all_stopped * Resource action: rsc4 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc2 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node2 rsc1 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node2 diff --git a/pengine/test10/standby.summary b/pengine/test10/standby.summary index 89534cbf7a..f4d53a18ac 100644 --- a/pengine/test10/standby.summary +++ b/pengine/test10/standby.summary @@ -1,86 +1,86 @@ Current cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Node sapcl03 (0bfb78a2-fcd2-4f52-8a06-2d17437a6750): standby Online: [ sapcl01 ] Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl02 LVM_12 (ocf::heartbeat:LVM): Started sapcl02 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl02 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Started sapcl03 LVM_22 (ocf::heartbeat:LVM): Started sapcl03 Filesystem_23 (ocf::heartbeat:Filesystem): Started sapcl03 oracle_24 (ocf::heartbeat:oracle): Started sapcl03 oralsnr_25 (ocf::heartbeat:oralsnr): Started sapcl03 Transition Summary: - * Move IPaddr_192_168_1_102 (Started sapcl02 -> sapcl01) - * Move LVM_12 (Started sapcl02 -> sapcl01) - * Move Filesystem_13 (Started sapcl02 -> sapcl01) - * Move IPaddr_192_168_1_104 (Started sapcl03 -> sapcl01) - * Move LVM_22 (Started sapcl03 -> sapcl01) - * Move Filesystem_23 (Started sapcl03 -> sapcl01) - * Move oracle_24 (Started sapcl03 -> sapcl01) - * Move oralsnr_25 (Started sapcl03 -> sapcl01) + * Move IPaddr_192_168_1_102 ( sapcl02 -> sapcl01 ) + * Move LVM_12 ( sapcl02 -> sapcl01 ) + * Move Filesystem_13 ( sapcl02 -> sapcl01 ) + * Move IPaddr_192_168_1_104 ( sapcl03 -> sapcl01 ) + * Move LVM_22 ( sapcl03 -> sapcl01 ) + * Move Filesystem_23 ( sapcl03 -> sapcl01 ) + * Move oracle_24 ( sapcl03 -> sapcl01 ) + * Move oralsnr_25 ( sapcl03 -> sapcl01 ) Executing cluster transition: * Pseudo action: app02_stop_0 * Resource action: Filesystem_13 stop on sapcl02 * Pseudo action: oracle_stop_0 
* Resource action: oralsnr_25 stop on sapcl03 * Resource action: LVM_12 stop on sapcl02 * Resource action: oracle_24 stop on sapcl03 * Resource action: IPaddr_192_168_1_102 stop on sapcl02 * Resource action: Filesystem_23 stop on sapcl03 * Pseudo action: app02_stopped_0 * Pseudo action: app02_start_0 * Resource action: IPaddr_192_168_1_102 start on sapcl01 * Resource action: LVM_12 start on sapcl01 * Resource action: Filesystem_13 start on sapcl01 * Resource action: LVM_22 stop on sapcl03 * Pseudo action: app02_running_0 * Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01 * Resource action: LVM_12 monitor=120000 on sapcl01 * Resource action: Filesystem_13 monitor=120000 on sapcl01 * Resource action: IPaddr_192_168_1_104 stop on sapcl03 * Pseudo action: all_stopped * Pseudo action: oracle_stopped_0 * Pseudo action: oracle_start_0 * Resource action: IPaddr_192_168_1_104 start on sapcl01 * Resource action: LVM_22 start on sapcl01 * Resource action: Filesystem_23 start on sapcl01 * Resource action: oracle_24 start on sapcl01 * Resource action: oralsnr_25 start on sapcl01 * Pseudo action: oracle_running_0 * Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01 * Resource action: LVM_22 monitor=120000 on sapcl01 * Resource action: Filesystem_23 monitor=120000 on sapcl01 * Resource action: oracle_24 monitor=120000 on sapcl01 * Resource action: oralsnr_25 monitor=120000 on sapcl01 Revised cluster status: Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby Node sapcl03 (0bfb78a2-fcd2-4f52-8a06-2d17437a6750): standby Online: [ sapcl01 ] Resource Group: app01 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_2 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: app02 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_12 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl01 Resource Group: oracle IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Started sapcl01 LVM_22 (ocf::heartbeat:LVM): Started sapcl01 Filesystem_23 (ocf::heartbeat:Filesystem): Started sapcl01 oracle_24 (ocf::heartbeat:oracle): Started sapcl01 oralsnr_25 (ocf::heartbeat:oralsnr): Started sapcl01 diff --git a/pengine/test10/start-then-stop-with-unfence.summary b/pengine/test10/start-then-stop-with-unfence.summary index 5e127f71b8..2e02a21e74 100644 --- a/pengine/test10/start-then-stop-with-unfence.summary +++ b/pengine/test10/start-then-stop-with-unfence.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ rhel7-node1.example.com rhel7-node2.example.com ] mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com mpath-node1 (stonith:fence_mpath): Stopped ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com Clone Set: jrummy-clone [jrummy] Started: [ rhel7-node2.example.com ] Stopped: [ rhel7-node1.example.com ] Transition Summary: * Fence (on) rhel7-node1.example.com 'Required by ip1' * Start mpath-node1 (rhel7-node1.example.com) - * Move ip1 (Started rhel7-node2.example.com -> rhel7-node1.example.com) + * Move ip1 ( rhel7-node2.example.com -> rhel7-node1.example.com ) * Start jrummy:1 (rhel7-node1.example.com) Executing cluster transition: * Resource action: mpath-node2 monitor on rhel7-node1.example.com * Resource action: mpath-node1 monitor on rhel7-node1.example.com * Pseudo action: jrummy-clone_start_0 * Fencing rhel7-node1.example.com (on) * Resource action: mpath-node1 start on 
rhel7-node1.example.com * Resource action: jrummy start on rhel7-node1.example.com * Pseudo action: jrummy-clone_running_0 * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com * Resource action: ip1 stop on rhel7-node2.example.com * Resource action: jrummy monitor=10000 on rhel7-node1.example.com * Pseudo action: all_stopped * Resource action: ip1 start on rhel7-node1.example.com * Resource action: ip1 monitor=10000 on rhel7-node1.example.com Revised cluster status: Online: [ rhel7-node1.example.com rhel7-node2.example.com ] mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com mpath-node1 (stonith:fence_mpath): Started rhel7-node1.example.com ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node1.example.com ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com Clone Set: jrummy-clone [jrummy] Started: [ rhel7-node1.example.com rhel7-node2.example.com ] diff --git a/pengine/test10/stonith-0.summary b/pengine/test10/stonith-0.summary index 1062520241..78f0bbc04c 100644 --- a/pengine/test10/stonith-0.summary +++ b/pengine/test10/stonith-0.summary @@ -1,111 +1,111 @@ Current cluster status: Node c001n03 (f5e1d2de-73da-432a-9d5c-37472253c2ee): UNCLEAN (online) Node c001n05 (52a5ea5e-86ee-442c-b251-0bc9825c517e): UNCLEAN (online) Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started [ c001n03 c001n05 ] heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): FAILED [ c001n03 c001n05 ] lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 Transition Summary: * Fence (reboot) c001n05 'ocf_192.168.100.183 failed there' * Fence (reboot) c001n03 'ocf_192.168.100.183 failed there' - * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) - * Move heartbeat_192.168.100.182 
(Started c001n03 -> c001n02) - * Recover ocf_192.168.100.183 (Started c001n03 -> c001n02) - * Move rsc_c001n05 (Started c001n05 -> c001n07) - * Move rsc_c001n07 (Started c001n03 -> c001n07) + * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) + * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) + * Recover ocf_192.168.100.183 ( c001n03 -> c001n02 ) + * Move rsc_c001n05 ( c001n05 -> c001n07 ) + * Move rsc_c001n07 ( c001n03 -> c001n07 ) Executing cluster transition: * Resource action: child_DoFencing:4 monitor=20000 on c001n08 * Fencing c001n05 (reboot) * Fencing c001n03 (reboot) * Pseudo action: group-1_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: rsc_c001n05_stop_0 * Pseudo action: rsc_c001n07_stop_0 * Pseudo action: stonith_complete * Pseudo action: heartbeat_192.168.100.182_stop_0 * Resource action: rsc_c001n05 start on c001n07 * Resource action: rsc_c001n07 start on c001n07 * Pseudo action: ocf_192.168.100.181_stop_0 * Pseudo action: ocf_192.168.100.181_stop_0 * Resource action: rsc_c001n05 monitor=5000 on c001n07 * Resource action: rsc_c001n07 monitor=5000 on c001n07 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] OFFLINE: [ c001n03 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] Stopped: [ c001n03 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): 
Slave c001n04 ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 diff --git a/pengine/test10/stonith-1.summary b/pengine/test10/stonith-1.summary index 03c8a393aa..0e9c2a14ed 100644 --- a/pengine/test10/stonith-1.summary +++ b/pengine/test10/stonith-1.summary @@ -1,113 +1,113 @@ Current cluster status: Node sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): UNCLEAN (offline) Online: [ sles-1 sles-2 sles-4 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-3 (UNCLEAN) rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 (UNCLEAN) rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN) Started: [ sles-1 sles-2 ] Stopped: [ sles-4 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN ) ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped Transition Summary: * Fence (reboot) sles-3 'peer is no longer part of the cluster' * Start r192.168.100.183 (sles-1) - * Move migrator (Started sles-3 -> sles-4) - * Move rsc_sles-3 (Started sles-3 -> sles-4) - * Move child_DoFencing:2 (Started sles-3 -> sles-4) + * Move migrator ( sles-3 -> sles-4 ) + * Move rsc_sles-3 ( sles-3 -> sles-4 ) + * Move child_DoFencing:2 ( sles-3 -> sles-4 ) * Start ocf_msdummy:0 (sles-4) * Start ocf_msdummy:1 (sles-1) - * Move ocf_msdummy:2 (Slave sles-3 -> sles-2) + * Move ocf_msdummy:2 ( sles-3 -> sles-2 Slave ) * Start ocf_msdummy:3 (sles-4) * Start ocf_msdummy:4 (sles-1) - * Move ocf_msdummy:5 (Slave sles-3 -> sles-2) + * Move ocf_msdummy:5 ( sles-3 -> sles-2 Slave ) Executing cluster transition: * Pseudo action: group-1_start_0 * Resource action: r192.168.100.182 monitor=5000 on sles-1 * Resource action: lsb_dummy monitor=5000 on sles-2 * Resource action: rsc_sles-2 monitor=5000 on sles-2 * Resource action: rsc_sles-4 monitor=5000 on sles-4 * Pseudo action: DoFencing_stop_0 * Fencing sles-3 (reboot) * Pseudo action: migrator_stop_0 * Pseudo action: rsc_sles-3_stop_0 * Pseudo action: child_DoFencing:2_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Resource action: r192.168.100.183 start on sles-1 * Resource action: migrator start on sles-4 * Resource action: rsc_sles-3 start on sles-4 * Resource action: child_DoFencing:2 start on sles-4 * Pseudo action: DoFencing_running_0 * Pseudo action: ocf_msdummy:2_stop_0 * Pseudo action: ocf_msdummy:5_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Pseudo action: group-1_running_0 * Resource action: r192.168.100.183 monitor=5000 on sles-1 * Resource action: migrator monitor=10000 on sles-4 * Resource 
action: rsc_sles-3 monitor=5000 on sles-4 * Resource action: child_DoFencing:2 monitor=60000 on sles-4 * Resource action: ocf_msdummy:0 start on sles-4 * Resource action: ocf_msdummy:1 start on sles-1 * Resource action: ocf_msdummy:2 start on sles-2 * Resource action: ocf_msdummy:3 start on sles-4 * Resource action: ocf_msdummy:4 start on sles-1 * Resource action: ocf_msdummy:5 start on sles-2 * Pseudo action: master_rsc_1_running_0 * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 * Resource action: ocf_msdummy:3 monitor=5000 on sles-4 * Resource action: ocf_msdummy:4 monitor=5000 on sles-1 * Resource action: ocf_msdummy:5 monitor=5000 on sles-2 Revised cluster status: Online: [ sles-1 sles-2 sles-4 ] OFFLINE: [ sles-3 ] Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1 r192.168.100.182 (heartbeat:IPaddr): Started sles-1 r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1 lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 migrator (ocf::heartbeat:Dummy): Started sles-4 rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1 rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2 rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-4 rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4 Clone Set: DoFencing [child_DoFencing] Started: [ sles-1 sles-2 sles-4 ] Stopped: [ sles-3 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-4 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-1 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-2 ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/stonith-4.summary b/pengine/test10/stonith-4.summary index 76b1f4467e..7502dada0a 100644 --- a/pengine/test10/stonith-4.summary +++ b/pengine/test10/stonith-4.summary @@ -1,40 +1,40 @@ Current cluster status: Node pcmk-10 (110): UNCLEAN (online) Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-5 (105): UNCLEAN (offline) Node pcmk-7 (107): UNCLEAN (online) Node pcmk-8 (108): UNCLEAN (offline) Node pcmk-9 (109): pending Online: [ pcmk-1 ] OFFLINE: [ pcmk-4 pcmk-6 ] Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-10 'peer process is no longer available' * Fence (reboot) pcmk-8 'peer has not been seen by the cluster' * Fence (reboot) pcmk-7 'peer failed the pacemaker membership criteria' * Fence (reboot) pcmk-5 'peer has not been seen by the cluster' - * Start Fencing (pcmk-1 - blocked) + * Start Fencing ( pcmk-1 ) blocked Executing cluster transition: * Fencing pcmk-10 (reboot) * Fencing pcmk-5 (reboot) * Fencing pcmk-7 (reboot) * Fencing pcmk-8 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-9 (109): pending Online: [ pcmk-1 ] OFFLINE: [ pcmk-10 pcmk-4 pcmk-5 pcmk-6 pcmk-7 pcmk-8 ] Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stop-failure-no-quorum.summary b/pengine/test10/stop-failure-no-quorum.summary index 75945b17a7..52ec84f0ae 100644 --- a/pengine/test10/stop-failure-no-quorum.summary +++ b/pengine/test10/stop-failure-no-quorum.summary @@ 
-1,46 +1,46 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-2 'clvm:0 failed there' - * Start dlm:0 (pcmk-1 - blocked) due to no quorum + * Start dlm:0 ( pcmk-1 ) due to no quorum (blocked) * Stop clvm:0 (pcmk-2) due to node availability - * Start clvm:2 (pcmk-1 - blocked) due to no quorum - * Start ClusterIP (pcmk-1 - blocked) due to no quorum - * Start Fencing (pcmk-1 - blocked) due to no quorum + * Start clvm:2 ( pcmk-1 ) due to no quorum (blocked) + * Start ClusterIP ( pcmk-1 ) due to no quorum (blocked) + * Start Fencing ( pcmk-1 ) due to no quorum (blocked) Executing cluster transition: * Fencing pcmk-2 (reboot) * Pseudo action: clvm-clone_stop_0 * Pseudo action: stonith_complete * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stop-failure-with-fencing.summary b/pengine/test10/stop-failure-with-fencing.summary index 09680c8040..50c5f97a00 100644 --- a/pengine/test10/stop-failure-with-fencing.summary +++ b/pengine/test10/stop-failure-with-fencing.summary @@ -1,45 +1,45 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-2 'clvm:0 failed there' - * Start dlm:0 (pcmk-1 - blocked) due to no quorum + * Start dlm:0 ( pcmk-1 ) due to no quorum (blocked) * Stop clvm:0 (pcmk-2) due to node availability - * Start clvm:1 (pcmk-1 - blocked) due to no quorum - * Start ClusterIP (pcmk-1 - blocked) due to no quorum - * Start Fencing (pcmk-1 - blocked) due to no quorum + * Start clvm:1 ( pcmk-1 ) due to no quorum (blocked) + * Start ClusterIP ( pcmk-1 ) due to no quorum (blocked) + * Start Fencing ( pcmk-1 ) due to no quorum (blocked) Executing cluster transition: * Resource action: Fencing monitor on pcmk-1 * Fencing pcmk-2 (reboot) * Pseudo action: clvm-clone_stop_0 * Pseudo action: stonith_complete * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stopped-monitor-01.summary 
b/pengine/test10/stopped-monitor-01.summary index 43cc1779d2..7baf2b92fa 100644 --- a/pengine/test10/stopped-monitor-01.summary +++ b/pengine/test10/stopped-monitor-01.summary @@ -1,20 +1,20 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): FAILED node1 Transition Summary: - * Recover rsc1 (Started node1) + * Recover rsc1 ( node1 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Pseudo action: all_stopped * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/stopped-monitor-02.summary b/pengine/test10/stopped-monitor-02.summary index 6d97bfba8e..e1d44b5a96 100644 --- a/pengine/test10/stopped-monitor-02.summary +++ b/pengine/test10/stopped-monitor-02.summary @@ -1,22 +1,22 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): FAILED [ node1 node2 ] Transition Summary: - * Recover rsc1 (Started node1) + * Recover rsc1 ( node1 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc1 stop on node2 * Resource action: rsc1 monitor=20000 on node2 * Pseudo action: all_stopped * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/stopped-monitor-08.summary b/pengine/test10/stopped-monitor-08.summary index 6646859fb3..a15a906e63 100644 --- a/pengine/test10/stopped-monitor-08.summary +++ b/pengine/test10/stopped-monitor-08.summary @@ -1,24 +1,24 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 Transition Summary: - * Move rsc1 (Started node1 -> node2) + * Move rsc1 ( node1 -> node2 ) Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc1 cancel=20000 on node2 * Pseudo action: all_stopped * Resource action: rsc1 monitor=20000 on node1 * Resource action: rsc1 start on node2 * Resource action: rsc1 monitor=10000 on node2 Revised cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/ticket-master-14.summary b/pengine/test10/ticket-master-14.summary index 9a42d7882a..fa14935670 100644 --- a/pengine/test10/ticket-master-14.summary +++ b/pengine/test10/ticket-master-14.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) due to node availability + * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:1 ( Slave node2 ) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-master-15.summary b/pengine/test10/ticket-master-15.summary index 9a42d7882a..fa14935670 100644 --- a/pengine/test10/ticket-master-15.summary +++ b/pengine/test10/ticket-master-15.summary @@ -1,30 
+1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) due to node availability + * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:1 ( Slave node2 ) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-master-2.summary b/pengine/test10/ticket-master-2.summary index 96a797e340..6f5be53032 100644 --- a/pengine/test10/ticket-master-2.summary +++ b/pengine/test10/ticket-master-2.summary @@ -1,30 +1,29 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] Transition Summary: * Start rsc1:0 (node2) - * Start rsc1:1 (node1) * Promote rsc1:1 (Stopped -> Master node1) Executing cluster transition: * Pseudo action: ms1_start_0 * Resource action: rsc1:0 start on node2 * Resource action: rsc1:1 start on node1 * Pseudo action: ms1_running_0 * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node1 * Pseudo action: ms1_promoted_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] diff --git a/pengine/test10/ticket-master-21.summary b/pengine/test10/ticket-master-21.summary index 8fc50eb0a0..88f62fd64f 100644 --- a/pengine/test10/ticket-master-21.summary +++ b/pengine/test10/ticket-master-21.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Fence (reboot) node1 'deadman ticket was lost' - * Move rsc_stonith (Started node1 -> node2) - * Demote rsc1:0 (Master -> Stopped node1) + * Move rsc_stonith ( node1 -> node2 ) + * Stop rsc1:0 ( Master node1 ) due to node availability Executing cluster transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc_stonith (stonith:null): Started node2 Master/Slave Set: ms1 [rsc1] Slaves: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/ticket-master-3.summary b/pengine/test10/ticket-master-3.summary index 9a42d7882a..fa14935670 100644 --- a/pengine/test10/ticket-master-3.summary +++ b/pengine/test10/ticket-master-3.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) due to node availability + * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:1 ( 
Slave node2 ) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-master-9.summary b/pengine/test10/ticket-master-9.summary index 8fc50eb0a0..88f62fd64f 100644 --- a/pengine/test10/ticket-master-9.summary +++ b/pengine/test10/ticket-master-9.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Fence (reboot) node1 'deadman ticket was lost' - * Move rsc_stonith (Started node1 -> node2) - * Demote rsc1:0 (Master -> Stopped node1) + * Move rsc_stonith ( node1 -> node2 ) + * Stop rsc1:0 ( Master node1 ) due to node availability Executing cluster transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc_stonith (stonith:null): Started node2 Master/Slave Set: ms1 [rsc1] Slaves: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary index f728e39f60..4ca9344e72 100644 --- a/pengine/test10/unfence-definition.summary +++ b/pengine/test10/unfence-definition.summary @@ -1,65 +1,65 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: * Fence (reboot) virt-4 'node is unclean' * Fence (on) virt-3 'Required by dlm:2' * Fence (on) virt-1 'Device definition changed' - * Restart fencing (Started virt-1) - * Restart dlm:0 (Started virt-1) due to required stonith + * Restart fencing ( virt-1 ) + * Restart dlm:0 ( virt-1 ) due to required stonith * Start dlm:2 (virt-3) - * Restart clvmd:0 (Started virt-1) due to required stonith + * Restart clvmd:0 ( virt-1 ) due to required stonith * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: fencing monitor on virt-3 * Resource action: fencing stop on virt-1 * Resource action: clvmd monitor on virt-2 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing delete on virt-1 * Resource action: dlm monitor on virt-3 * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on 
virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary index e8b15ecb58..8a6356ca76 100644 --- a/pengine/test10/unfence-parameters.summary +++ b/pengine/test10/unfence-parameters.summary @@ -1,69 +1,69 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: * Fence (reboot) virt-4 'node is unclean' * Fence (on) virt-3 'Device parameters changed (reload)' * Fence (on) virt-2 'Device parameters changed (reload)' * Fence (on) virt-1 'Device parameters changed (reload)' - * Restart fencing (Started virt-1) - * Restart dlm:0 (Started virt-1) due to required stonith - * Restart dlm:1 (Started virt-2) due to required stonith + * Restart fencing ( virt-1 ) + * Restart dlm:0 ( virt-1 ) due to required stonith + * Restart dlm:1 ( virt-2 ) due to required stonith * Start dlm:2 (virt-3) - * Restart clvmd:0 (Started virt-1) due to required stonith + * Restart clvmd:0 ( virt-1 ) due to required stonith * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: fencing monitor on virt-3 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing stop on virt-1 * Resource action: dlm monitor on virt-3 * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-2 * Fencing virt-2 (on) * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * Resource action: clvmd monitor on virt-2 * Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on virt-2 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unmanaged-block-restart.summary b/pengine/test10/unmanaged-block-restart.summary index 01dffeb1d0..d7725c9090 100644 --- a/pengine/test10/unmanaged-block-restart.summary +++ b/pengine/test10/unmanaged-block-restart.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): 
Stopped rsc2 (ocf::pacemaker:Dummy): Started yingying.site rsc3 (ocf::pacemaker:Dummy): Started yingying.site rsc4 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: * Start rsc1 (yingying.site) - * Stop rsc2 (Started yingying.site - blocked) due to required rsc1 start - * Stop rsc3 (Started yingying.site - blocked) due to required rsc2 start + * Stop rsc2 ( yingying.site ) due to required rsc1 start (blocked) + * Stop rsc3 ( yingying.site ) due to required rsc2 start (blocked) Executing cluster transition: * Pseudo action: group1_stop_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on yingying.site * Resource action: rsc1 monitor=10000 on yingying.site Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site rsc2 (ocf::pacemaker:Dummy): Started yingying.site rsc3 (ocf::pacemaker:Dummy): Started yingying.site rsc4 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-1.summary b/pengine/test10/unmanaged-stop-1.summary index 94e0908467..b02c7dc118 100644 --- a/pengine/test10/unmanaged-stop-1.summary +++ b/pengine/test10/unmanaged-stop-1.summary @@ -1,19 +1,19 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) due to node availability + * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing cluster transition: Revised cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-2.summary b/pengine/test10/unmanaged-stop-2.summary index 94e0908467..b02c7dc118 100644 --- a/pengine/test10/unmanaged-stop-2.summary +++ b/pengine/test10/unmanaged-stop-2.summary @@ -1,19 +1,19 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) due to node availability + * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing cluster transition: Revised cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-3.summary b/pengine/test10/unmanaged-stop-3.summary index c72d4514d3..d4571572c4 100644 --- a/pengine/test10/unmanaged-stop-3.summary +++ b/pengine/test10/unmanaged-stop-3.summary @@ -1,22 +1,22 @@ 4 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) unrunnable rsc2 stop due to node availability + * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing cluster transition: * Pseudo action: group1_stop_0 Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): 
Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) diff --git a/pengine/test10/unmanaged-stop-4.summary b/pengine/test10/unmanaged-stop-4.summary index 2704e98c89..1ddd68b931 100644 --- a/pengine/test10/unmanaged-stop-4.summary +++ b/pengine/test10/unmanaged-stop-4.summary @@ -1,24 +1,24 @@ 6 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) rsc3 (ocf::heartbeat:Dummy): Stopped ( disabled ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) unrunnable rsc2 stop due to node availability + * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing cluster transition: * Pseudo action: group1_stop_0 Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) rsc3 (ocf::heartbeat:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/unrunnable-1.summary b/pengine/test10/unrunnable-1.summary index 8c27e45f10..54ca0d7a6e 100644 --- a/pengine/test10/unrunnable-1.summary +++ b/pengine/test10/unrunnable-1.summary @@ -1,65 +1,65 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.182 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n03 child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: - * Start DcIPaddr (c001n03 - blocked) due to no quorum - * Start child_192.168.100.181 (c001n03 - blocked) due to no quorum - * Start child_192.168.100.182 (c001n03 - blocked) due to no quorum - * Start child_192.168.100.183 (c001n03 - blocked) due to no quorum - * Start rsc_c001n08 (c001n03 - blocked) due to no quorum - * Start rsc_c001n02 (c001n03 - blocked) due to no quorum - * Start rsc_c001n03 (c001n03 - blocked) due to no quorum - * Start rsc_c001n01 (c001n03 - blocked) due to no quorum - * Stop child_DoFencing:1 (c001n02 - blocked) due to node availability + * Start DcIPaddr ( c001n03 ) due to no quorum (blocked) + * Start child_192.168.100.181 ( c001n03 ) due to no quorum (blocked) + * Start child_192.168.100.182 ( c001n03 ) due to no quorum (blocked) + * Start child_192.168.100.183 ( c001n03 ) due to no quorum (blocked) + * Start rsc_c001n08 ( c001n03 ) due to no quorum (blocked) + * Start rsc_c001n02 ( c001n03 ) due to no quorum (blocked) + * Start rsc_c001n03 ( c001n03 ) due to no quorum (blocked) + * Start rsc_c001n01 ( c001n03 ) due to no quorum (blocked) + * Stop child_DoFencing:1 ( c001n02 ) due to node availability (blocked) Executing cluster transition: * Resource action: DcIPaddr monitor on c001n03 * Resource action: child_192.168.100.181 monitor on c001n03 * Resource action: child_192.168.100.182 monitor on c001n03 * Resource 
action: child_192.168.100.183 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n03 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Pseudo action: DoFencing_stop_0 * Pseudo action: DoFencing_stopped_0 Revised cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.182 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n03 child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/unrunnable-2.summary b/pengine/test10/unrunnable-2.summary index b67f7cb421..4bbacece54 100644 --- a/pengine/test10/unrunnable-2.summary +++ b/pengine/test10/unrunnable-2.summary @@ -1,175 +1,175 @@ 6 of 117 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 Clone Set: haproxy-clone [haproxy] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: galera-master [galera] Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: memcached-clone [memcached] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: rabbitmq-clone [rabbitmq] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-core-clone [openstack-core] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: redis-master [redis] Masters: [ overcloud-controller-1 ] Slaves: [ overcloud-controller-0 overcloud-controller-2 ] ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 Clone Set: mongod-clone [mongod] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: openstack-heat-engine-clone [openstack-heat-engine] Stopped: [ 
overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-clone [openstack-heat-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-api-clone [openstack-glance-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-api-clone [openstack-nova-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-api-clone [openstack-sahara-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-registry-clone [openstack-glance-registry] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-api-clone [openstack-cinder-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: delay-clone [delay] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-server-clone [neutron-server] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: httpd-clone [httpd] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] Stopped: [ overcloud-controller-0 
overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Transition Summary: - * Start openstack-cinder-volume (overcloud-controller-2 - blocked) due to unrunnable openstack-cinder-scheduler-clone running + * Start openstack-cinder-volume ( overcloud-controller-2 ) due to unrunnable openstack-cinder-scheduler-clone running (blocked) Executing cluster transition: Revised cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 Clone Set: haproxy-clone [haproxy] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: galera-master [galera] Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: memcached-clone [memcached] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: rabbitmq-clone [rabbitmq] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-core-clone [openstack-core] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: redis-master [redis] Masters: [ overcloud-controller-1 ] Slaves: [ overcloud-controller-0 overcloud-controller-2 ] ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 Clone Set: mongod-clone [mongod] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: openstack-heat-engine-clone [openstack-heat-engine] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-clone [openstack-heat-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-api-clone [openstack-glance-api] Stopped: [ overcloud-controller-0 
overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-api-clone [openstack-nova-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-api-clone [openstack-sahara-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-registry-clone [openstack-glance-registry] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-api-clone [openstack-cinder-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: delay-clone [delay] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-server-clone [neutron-server] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: httpd-clone [httpd] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] diff --git a/pengine/test10/utilization-order2.summary b/pengine/test10/utilization-order2.summary index db65a164f6..ad3fd38623 100644 --- a/pengine/test10/utilization-order2.summary +++ b/pengine/test10/utilization-order2.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] rsc4 (ocf::pacemaker:Dummy): Stopped rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone-rsc2 [rsc2] Started: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 Transition Summary: * Start rsc4 (node1) - * Move rsc3 (Started node1 -> node2) + * Move rsc3 ( node1 -> node2 ) * Stop rsc2:0 (node1) due to node availability * Stop rsc1 (node2) Executing cluster transition: * Resource action: rsc3 stop on node1 * Pseudo action: clone-rsc2_stop_0 * Resource action: rsc1 stop on node2 * Pseudo action: load_stopped_node2 * Resource action: rsc3 start on node2 
* Resource action: rsc2:1 stop on node1 * Pseudo action: clone-rsc2_stopped_0 * Pseudo action: load_stopped_node1 * Pseudo action: all_stopped * Resource action: rsc4 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc4 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node2 Clone Set: clone-rsc2 [rsc2] Started: [ node2 ] Stopped: [ node1 ] rsc1 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/utilization-order3.summary b/pengine/test10/utilization-order3.summary index cfeea48924..9d59f379f1 100644 --- a/pengine/test10/utilization-order3.summary +++ b/pengine/test10/utilization-order3.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ node1 node2 ] rsc2 (ocf::pacemaker:Dummy): Stopped rsc1 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Start rsc2 (node1) - * Migrate rsc1 (Started node1 -> node2) + * Migrate rsc1 ( node1 -> node2 ) Executing cluster transition: * Pseudo action: load_stopped_node2 * Resource action: rsc1 migrate_to on node1 * Resource action: rsc1 migrate_from on node2 * Resource action: rsc1 stop on node1 * Pseudo action: load_stopped_node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 * Pseudo action: rsc1_start_0 Revised cluster status: Online: [ node1 node2 ] rsc2 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/utilization-order4.summary b/pengine/test10/utilization-order4.summary index 09a8c31fe8..04c5f93392 100644 --- a/pengine/test10/utilization-order4.summary +++ b/pengine/test10/utilization-order4.summary @@ -1,61 +1,61 @@ 2 of 13 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node deglxen002: standby Online: [ deglxen001 ] degllx62-vm (ocf::heartbeat:Xen): Started deglxen002 degllx63-vm (ocf::heartbeat:Xen): Stopped ( disabled ) degllx61-vm (ocf::heartbeat:Xen): Started deglxen001 degllx64-vm (ocf::heartbeat:Xen): Stopped ( disabled ) stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 deglxen002 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 deglxen002 ] Transition Summary: - * Migrate degllx62-vm (Started deglxen002 -> deglxen001) + * Migrate degllx62-vm ( deglxen002 -> deglxen001 ) * Stop degllx61-vm (deglxen001) * Stop nfs-xen_config:1 (deglxen002) due to node availability * Stop nfs-xen_swapfiles:1 (deglxen002) due to node availability * Stop nfs-xen_images:1 (deglxen002) due to node availability * Stop prim-ping:1 (deglxen002) due to node availability Executing cluster transition: * Resource action: degllx61-vm stop on deglxen001 * Pseudo action: load_stopped_deglxen001 * Resource action: degllx62-vm migrate_to on deglxen002 * Resource action: degllx62-vm migrate_from on deglxen001 * Resource action: degllx62-vm stop on deglxen002 * Pseudo action: clone-nfs_stop_0 * Pseudo action: load_stopped_deglxen002 * Pseudo action: degllx62-vm_start_0 * Pseudo action: grp-nfs:1_stop_0 * Resource action: nfs-xen_images:1 stop on deglxen002 * Resource action: degllx62-vm monitor=30000 on deglxen001 * Resource action: nfs-xen_swapfiles:1 stop on deglxen002 * Resource action: nfs-xen_config:1 stop on deglxen002 * Pseudo action: grp-nfs:1_stopped_0 * Pseudo action: clone-nfs_stopped_0 * Pseudo action: clone-ping_stop_0 * Resource action: prim-ping:0 stop on deglxen002 * Pseudo action: clone-ping_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node deglxen002: standby Online: [ deglxen001 ] 
degllx62-vm (ocf::heartbeat:Xen): Started deglxen001 degllx63-vm (ocf::heartbeat:Xen): Stopped ( disabled ) degllx61-vm (ocf::heartbeat:Xen): Stopped deglxen002 degllx64-vm (ocf::heartbeat:Xen): Stopped ( disabled ) stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 ] Stopped: [ deglxen002 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 ] Stopped: [ deglxen002 ] diff --git a/pengine/test10/versioned-operations-3.summary b/pengine/test10/versioned-operations-3.summary index 8638d6acd2..e3acedec2f 100644 --- a/pengine/test10/versioned-operations-3.summary +++ b/pengine/test10/versioned-operations-3.summary @@ -1,32 +1,31 @@ Current cluster status: Online: [ node1 node2 ] Master/Slave Set: A-master [A] Stopped: [ node1 node2 ] Transition Summary: * Start A:0 (node1) - * Start A:1 (node2) - * Promote A:1 (Stopped -> Master node2) + * Promote A:1 ( Stopped -> Master node2 ) Executing cluster transition: * Resource action: A:0 monitor on node1 * Resource action: A:1 monitor on node2 * Pseudo action: A-master_start_0 * Resource action: A:0 start on node1 * Resource action: A:1 start on node2 * Pseudo action: A-master_running_0 * Resource action: A:0 monitor=10000 on node1 * Pseudo action: A-master_promote_0 * Resource action: A:1 promote on node2 * Pseudo action: A-master_promoted_0 * Resource action: A:1 monitor=11000 on node2 Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: A-master [A] Masters: [ node2 ] Slaves: [ node1 ] diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary index 1d065520e7..1271d4f9a4 100644 --- a/pengine/test10/whitebox-fail1.summary +++ b/pengine/test10/whitebox-fail1.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): FAILED lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: * Fence (reboot) lxc1 (resource: container1) 'guest is unclean' - * Recover container1 (Started 18node2) - * Recover M:4 (Started lxc1) - * Recover B (Started lxc1) - * Restart lxc1 (Started 18node2) due to required container1 start + * Recover container1 ( 18node2 ) + * Recover M:4 ( lxc1 ) + * Recover B ( lxc1 ) + * Restart lxc1 ( 18node2 ) due to required container1 start Executing cluster transition: * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node2 * Pseudo action: M-clone_stop_0 * Pseudo action: B_stop_0 * Resource action: lxc1 start on 18node2 * Resource action: lxc1 monitor=30000 on 18node2 * Pseudo action: M_stop_0 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: B start on lxc1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: B monitor=10000 on lxc1 * Resource action: M monitor=10000 on lxc1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): 
Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-fail2.summary b/pengine/test10/whitebox-fail2.summary index 06364fb5eb..5fd1ebd764 100644 --- a/pengine/test10/whitebox-fail2.summary +++ b/pengine/test10/whitebox-fail2.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): FAILED lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: * Fence (reboot) lxc1 (resource: container1) 'guest is unclean' - * Recover container1 (Started 18node2) - * Recover M:4 (Started lxc1) - * Recover B (Started lxc1) - * Recover lxc1 (Started 18node2) + * Recover container1 ( 18node2 ) + * Recover M:4 ( lxc1 ) + * Recover B ( lxc1 ) + * Recover lxc1 ( 18node2 ) Executing cluster transition: * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node2 * Pseudo action: M-clone_stop_0 * Pseudo action: B_stop_0 * Resource action: lxc1 start on 18node2 * Resource action: lxc1 monitor=30000 on 18node2 * Pseudo action: M_stop_0 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: B start on lxc1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: B monitor=10000 on lxc1 * Resource action: M monitor=10000 on lxc1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-fail3.summary b/pengine/test10/whitebox-fail3.summary index 19b2e3d11c..eded0999e0 100644 --- a/pengine/test10/whitebox-fail3.summary +++ b/pengine/test10/whitebox-fail3.summary @@ -1,54 +1,54 @@ Current cluster status: Online: [ dvossel-laptop2 ] vm (ocf::heartbeat:VirtualDomain): Stopped vm2 (ocf::heartbeat:VirtualDomain): Stopped FAKE (ocf::pacemaker:Dummy): Started dvossel-laptop2 Master/Slave Set: W-master [W] Masters: [ dvossel-laptop2 ] Stopped: [ 18builder 18node1 ] Master/Slave Set: X-master [X] Masters: [ dvossel-laptop2 ] Stopped: [ 18builder 18node1 ] Transition Summary: * Start vm (dvossel-laptop2) - * Move FAKE (Started dvossel-laptop2 -> 18builder) + * Move FAKE ( dvossel-laptop2 -> 18builder ) * Start W:1 (18builder) * Start X:1 (18builder) * Start 18builder (dvossel-laptop2) Executing cluster transition: * Resource action: vm start on dvossel-laptop2 * Resource action: FAKE stop on dvossel-laptop2 * Pseudo action: W-master_start_0 * Pseudo 
action: X-master_start_0 * Resource action: 18builder monitor on dvossel-laptop2 * Pseudo action: all_stopped * Resource action: 18builder start on dvossel-laptop2 * Resource action: FAKE start on 18builder * Resource action: W start on 18builder * Pseudo action: W-master_running_0 * Resource action: X start on 18builder * Pseudo action: X-master_running_0 * Resource action: 18builder monitor=30000 on dvossel-laptop2 * Resource action: W monitor=10000 on 18builder * Resource action: X monitor=10000 on 18builder Revised cluster status: Online: [ dvossel-laptop2 ] Containers: [ 18builder:vm ] vm (ocf::heartbeat:VirtualDomain): Started dvossel-laptop2 vm2 (ocf::heartbeat:VirtualDomain): Stopped FAKE (ocf::pacemaker:Dummy): Started 18builder Master/Slave Set: W-master [W] Masters: [ dvossel-laptop2 ] Slaves: [ 18builder ] Stopped: [ 18node1 ] Master/Slave Set: X-master [X] Masters: [ dvossel-laptop2 ] Slaves: [ 18builder ] Stopped: [ 18node1 ] diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary index bea75a20e5..a3e9ce85fa 100644 --- a/pengine/test10/whitebox-imply-stop-on-fence.summary +++ b/pengine/test10/whitebox-imply-stop-on-fence.summary @@ -1,96 +1,96 @@ Current cluster status: Node kiff-01 (1): UNCLEAN (offline) Online: [ kiff-02 ] Containers: [ lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN) Clone Set: dlm-clone [dlm] dlm (ocf::pacemaker:controld): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] clvmd (ocf::heartbeat:clvm): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] shared0 (ocf::heartbeat:Filesystem): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN) R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN) R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): FAILED lxc-01_kiff-01 Transition Summary: * Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean' * Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean' * Fence (reboot) kiff-01 'peer is no longer part of the cluster' - * Move fence-kiff-02 (Started kiff-01 -> kiff-02) + * Move fence-kiff-02 ( kiff-01 -> kiff-02 ) * Stop dlm:0 (kiff-01) due to node availability * Stop clvmd:0 (kiff-01) due to node availability * Stop shared0:0 (kiff-01) due to node availability - * Recover R-lxc-01_kiff-01 (Started kiff-01 -> kiff-02) - * Move R-lxc-02_kiff-01 (Started kiff-01 -> kiff-02) - * Recover vm-fs (Started lxc-01_kiff-01) - * Move lxc-01_kiff-01 (Started kiff-01 -> kiff-02) - * Move lxc-02_kiff-01 (Started kiff-01 -> kiff-02) + * Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 ) + * Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 ) + * Recover vm-fs ( lxc-01_kiff-01 ) + * Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 ) + * Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 ) Executing cluster transition: * Pseudo action: fence-kiff-02_stop_0 * Fencing kiff-01 (reboot) * Pseudo action: lxc-01_kiff-01_stop_0 * Pseudo action: 
lxc-02_kiff-01_stop_0 * Pseudo action: R-lxc-01_kiff-01_stop_0 * Pseudo action: R-lxc-02_kiff-01_stop_0 * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01 * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01 * Pseudo action: stonith_complete * Pseudo action: shared0-clone_stop_0 * Resource action: R-lxc-01_kiff-01 start on kiff-02 * Resource action: R-lxc-02_kiff-01 start on kiff-02 * Pseudo action: vm-fs_stop_0 * Resource action: lxc-01_kiff-01 start on kiff-02 * Resource action: lxc-02_kiff-01 start on kiff-02 * Pseudo action: shared0_stop_0 * Pseudo action: shared0-clone_stopped_0 * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02 * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02 * Resource action: vm-fs start on lxc-01_kiff-01 * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 * Pseudo action: clvmd-clone_stop_0 * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 * Pseudo action: clvmd_stop_0 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Pseudo action: dlm_stop_0 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: all_stopped * Resource action: fence-kiff-02 start on kiff-02 * Resource action: fence-kiff-02 monitor=60000 on kiff-02 Revised cluster status: Online: [ kiff-02 ] OFFLINE: [ kiff-01 ] Containers: [ lxc-01_kiff-01:R-lxc-01_kiff-01 lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-01:R-lxc-02_kiff-01 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02 Clone Set: dlm-clone [dlm] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): Started lxc-01_kiff-01 diff --git a/pengine/test10/whitebox-migrate1.summary b/pengine/test10/whitebox-migrate1.summary index a6e4c00d75..0fc0f9a2bb 100644 --- a/pengine/test10/whitebox-migrate1.summary +++ b/pengine/test10/whitebox-migrate1.summary @@ -1,55 +1,55 @@ Current cluster status: Online: [ rhel7-node2 rhel7-node3 ] Containers: [ rhel7-node1:remote-rsc ] shooter1 (stonith:fence_xvm): Started rhel7-node3 FAKE1 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE2 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE3 (ocf::heartbeat:Dummy): Started rhel7-node3 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-node3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-node2 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE7 (ocf::heartbeat:Dummy): Started rhel7-node3 remote-rsc (ocf::heartbeat:Dummy): Started rhel7-node2 Transition Summary: - * Move shooter1 (Started rhel7-node3 -> rhel7-node2) - * Move FAKE3 (Started rhel7-node3 -> rhel7-node2) - * Migrate remote-rsc (Started rhel7-node2 -> rhel7-node3) - * Migrate rhel7-node1 (Started rhel7-node2 -> rhel7-node3) + * Move shooter1 ( rhel7-node3 -> rhel7-node2 ) + * Move FAKE3 ( rhel7-node3 -> rhel7-node2 ) + * Migrate remote-rsc ( rhel7-node2 -> rhel7-node3 ) + * Migrate 
rhel7-node1 ( rhel7-node2 -> rhel7-node3 ) Executing cluster transition: * Resource action: shooter1 stop on rhel7-node3 * Resource action: FAKE3 stop on rhel7-node3 * Resource action: remote-rsc migrate_to on rhel7-node2 * Resource action: rhel7-node1 monitor on rhel7-node3 * Resource action: shooter1 start on rhel7-node2 * Resource action: FAKE3 start on rhel7-node2 * Resource action: remote-rsc migrate_from on rhel7-node3 * Resource action: rhel7-node1 migrate_to on rhel7-node2 * Resource action: shooter1 monitor=60000 on rhel7-node2 * Resource action: FAKE3 monitor=10000 on rhel7-node2 * Resource action: rhel7-node1 migrate_from on rhel7-node3 * Resource action: rhel7-node1 stop on rhel7-node2 * Resource action: remote-rsc stop on rhel7-node2 * Pseudo action: all_stopped * Pseudo action: remote-rsc_start_0 * Pseudo action: rhel7-node1_start_0 * Resource action: remote-rsc monitor=10000 on rhel7-node3 * Resource action: rhel7-node1 monitor=30000 on rhel7-node3 Revised cluster status: Online: [ rhel7-node2 rhel7-node3 ] Containers: [ rhel7-node1:remote-rsc ] shooter1 (stonith:fence_xvm): Started rhel7-node2 FAKE1 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE2 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE3 (ocf::heartbeat:Dummy): Started rhel7-node2 FAKE4 (ocf::heartbeat:Dummy): Started rhel7-node3 FAKE5 (ocf::heartbeat:Dummy): Started rhel7-node2 FAKE6 (ocf::heartbeat:Dummy): Started rhel7-node1 FAKE7 (ocf::heartbeat:Dummy): Started rhel7-node3 remote-rsc (ocf::heartbeat:Dummy): Started rhel7-node3 diff --git a/pengine/test10/whitebox-move.summary b/pengine/test10/whitebox-move.summary index dbf0780532..5e27a67772 100644 --- a/pengine/test10/whitebox-move.summary +++ b/pengine/test10/whitebox-move.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started lxc1 Transition Summary: - * Move container1 (Started 18node1 -> 18node2) - * Restart M:3 (Started lxc1) due to required container1 start - * Restart A (Started lxc1) due to required container1 start - * Move lxc1 (Started 18node1 -> 18node2) + * Move container1 ( 18node1 -> 18node2 ) + * Restart M:3 ( lxc1 ) due to required container1 start + * Restart A ( lxc1 ) due to required container1 start + * Move lxc1 ( 18node1 -> 18node2 ) Executing cluster transition: * Pseudo action: M-clone_stop_0 * Resource action: A stop on lxc1 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: lxc1 stop on 18node1 * Resource action: container1 stop on 18node1 * Pseudo action: all_stopped * Resource action: container1 start on 18node2 * Resource action: lxc1 start on 18node2 * Resource action: M start on lxc1 * Resource action: M monitor=10000 on lxc1 * Pseudo action: M-clone_running_0 * Resource action: A start on lxc1 * Resource action: A monitor=10000 on lxc1 * Resource action: lxc1 monitor=30000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A 
(ocf::pacemaker:Dummy): Started lxc1 diff --git a/pengine/test10/whitebox-ms-ordering-move.summary b/pengine/test10/whitebox-ms-ordering-move.summary index b48d988759..80156b0fda 100644 --- a/pengine/test10/whitebox-ms-ordering-move.summary +++ b/pengine/test10/whitebox-ms-ordering-move.summary @@ -1,94 +1,94 @@ Current cluster status: Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-3 FencingPass (stonith:fence_dummy): Started rhel7-4 FencingFail (stonith:fence_dummy): Started rhel7-5 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Started rhel7-2 rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Started rhel7-3 rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Started rhel7-4 rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Started rhel7-5 migrator (ocf::pacemaker:Dummy): Started rhel7-4 Clone Set: Connectivity [ping-1] Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Stopped: [ lxc1 lxc2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ rhel7-3 ] Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] Resource Group: group-1 r192.168.122.207 (ocf::heartbeat:IPaddr2): Started rhel7-3 petulant (service:DummySD): Started rhel7-3 r192.168.122.208 (ocf::heartbeat:IPaddr2): Started rhel7-3 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-1 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-1 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] Transition Summary: - * Move container1 (Started rhel7-1 -> rhel7-2) + * Move container1 ( rhel7-1 -> rhel7-2 ) * Restart lxc-ms:0 (Master lxc1) due to required container1 start - * Move lxc1 (Started rhel7-1 -> rhel7-2) + * Move lxc1 ( rhel7-1 -> rhel7-2 ) Executing cluster transition: * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 monitor on rhel7-5 * Resource action: lxc1 monitor on rhel7-4 * Resource action: lxc1 monitor on rhel7-3 * Resource action: lxc1 monitor on rhel7-2 * Resource action: lxc2 monitor on rhel7-5 * Resource action: lxc2 monitor on rhel7-4 * Resource action: lxc2 monitor on rhel7-3 * Resource action: lxc2 monitor on rhel7-2 * Resource action: lxc-ms demote on lxc1 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc-ms stop on lxc1 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc1 stop on rhel7-1 * Resource action: container1 stop on rhel7-1 * Pseudo action: all_stopped * Resource action: container1 start on rhel7-2 * Resource action: lxc1 start on rhel7-2 * Resource action: lxc-ms start on lxc1 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc1 monitor=30000 on rhel7-2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised cluster status: Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-3 FencingPass (stonith:fence_dummy): Started rhel7-4 FencingFail (stonith:fence_dummy): Started rhel7-5 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Started rhel7-2 rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Started rhel7-3 rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Started rhel7-4 rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Started rhel7-5 migrator (ocf::pacemaker:Dummy): Started rhel7-4 Clone Set: Connectivity 
[ping-1] Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Stopped: [ lxc1 lxc2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ rhel7-3 ] Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] Resource Group: group-1 r192.168.122.207 (ocf::heartbeat:IPaddr2): Started rhel7-3 petulant (service:DummySD): Started rhel7-3 r192.168.122.208 (ocf::heartbeat:IPaddr2): Started rhel7-3 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-2 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-1 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] diff --git a/pengine/test10/whitebox-orphan-ms.summary b/pengine/test10/whitebox-orphan-ms.summary index 3efa6bd245..2b0234b261 100644 --- a/pengine/test10/whitebox-orphan-ms.summary +++ b/pengine/test10/whitebox-orphan-ms.summary @@ -1,85 +1,86 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started 18node2 FencingPass (stonith:fence_dummy): Started 18node3 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node2 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node2 18node3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node2 18node3 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node1 lxc1 (ocf::pacemaker:remote): ORPHANED Started 18node1 lxc-ms (ocf::pacemaker:Stateful): ORPHANED Master [ lxc1 lxc2 ] lxc2 (ocf::pacemaker:remote): ORPHANED Started 18node1 container1 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node1 Transition Summary: - * Move FencingFail (Started 18node3 -> 18node1) + * Move FencingFail ( 18node3 -> 18node1 ) * Stop container2 (18node1) due to node availability * Stop lxc1 (18node1) due to node availability - * Demote lxc-ms (Master -> Stopped lxc1) + * Stop lxc-ms ( Master lxc1 ) + * Stop lxc-ms ( Master lxc2 ) * Stop lxc2 (18node1) due to node availability * Stop container1 (18node1) due to node availability Executing cluster transition: * Resource action: FencingFail stop on 18node3 * Resource action: lxc-ms demote on lxc2 * Resource action: lxc-ms demote on lxc1 * Resource action: FencingFail start on 18node1 * Resource action: lxc-ms stop on lxc2 * Resource action: lxc-ms stop on lxc1 * Resource action: lxc-ms delete on 18node3 * Resource action: lxc-ms delete on 18node2 * Resource action: lxc-ms delete on 18node1 * Resource action: lxc2 stop on 18node1 * Resource action: lxc2 delete on 18node3 * Resource action: lxc2 delete on 18node2 * Resource action: lxc2 delete on 18node1 * Resource action: container2 stop on 18node1 * Resource action: container2 delete on 18node3 * Resource action: container2 delete on 18node2 * Resource action: container2 delete on 18node1 * Resource action: lxc1 stop on 18node1 * Resource action: lxc1 delete on 18node3 * Resource action: lxc1 delete on 18node2 * Resource action: lxc1 delete on 18node1 * Resource action: container1 stop on 18node1 * Resource action: container1 delete on 18node3 * 
Resource action: container1 delete on 18node2 * Resource action: container1 delete on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Fencing (stonith:fence_xvm): Started 18node2 FencingPass (stonith:fence_dummy): Started 18node3 FencingFail (stonith:fence_dummy): Started 18node1 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node2 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node2 18node3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node2 18node3 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 diff --git a/pengine/test10/whitebox-orphaned.summary b/pengine/test10/whitebox-orphaned.summary index 52b54aa5a3..7d173b2375 100644 --- a/pengine/test10/whitebox-orphaned.summary +++ b/pengine/test10/whitebox-orphaned.summary @@ -1,55 +1,55 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] M (ocf::pacemaker:Dummy): ORPHANED Started lxc1 Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 container1 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node2 lxc1 (ocf::pacemaker:remote): ORPHANED Started 18node2 Transition Summary: * Stop M:4 (lxc1) due to node availability - * Move B (Started lxc1 -> lxc2) + * Move B ( lxc1 -> lxc2 ) * Stop container1 (18node2) due to node availability * Stop lxc1 (18node2) due to node availability Executing cluster transition: * Pseudo action: M-clone_stop_0 * Resource action: B stop on lxc1 * Cluster action: clear_failcount for container1 on 18node2 * Cluster action: clear_failcount for lxc1 on 18node2 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Resource action: B start on lxc2 * Resource action: lxc1 stop on 18node2 * Resource action: lxc1 delete on 18node3 * Resource action: lxc1 delete on 18node2 * Resource action: lxc1 delete on 18node1 * Resource action: B monitor=10000 on lxc2 * Resource action: container1 stop on 18node2 * Resource action: container1 delete on 18node3 * Resource action: container1 delete on 18node2 * Resource action: container1 delete on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc2 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-start.summary b/pengine/test10/whitebox-start.summary index 6819ebc6f9..e76e28a45f 100644 --- a/pengine/test10/whitebox-start.summary +++ b/pengine/test10/whitebox-start.summary @@ -1,53 +1,53 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 
(ocf::heartbeat:VirtualDomain): Stopped container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] Stopped: [ lxc1 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc2 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: * Start container1 (18node1) * Start M:4 (lxc1) - * Move A (Started 18node1 -> lxc1) - * Move B (Started lxc2 -> 18node3) + * Move A ( 18node1 -> lxc1 ) + * Move B ( lxc2 -> 18node3 ) * Start lxc1 (18node1) Executing cluster transition: * Resource action: container1 start on 18node1 * Pseudo action: M-clone_start_0 * Resource action: A stop on 18node1 * Resource action: B stop on lxc2 * Resource action: lxc1 start on 18node1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: A start on lxc1 * Resource action: B start on 18node3 * Resource action: lxc1 monitor=30000 on 18node1 * Resource action: M monitor=10000 on lxc1 * Resource action: A monitor=10000 on lxc1 * Resource action: B monitor=10000 on 18node3 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started lxc1 B (ocf::pacemaker:Dummy): Started 18node3 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-stop.summary b/pengine/test10/whitebox-stop.summary index 89094dacae..9b15ea0c60 100644 --- a/pengine/test10/whitebox-stop.summary +++ b/pengine/test10/whitebox-stop.summary @@ -1,48 +1,48 @@ 1 of 14 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 ( disabled ) container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: * Stop container1 (18node2) due to node availability * Stop M:4 (lxc1) due to node availability - * Move B (Started lxc1 -> lxc2) + * Move B ( lxc1 -> lxc2 ) * Stop lxc1 (18node2) due to node availability Executing cluster transition: * Pseudo action: M-clone_stop_0 * Resource action: B stop on lxc1 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Resource action: B start on lxc2 * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Resource action: B monitor=10000 on lxc2 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Stopped ( disabled ) container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] Stopped: [ lxc1 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc2 C (ocf::pacemaker:Dummy): Started 
lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-unexpectedly-running.summary b/pengine/test10/whitebox-unexpectedly-running.summary index 0e69d3c8f0..eef4f6353e 100644 --- a/pengine/test10/whitebox-unexpectedly-running.summary +++ b/pengine/test10/whitebox-unexpectedly-running.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ 18builder ] FAKE (ocf::pacemaker:Dummy): FAILED 18builder Transition Summary: * Fence (reboot) remote1 (resource: FAKE) 'guest is unclean' - * Recover FAKE (Started 18builder) + * Recover FAKE ( 18builder ) * Start remote1 (18builder) Executing cluster transition: * Resource action: FAKE stop on 18builder * Resource action: remote1 monitor on 18builder * Pseudo action: stonith-remote1-reboot on remote1 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: FAKE start on 18builder * Resource action: remote1 start on 18builder * Resource action: FAKE monitor=60000 on 18builder * Resource action: remote1 monitor=30000 on 18builder Revised cluster status: Online: [ 18builder ] Containers: [ remote1:FAKE ] FAKE (ocf::pacemaker:Dummy): Started 18builder