diff --git a/pengine/graph.c b/pengine/graph.c index 4561627536..09e55dabb1 100644 --- a/pengine/graph.c +++ b/pengine/graph.c @@ -1,1004 +1,1003 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include gboolean update_action(action_t * action); gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); static enum pe_action_flags get_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = action->flags; if (action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if (action->rsc->variant >= pe_clone && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, its only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but its logical and appears to work well. 
*/ if (is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid); set_bit(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static char * convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify, gboolean free_original) { int interval = 0; char *uuid = NULL; char *rid = NULL; char *raw_task = NULL; int task = no_action; pe_rsc_trace(rsc, "Processing %s", old_uuid); if (old_uuid == NULL) { return NULL; } else if (strstr(old_uuid, "notify") != NULL) { goto done; /* no conversion */ } else if (rsc->variant < pe_group) { goto done; /* no conversion */ } CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval)); if (interval > 0) { goto done; /* no conversion */ } task = text2task(raw_task); switch (task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: task = no_action; break; default: crm_err("Unknown action: %s", raw_task); task = no_action; break; } if (task != no_action) { if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) { uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1)); } else { uuid = generate_op_key(rid, task2text(task + 1), 0); } pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(old_uuid); } if (free_original) { free(old_uuid); } free(raw_task); free(rid); return uuid; } static action_t * rsc_expand_action(action_t * action) { action_t *result = action; if (action->rsc && action->rsc->variant >= pe_group) { /* Expand 'start' -> 'started' */ char *uuid = NULL; gboolean notify = FALSE; if (action->rsc->parent == NULL) { /* Only outter-most resources have notification actions */ notify = is_set(action->rsc->flags, 
pe_rsc_notify); } uuid = convert_non_atomic_uuid(action->uuid, action->rsc, notify, FALSE); if (uuid) { pe_rsc_trace(action->rsc, "Converting %s to %s %d", action->uuid, uuid, is_set(action->rsc->flags, pe_rsc_notify)); result = find_first_action(action->rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_err("Couldn't expand %s", action->uuid); result = action; } free(uuid); } } return result; } static enum pe_graph_flags graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; gboolean processed = FALSE; /* TODO: Do as many of these in parallel as possible */ if (type & pe_order_implies_then) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_then); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(then, pe_action_optional | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies right: %s then %s", first->uuid, then->uuid); } } if ((type & pe_order_restart) && then->rsc) { enum pe_action_flags restart = (pe_action_optional | pe_action_runnable); processed = TRUE; changed |= then->rsc->cmds->update_actions(first, then, node, flags, restart, pe_order_restart); if (changed) { pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("restart: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first) { processed = TRUE; if (first->rsc) { changed |= first->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(first, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_first; } } 
if (changed) { pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_master) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_first_master); } if (changed) { pe_rsc_trace(then->rsc, "implies left when right rsc is Master role: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_one_or_more) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_one_or_more); } else if (is_set(flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_runnable_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_runnable_left); } else if (is_set(flags, pe_action_runnable) == FALSE) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_optional) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_optional); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", 
first->uuid, then->uuid); } } if (type & pe_order_asymmetrical) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_asymmetrical); } if (changed) { pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid); } } if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", first->uuid, then->uuid); update_action_flags(then, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_first_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", then->uuid, first->uuid); update_action_flags(first, pe_action_print_always); /* dont care about changed */ } if (processed == FALSE) { crm_trace("Constraint 0x%.6x not applicable", type); } return changed; } gboolean update_action(action_t * then) { GListPtr lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; crm_trace("Processing %s (%s %s %s)", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? 
then->node->details->uname : ""); if (is_set(then->flags, pe_action_requires_any)) { clear_bit(then->flags, pe_action_runnable); } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; action_t *first = other->action; node_t *then_node = then->node; node_t *first_node = first->node; enum pe_action_flags then_flags = 0; enum pe_action_flags first_flags = 0; if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node) { crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid); } } if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node) { crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid); } } clear_bit(changed, pe_graph_updated_first); if (first->rsc != then->rsc && first->rsc != NULL && then->rsc != NULL && first->rsc != then->rsc->parent) { first = rsc_expand_action(first); } if (first != other->action) { crm_trace("Ordering %s afer %s instead of %s", then->uuid, first->uuid, other->action->uuid); } first_flags = get_action_flags(first, then_node); then_flags = get_action_flags(then, first_node); crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) 0x%.6x", then->uuid, is_set(then_flags, pe_action_optional) ? "optional" : "required", is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then_flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : "", first->uuid, is_set(first_flags, pe_action_optional) ? "optional" : "required", is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first_flags, pe_action_pseudo) ? "pseudo" : first->node ? 
first->node->details-> uname : "", other->type); if (first == other->action) { clear_bit(first_flags, pe_action_pseudo); changed |= graph_update_action(first, then, then->node, first_flags, other->type); } else if (order_actions(first, then, other->type)) { /* Start again to get the new actions_before list */ changed |= (pe_graph_updated_then | pe_graph_disable); } if (changed & pe_graph_disable) { crm_trace("Disabled constraint %s -> %s", other->action->uuid, then->uuid); clear_bit(changed, pe_graph_disable); other->type = pe_order_none; } if (changed & pe_graph_updated_first) { GListPtr lpc2 = NULL; crm_trace("Updated %s (first %s %s %s), processing dependants ", first->uuid, is_set(first->flags, pe_action_optional) ? "optional" : "required", is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first->flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : ""); for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other = (action_wrapper_t *) lpc2->data; update_action(other->action); } update_action(first); } } if (is_set(then->flags, pe_action_requires_any)) { if (last_flags != then->flags) { changed |= pe_graph_updated_then; } else { clear_bit(changed, pe_graph_updated_then); } } if (changed & pe_graph_updated_then) { crm_trace("Updated %s (then %s %s %s), processing dependants ", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? 
then->node->details-> uname : ""); update_action(then); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; update_action(other->action); } } return FALSE; } gboolean shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set) { /* add the stop to the before lists so it counts as a pre-req * for the shutdown */ GListPtr lpc = NULL; for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) { action_t *action = (action_t *) lpc->data; if (action->rsc == NULL || action->node == NULL) { continue; } else if(action->node->details != node->details) { continue; } else if(is_set(data_set->flags, pe_flag_maintenance_mode)) { pe_rsc_trace(action->rsc, "Skipping %s: maintainence mode", action->uuid); continue; } else if(safe_str_neq(action->task, RSC_STOP)) { continue; } else if(is_not_set(action->rsc->flags, pe_rsc_managed) && is_not_set(action->rsc->flags, pe_rsc_block)) { /* * If another action depends on this one, we may still end up blocking */ pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid); continue; } pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid, node->details->uname); clear_bit(action->flags, pe_action_optional); custom_action_order(action->rsc, NULL, action, NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op, pe_order_optional|pe_order_runnable_left, data_set); } return TRUE; } gboolean stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set) { CRM_CHECK(stonith_op != NULL, return FALSE); /* * Make sure the stonith OP occurs before we start any shared resources */ if (stonith_op != NULL) { GListPtr lpc = NULL; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc_stonith_ordering(rsc, stonith_op, data_set); } } /* add the stonith OP as a stop pre-req and the mark the stop * as a pseudo op - since its now redundant */ return TRUE; } xmlNode * 
action2xml(action_t * action, gboolean as_input) { gboolean needs_node_info = TRUE; xmlNode *action_xml = NULL; xmlNode *args_xml = NULL; char *action_id_s = NULL; if (action == NULL) { return NULL; } if (safe_str_eq(action->task, CRM_OP_FENCE)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* needs_node_info = FALSE; */ } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* } else if(safe_str_eq(action->task, RSC_PROBED)) { */ /* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */ } else if (is_set(action->flags, pe_action_pseudo)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT); needs_node_info = FALSE; } else { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); } action_id_s = crm_itoa(action->id); crm_xml_add(action_xml, XML_ATTR_ID, action_id_s); free(action_id_s); crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task); if (action->rsc != NULL && action->rsc->clone_name != NULL) { char *clone_key = NULL; const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); int interval = crm_parse_int(interval_s, "0"); if (safe_str_eq(action->task, RSC_NOTIFY)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_operation"); CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid)); CRM_CHECK(n_task != NULL, crm_err("No notify operation value found for %s", action->uuid)); clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task); } else { clone_key = generate_op_key(action->rsc->clone_name, action->task, interval); } CRM_CHECK(clone_key != NULL, crm_err("Could 
not generate a key for %s", action->uuid)); crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key); crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid); free(clone_key); } else { crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid); } if (needs_node_info && action->node != NULL) { crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id); } if (is_set(action->flags, pe_action_failure_is_fatal) == FALSE) { add_hash_param(action->meta, XML_ATTR_TE_ALLOWFAIL, XML_BOOLEAN_TRUE); } if (as_input) { return action_xml; } if (action->rsc) { if (is_set(action->flags, pe_action_pseudo) == FALSE) { int lpc = 0; xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml)); const char *attr_list[] = { XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER, XML_ATTR_TYPE }; if(is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) { /* Do not use the 'instance free' name here as that * might interfere with the instance we plan to keep. * Ie. if there are more than two named /anonymous/ * instances on a given node, we need to make sure the * command goes to the right one. * * Keep this block, even when everyone is using * 'instance free' anonymous clone names - it means * we'll do the right thing if anyone toggles the * unique flag to 'off' */ crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } else if(is_not_set(action->rsc->flags, pe_rsc_unique)) { const char *xml_id = ID(action->rsc->xml); crm_debug("Using anonymous clone name %s for %s (aka. 
%s)", xml_id, action->rsc->id, action->rsc->clone_name); /* ID is what we'd like client to use * ID_LONG is what they might know it as instead * * ID_LONG is only strictly needed /here/ during the * transition period until all nodes in the cluster * are running the new software /and/ have rebooted * once (meaning that they've only ever spoken to a DC * supporting this feature). * * If anyone toggles the unique flag to 'on', the * 'instance free' name will correspond to an orphan * and fall into the claus above instead */ crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); if(action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); } else { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } } else { CRM_ASSERT(action->rsc->clone_name == NULL); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); } for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { crm_xml_add(rsc_xml, attr_list[lpc], g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); } } } args_xml = create_xml_node(NULL, XML_TAG_ATTRS); crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); g_hash_table_foreach(action->extra, hash2field, args_xml); if (action->rsc != NULL) { g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml); } g_hash_table_foreach(action->meta, hash2metafield, args_xml); if (action->rsc != NULL) { resource_t *parent = action->rsc; while (parent != NULL) { parent->cmds->append_meta(parent, args_xml); parent = parent->parent; } } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml); } sorted_xml(args_xml, action_xml, FALSE); crm_log_xml_trace(action_xml, "dumped action"); free_xml(args_xml); return action_xml; } static gboolean should_dump_action(action_t * action) { CRM_CHECK(action != NULL, return FALSE); if (is_set(action->flags, pe_action_dumped)) { crm_trace( "action %d (%s) was already dumped", action->id, 
action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) { GListPtr lpc = NULL; /* This is a horrible but convenient hack * * It mimimizes the number of actions with unsatisfied inputs * (ie. not included in the graph) * * This in turn, means we can be more concise when printing * aborted/incomplete graphs. * * It also makes it obvious which node is preventing * probe_complete from running (presumably because it is only * partially up) * * For these reasons we tolerate such perversions */ for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (is_not_set(wrapper->action->flags, pe_action_runnable)) { /* Only interested in runnable operations */ } else if (safe_str_neq(wrapper->action->task, RSC_START)) { /* Only interested in start operations */ } else if (is_set(wrapper->action->flags, pe_action_dumped)) { crm_trace( "action %d (%s) dependancy of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } else if (should_dump_action(wrapper->action)) { crm_trace( "action %d (%s) dependancy of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } } } if (is_set(action->flags, pe_action_runnable) == FALSE) { crm_trace( "action %d (%s) was not runnable", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_optional) && is_set(action->flags, pe_action_print_always) == FALSE) { crm_trace( "action %d (%s) was optional", action->id, action->uuid); return FALSE; } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) { const char *interval = NULL; interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); /* make sure probes and recurring monitors go through */ if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) { crm_trace( "action %d (%s) was for an unmanaged resource (%s)", action->id, action->uuid, action->rsc->id); return 
FALSE; } } if (is_set(action->flags, pe_action_pseudo) || safe_str_eq(action->task, CRM_OP_FENCE) || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { /* skip the next checks */ return TRUE; } if (action->node == NULL) { pe_err("action %d (%s) was not allocated", action->id, action->uuid); log_action(LOG_DEBUG, "Unallocated action", action, FALSE); return FALSE; } else if (action->node->details->online == FALSE) { pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for offline node", action, FALSE); return FALSE; #if 0 /* but this would also affect resources that can be safely * migrated before a fencing op */ } else if (action->node->details->unclean == FALSE) { pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for unclean node", action, FALSE); return FALSE; #endif } return TRUE; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const action_wrapper_t *action_wrapper2 = (const action_wrapper_t *)a; const action_wrapper_t *action_wrapper1 = (const action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } return 0; } static gboolean should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) { int type = wrapper->type; type &= ~pe_order_implies_first_printed; type &= ~pe_order_implies_then_printed; type &= ~pe_order_optional; wrapper->state = pe_link_not_dumped; if (last_action == wrapper->action->id) { crm_trace( "Input (%d) %s duplicated for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); wrapper->state = pe_link_dup; return FALSE; } else if (wrapper->type == pe_order_none) { crm_trace( "Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } 
else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) { crm_trace( "Input (%d) %s optional (ordering) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && (wrapper->type & pe_order_stonith_stop)) { crm_trace( "Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (wrapper->type == pe_order_load) { - crm_trace("check load filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node->details->uname, action->uuid, action->node->details->uname); + crm_trace("check load filter %s.%s -> %s.%s", + wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, action->node ? action->node->details->uname : ""); if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) { - /* For migrate_to ops, we care about where it has been - * allocated to, not where the action will be executed + /* Remove the orders like : + * "load_stopped_node2" -> "rscA_migrate_to node1" + * which were created from: pengine/native.c: MigrateRsc() + * order_actions(other, then, other_w->type); */ - if(wrapper->action->node == NULL || action->rsc->allocated_to == NULL - || wrapper->action->node->details != action->rsc->allocated_to->details) { - /* Check if the actions are for the same node, ignore otherwise */ - crm_trace("load filter - migrate"); - wrapper->type = pe_order_none; - return FALSE; - } + wrapper->type = pe_order_none; + return FALSE; } else if (wrapper->action->node == NULL || action->node == NULL || wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - node"); wrapper->type = pe_order_none; return FALSE; } else if(is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, 
ignore if so */ crm_trace("load filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->action->rsc && wrapper->action->rsc != action->rsc && is_set(wrapper->action->rsc->flags, pe_rsc_failed) && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed) && strstr(wrapper->action->uuid, "_stop_0") && action->rsc && action->rsc->variant >= pe_clone) { crm_warn("Ignoring requirement that %s comeplete before %s:" " unmanaged failed resources cannot prevent clone shutdown", wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_dumped) || should_dump_action(wrapper->action)) { crm_trace( "Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #if 0 } else if (is_set(wrapper->action->flags, pe_action_runnable) && is_set(wrapper->action->flags, pe_action_pseudo) && wrapper->action->rsc->variant != pe_native) { crm_crit("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #endif } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) { crm_trace( "Input (%d) %s optional for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); crm_trace( "Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type); return FALSE; } dump: crm_trace( "Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), 
is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid); return TRUE; } void graph_element_from_action(action_t * action, pe_working_set_t * data_set) { GListPtr lpc = NULL; int last_action = -1; int synapse_priority = 0; xmlNode *syn = NULL; xmlNode *set = NULL; xmlNode *in = NULL; xmlNode *input = NULL; xmlNode *xml_action = NULL; if (should_dump_action(action) == FALSE) { return; } set_bit(action->flags, pe_action_dumped); syn = create_xml_node(data_set->graph, "synapse"); set = create_xml_node(syn, "action_set"); in = create_xml_node(syn, "inputs"); crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse); data_set->num_synapse++; if (action->rsc != NULL) { synapse_priority = action->rsc->priority; } if (action->priority > synapse_priority) { synapse_priority = action->priority; } if (synapse_priority > 0) { crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority); } xml_action = action2xml(action, FALSE); add_node_nocopy(set, crm_element_name(xml_action), xml_action); action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (should_dump_input(last_action, action, wrapper) == FALSE) { continue; } wrapper->state = pe_link_dumped; CRM_CHECK(last_action < wrapper->action->id,; ); last_action = wrapper->action->id; input = create_xml_node(in, "trigger"); xml_action = action2xml(wrapper->action, TRUE); add_node_nocopy(input, crm_element_name(xml_action), xml_action); } } diff --git a/pengine/native.c b/pengine/native.c index 148b4dfc59..9e4029988c 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,3165 +1,3161 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your 
option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void pe_post_notify(resource_t * rsc, node_t * node, action_t * op, notify_data_t * n_data, pe_working_set_t * data_set); gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean 
optional, pe_working_set_t * data_set); gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); /* *INDENT-OFF* */ enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, DemoteRsc, RoleError, DemoteRsc, NullOp, }, }; /* *INDENT-ON* */ struct capacity_data { node_t *node; resource_t *rsc; gboolean is_enough; }; static gboolean is_fencing_resource(resource_t *rsc) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_eq(class, "stonith")) { return TRUE; } return FALSE; } static void check_capacity(gpointer key, gpointer value, gpointer user_data) { int required = 0; int remaining = 0; struct capacity_data *data = user_data; required = crm_parse_int(value, "0"); remaining = 
crm_parse_int(g_hash_table_lookup(data->node->details->utilization, key), "0"); if (required > remaining) { pe_rsc_debug(data->rsc, "Node %s has no enough %s for resource %s: required=%d remaining=%d", data->node->details->uname, (char *)key, data->rsc->id, required, remaining); data->is_enough = FALSE; } } static gboolean have_enough_capacity(node_t * node, resource_t * rsc) { struct capacity_data data; data.node = node; data.rsc = rsc; data.is_enough = TRUE; g_hash_table_foreach(rsc->utilization, check_capacity, &data); return data.is_enough; } static gboolean native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest wieght) with the fewest resources 3. remove color.chosen_node from all other colors */ int alloc_details = scores_log_level + 1; GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; gboolean result = FALSE; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (have_enough_capacity(node, rsc) == FALSE) { pe_rsc_debug(rsc, "Resource %s cannot be allocated to node %s: none of enough capacity", rsc->id, node->details->uname); resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set); } } dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes); } length = g_hash_table_size(rsc->allowed_nodes); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? 
TRUE : FALSE; } if (prefer) { chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen && chosen->weight >= 0 && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Using preferred node %s for %s instead of choosing from %d candidates", chosen->details->uname, rsc->id, length); } else if (chosen && chosen->weight < 0) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else if (chosen && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); } } if (chosen == NULL && rsc->allowed_nodes) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0)); chosen = g_list_nth_data(nodes, 0); pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ? chosen->details->uname : "", rsc->id, length); if (chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if (running && can_run_resources(running) == FALSE) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); running = NULL; } for (lpc = 1; lpc < length && running; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if (tmp->weight == chosen->weight) { multiple++; if (tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if (multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. 
Chose %s.", multiple, score, rsc->id, chosen->details->uname); free(score); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable * list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { int weight = node->weight; if (can_run_resources(node) == FALSE) { weight = -INFINITY; } if (weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if (safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if (safe_str_neq(attr, "#" XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node ? best_node : "", best_score); } return best_score; } static void node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list1); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); new_score = merge_weights(factor * score, node->weight); if (factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO - Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %f*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f*%d (node < 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && 
/* Recursively fold colocation-constraint scores into a node-weight table.
 *
 * @rsc     resource whose constraints are being applied
 * @rhs     id of the resource the merge is ultimately for (logging only)
 * @nodes   incoming weight table; OWNERSHIP TRANSFERS to this function —
 *          it is either returned unchanged (loop/rollback) or destroyed
 *          and replaced by the merged copy
 * @attr    node attribute colocation is keyed on (NULL -> uname)
 * @factor  weight multiplier, sign carries constraint direction
 * @flags   pe_weights_* behavior bits (init/forward/positive/rollback)
 *
 * Returns the merged table (caller owns it).  pe_rsc_merging on rsc->flags
 * guards against constraint dependency loops.
 */
GHashTable *
rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                  float factor, enum pe_weights flags)
{
    GHashTable *work = NULL;
    int multiplier = 1;

    if (factor < 0) {
        multiplier = -1;
    }

    /* Already part of this merge chain: break the loop, hand back input */
    if (is_set(rsc->flags, pe_rsc_merging)) {
        pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
        return nodes;
    }

    set_bit(rsc->flags, pe_rsc_merging);

    if (is_set(flags, pe_weights_init)) {
        /* First level: seed the working table instead of combining */
        if (rsc->variant == pe_group && rsc->children) {
            /* For a group, start from its last member's scores */
            GListPtr last = rsc->children;

            while (last->next != NULL) {
                last = last->next;
            }

            pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
            work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);

        } else {
            work = node_hash_dup(rsc->allowed_nodes);
        }
        clear_bit(flags, pe_weights_init);

    } else {
        /* Combine this resource's allowed-node scores into a copy of @nodes */
        pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
        work = node_hash_dup(nodes);
        node_hash_update(work, rsc->allowed_nodes, attr, factor,
                         is_set(flags, pe_weights_positive));
    }

    /* If the merge left nowhere to run, optionally roll back to the input */
    if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
        pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
        g_hash_table_destroy(work);
        clear_bit(rsc->flags, pe_rsc_merging);
        return nodes;
    }

    if (can_run_any(work)) {
        GListPtr gIter = NULL;

        /* Recurse into colocation partners; direction picks which side */
        if (is_set(flags, pe_weights_forward)) {
            gIter = rsc->rsc_cons;
        } else {
            gIter = rsc->rsc_cons_lhs;
        }

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *other = NULL;
            rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

            if (is_set(flags, pe_weights_forward)) {
                other = constraint->rsc_rh;
            } else {
                other = constraint->rsc_lh;
            }

            pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
            /* Scale the recursion by the constraint score relative to INFINITY */
            work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
                                     multiplier * (float)constraint->score / INFINITY, flags);
            dump_node_scores(LOG_TRACE, NULL, rhs, work);
        }
    }

    if (is_set(flags, pe_weights_positive)) {
        /* Convert the filtering sentinel back into a small positive weight */
        node_t *node = NULL;
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, work);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if (node->weight == INFINITY_HACK) {
                node->weight = 1;
            }
        }
    }

    /* Input table was replaced by @work — release it */
    if (nodes) {
        g_hash_table_destroy(nodes);
    }

    clear_bit(rsc->flags, pe_rsc_merging);
    return work;
}
/* Allocate (color) a primitive resource to a node.
 * Applies colocation constraints (both directions), ticket constraints and
 * target-role, then either honors an unmanaged/stop-everything state or calls
 * native_choose_node().  Returns the chosen node, or NULL.
 * pe_rsc_allocating on rsc->flags guards against allocation loops. */
node_t *
native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    int alloc_details = scores_log_level + 1;

    if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
        /* never allocate children on their own */
        pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s",
                     rsc->id, rsc->parent->id);
        rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
    }

    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        /* Already decided */
        return rsc->allocated_to;
    }

    if (is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
        return NULL;
    }

    set_bit(rsc->flags, pe_rsc_allocating);
    print_resource(alloc_details, "Allocating: ", rsc, FALSE);
    dump_node_scores(alloc_details, rsc, "Pre-allloc", rsc->allowed_nodes);

    /* Apply colocations where this resource is the dependent (LH) side */
    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        GHashTable *archive = NULL;
        resource_t *rsc_rh = constraint->rsc_rh;

        pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
                     rsc->id, constraint->id, rsc_rh->id,
                     constraint->score, role2text(constraint->role_lh));
        /* Snapshot scores so a constraint that empties the table can be undone */
        if (constraint->role_lh >= RSC_ROLE_MASTER
            || (constraint->score < 0 && constraint->score > -INFINITY)) {
            archive = node_hash_dup(rsc->allowed_nodes);
        }
        rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
        rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint);
        if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
            pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
            g_hash_table_destroy(rsc->allowed_nodes);
            rsc->allowed_nodes = archive;
            archive = NULL;
        }
        if (archive) {
            g_hash_table_destroy(archive);
        }
    }

    dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);

    /* Merge in scores from resources colocated WITH this one (RH side) */
    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        rsc->allowed_nodes =
            constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id,
                                                    rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    (float)constraint->score / INFINITY,
                                                    pe_weights_rollback);
    }

    /* Tickets that are revoked or in standby force their constraint */
    for (gIter = rsc->rsc_tickets; gIter != NULL; gIter = gIter->next) {
        rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data;

        if (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby) {
            rsc_ticket_constraint(rsc, rsc_ticket, data_set);
        }
    }

    print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE);
    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
        /* make sure it doesn't come up again */
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
    }

    dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __PRETTY_FUNCTION__,
                     rsc->allowed_nodes);
    /* Fencing enabled but no fencing device configured: refuse to manage */
    if (is_set(data_set->flags, pe_flag_stonith_enabled)
        && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        clear_bit(rsc->flags, pe_rsc_managed);
    }

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        /* Unmanaged: pin it where it is (or nowhere) without real allocation */
        const char *reason = NULL;
        node_t *assign_to = NULL;

        rsc->next_role = rsc->role;
        if (rsc->running_on == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_MASTER) {
            assign_to = rsc->running_on->data;
            reason = "master";
        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            assign_to = rsc->running_on->data;
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                    assign_to ? assign_to->details->uname : "'nowhere'", reason);
        native_assign_node(rsc, NULL, assign_to, TRUE);

    } else if (is_set(data_set->flags, pe_flag_stop_everything)
               && is_fencing_resource(rsc) == FALSE) {
        pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
        native_assign_node(rsc, NULL, NULL, TRUE);

    } else if (is_set(rsc->flags, pe_rsc_provisional)
               && native_choose_node(rsc, prefer, data_set)) {
        pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);

    } else if (rsc->allocated_to == NULL) {
        if (is_not_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);
    }

    clear_bit(rsc->flags, pe_rsc_allocating);
    print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE);

    return rsc->allocated_to;
}
char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); if (safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (value == NULL) { value = "0"; } if (safe_str_neq(value, interval)) { continue; } if (id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err ("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; /* Only process for the operations without role="Stopped" */ value = crm_element_value(operation, "role"); if (value && text2role(value) == RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node ? node->details->uname : "n/a"); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional) ? 
"optional" : "manditory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s manditory: not active", key); } else { g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if (is_optional) { char *local_key = strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* its running : cancel it */ mon = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(mon->task); mon->task = strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if (local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, value ? 
value : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear); } else if (is_set(mon->flags, pe_action_optional) == FALSE) { pe_rsc_info(rsc, " Start recurring %s (%llus) for %s on %s", mon->task, interval_ms / 1000, rsc->id, crm_str(node_uname)); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if (crm_str_eq((const char *)operation->name, "op", 
TRUE)) { RecurringOp(rsc, start, node, operation, data_set); } } } } void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* TODO: Support of non-unique clone */ if (is_set(rsc->flags, pe_rsc_unique) == FALSE) { return; } /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring actions %s for %s in role %s on nodes where it'll not be running", ID(operation), rsc->id, role2text(rsc->next_role)); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { action_t *cancel_op = NULL; char *local_key = strdup(key); g_list_free(possible_matches); cancel_op = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); cancel_op->task = strdup(RSC_CANCEL); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval); 
add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s", key, role, role2text(rsc->next_role), crm_str(node_uname)); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; char *stop_op_key = NULL; if (node_uname && safe_str_eq(stop_node_uname, node_uname)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s manditory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_EXECRA_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); probe_complete_ops = find_actions(data_set->actions, CRM_OP_PROBED, NULL); for (local_gIter = probe_complete_ops; local_gIter != NULL; local_gIter = 
local_gIter->next) { action_t *probe_complete = (action_t *) local_gIter->data; if (probe_complete->node == NULL) { if (is_set(probe_complete->flags, pe_action_optional) == FALSE) { probe_is_optional = FALSE; } if (is_set(probe_complete->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : probe un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(NULL, NULL, probe_complete, NULL, strdup(key), stopped_mon, pe_order_optional, data_set); } break; } } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_op_key = stop_key(rsc); stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t *) local_gIter->data; if (is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if (is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, strdup(stop_op_key), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } free(stop_op_key); if (is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional); } if (is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no 
node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } free(key); } void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } void native_create_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *start = NULL; node_t *chosen = NULL; node_t *current = NULL; gboolean need_stop = FALSE; GListPtr gIter = NULL; int num_active_nodes = 0; gboolean fence_device = is_fencing_resource(rsc); enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; chosen = rsc->allocated_to; if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } else if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } pe_rsc_trace(rsc, "Processing state transition for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); if(rsc->running_on) { current = rsc->running_on->data; } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; if(fence_device && n->details->unclean) { crm_info("Ignoring %s on %s: fencing resource on an unclean node", rsc->id, n->details->uname); continue; } 
/* Create all concrete actions (stop/start/demote/promote, cleanups, recurring
 * monitors) needed to move @rsc from its current role on its current node(s)
 * to its allocated node and next role, driven by rsc_state_matrix /
 * rsc_action_matrix. */
void
native_create_actions(resource_t * rsc, pe_working_set_t * data_set)
{
    action_t *start = NULL;
    node_t *chosen = NULL;
    node_t *current = NULL;
    gboolean need_stop = FALSE;        /* TRUE forces a full stop/start cycle */
    GListPtr gIter = NULL;
    int num_active_nodes = 0;
    gboolean fence_device = is_fencing_resource(rsc);
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    chosen = rsc->allocated_to;
    /* Normalize an unknown next_role based on whether a node was allocated */
    if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STARTED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));

    } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STOPPED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
    }

    pe_rsc_trace(rsc, "Processing state transition for %s: %s->%s", rsc->id,
                 role2text(rsc->role), role2text(rsc->next_role));

    if (rsc->running_on) {
        current = rsc->running_on->data;
    }

    /* Count active nodes; a fencing resource on an unclean node doesn't count */
    for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
        node_t *n = (node_t *) gIter->data;

        if (fence_device && n->details->unclean) {
            crm_info("Ignoring %s on %s: fencing resource on an unclean node",
                     rsc->id, n->details->uname);
            continue;
        }
        num_active_nodes++;
    }

    get_rsc_attributes(rsc->parameters, rsc, chosen, data_set);

    /* Clean up after migrations that were interrupted mid-flight */
    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        node_t *current = (node_t *) gIter->data;

        action_t *stop = stop_action(rsc, current, FALSE);

        set_bit(stop->flags, pe_action_dangle);
        pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, current->details->uname);

        if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, current, FALSE, data_set);
        }
    }

    if (num_active_nodes > 1) {

        if (num_active_nodes == 2
            && chosen
            && rsc->partial_migration_target
            && (chosen->details == rsc->partial_migration_target->details)) {
            /* Here the chosen node is still the migration target from a partial
             * migration. Attempt to continue the migration instead of recovering
             * by stopping the resource everywhere and starting it on a single node. */
            pe_rsc_trace(rsc,
                         "Will attempt to continue with a partial migration to target %s from %s",
                         rsc->partial_migration_target->details->id,
                         rsc->partial_migration_source->details->id);
        } else {
            const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
            const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

            pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s",
                        rsc->id, class, type, num_active_nodes,
                        recovery2text(rsc->recovery_type));
            crm_warn("See %s for more information.",
                     "http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active");

            if (rsc->recovery_type == recovery_stop_start) {
                need_stop = TRUE;
            }

            /* If by chance a partial migration is in process,
             * but the migration target is not chosen still, clear all
             * partial migration data. */
            rsc->partial_migration_source = rsc->partial_migration_target = NULL;
        }
    }

    if (is_set(rsc->flags, pe_rsc_start_pending)) {
        start = start_action(rsc, chosen, TRUE);
        set_bit(start->flags, pe_action_print_always);
    }

    /* Determine whether a stop is required before the resource can reach
     * its target state */
    if (current && chosen && current->details != chosen->details) {
        pe_rsc_trace(rsc, "Moving %s", rsc->id);
        need_stop = TRUE;

    } else if (is_set(rsc->flags, pe_rsc_failed)) {
        pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        need_stop = TRUE;

    } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
        /* Recovery of a promoted resource */
        start = start_action(rsc, chosen, TRUE);
        if (is_set(start->flags, pe_action_optional) == FALSE) {
            pe_rsc_trace(rsc, "Forced start %s", rsc->id);
            need_stop = TRUE;
        }
    }

    pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
                 role2text(rsc->role), role2text(rsc->next_role));

    role = rsc->role;
    /* Potentially optional steps on bringing the resource down and back up
     * to the same level */
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role),
                     role2text(next_role), rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    /* Bring the resource back up to its current role on the chosen node */
    while (rsc->role <= rsc->next_role && role != rsc->role) {
        next_role = rsc_state_matrix[role][rsc->role];
        pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role),
                     role2text(next_role), rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    role = rsc->role;
    /* Required steps from this role to the next */
    while (role != rsc->next_role) {
        next_role = rsc_state_matrix[role][rsc->next_role];
        pe_rsc_trace(rsc, "Role: Executing: %s->%s (%s)", role2text(role),
                     role2text(next_role), rsc->id);
        if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    /* Finally schedule the recurring monitors for the target state */
    if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
        start = start_action(rsc, chosen, TRUE);
        Recurring(rsc, start, chosen, data_set);
        Recurring_Stopped(rsc, start, chosen, data_set);
    } else {
        Recurring_Stopped(rsc, NULL, NULL, data_set);
    }
}
required":""); if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Role: Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { Recurring_Stopped(rsc, NULL, NULL, data_set); } } void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ resource_t *top = uber_parent(rsc); int type = pe_order_optional | pe_order_implies_then | pe_order_restart; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set); if (top->variant == pe_master) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if (is_fencing_resource(rsc) == FALSE) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(all_stopped->task), all_stopped, pe_order_implies_then | pe_order_runnable_left, data_set); } if (g_hash_table_size(rsc->utilization) > 0 && 
safe_str_neq(data_set->placement_strategy, "default")) { GHashTableIter iter; node_t *next = NULL; GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&next)) { char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); - custom_action_order(NULL, strdup(load_stopped_task), load_stopped, - rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, - pe_order_load, data_set); - free(load_stopped_task); } } } void native_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } enum filter_colocation_res { influence_nothing = 0, influence_rsc_location, 
influence_rsc_priority,
};

/* Decide how the colocation constraint between rsc_lh and rsc_rh should be
 * applied at this point in allocation:
 *   influence_nothing      - constraint is not applicable right now
 *   influence_rsc_location - fold scores into rsc_lh's allowed-node weights
 *   influence_rsc_priority - fold score into rsc_lh's promotion priority
 */
static enum filter_colocation_res
filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
    if (constraint->score == 0) {
        return influence_nothing;
    }

    /* rh side must be allocated before we can process constraint */
    if (is_set(rsc_rh->flags, pe_rsc_provisional)) {
        return influence_nothing;
    }

    if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
        rsc_lh->parent &&
        rsc_lh->parent->variant == pe_master && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {

        /* LH and RH resources have already been allocated, place the correct
         * priority on LH rsc for the given multistate resource role */
        return influence_rsc_priority;
    }

    if (is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
        /* error check: both sides are allocated, so a mandatory constraint
         * can only be sanity-checked, not enforced, at this point */
        struct node_shared_s *details_lh;
        struct node_shared_s *details_rh;

        if ((constraint->score > -INFINITY) && (constraint->score < INFINITY)) {
            return influence_nothing;
        }

        details_rh = rsc_rh->allocated_to ? rsc_rh->allocated_to->details : NULL;
        details_lh = rsc_lh->allocated_to ? rsc_lh->allocated_to->details : NULL;

        if (constraint->score == INFINITY && details_lh != details_rh) {
            crm_err("%s and %s are both allocated"
                    " but to different nodes: %s vs. %s",
                    rsc_lh->id, rsc_rh->id,
                    details_lh ? details_lh->uname : "n/a",
                    details_rh ? details_rh->uname : "n/a");

        } else if (constraint->score == -INFINITY && details_lh == details_rh) {
            crm_err("%s and %s are both allocated"
                    " but to the SAME node: %s",
                    rsc_lh->id, rsc_rh->id, details_rh ? details_rh->uname : "n/a");
        }

        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
        crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
                  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
        crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
        /* Fixed: previously 'return FALSE', the wrong type for this enum.
         * FALSE and influence_nothing are both 0, so behavior is unchanged. */
        return influence_nothing;
    }

    if (constraint->score < 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
        crm_trace("LH: Skipping -ve constraint: \"%s\" state filter",
                  role2text(constraint->role_lh));
        return influence_nothing;
    }

    if (constraint->score < 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
        crm_trace("RH: Skipping -ve constraint: \"%s\" state filter",
                  role2text(constraint->role_rh));
        return influence_nothing;
    }

    return influence_rsc_location;
}

/* Both sides are allocated: if they share the relevant node attribute, fold
 * the colocation score into rsc_lh's priority so promotion ordering can
 * honor it (negated when the constraint targets the Slave role). */
static void
influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
    const char *rh_value = NULL;
    const char *lh_value = NULL;
    const char *attribute = "#id";
    int score_multiplier = 1;

    if (constraint->node_attribute != NULL) {
        attribute = constraint->node_attribute;
    }

    if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
        return;
    }

    lh_value = g_hash_table_lookup(rsc_lh->allocated_to->details->attrs, attribute);
    rh_value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute);

    if (!safe_str_eq(lh_value, rh_value)) {
        return;
    }

    if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
        return;
    }

    if (constraint->role_lh == RSC_ROLE_SLAVE) {
        /* colocation with the Slave role pulls the priority down */
        score_multiplier = -1;
    }

    rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
}

static void
colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t *
constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if (constraint->score < 0) { /* nothing to do: * anti-colocation with something thats not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { tmp = g_hash_table_lookup(node->details->attrs, attribute); if (do_check && safe_str_eq(tmp, value)) { if (constraint->score < INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights(constraint->score, node->weight); } } else if (do_check == FALSE || constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check ? 
"failed" : "unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { char *score = score2char(constraint->score); pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); free(score); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { enum filter_colocation_res filter_results; filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; 
rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: /*Promotion score will be set to -INFINITY in master_promotion_order() */ if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; crm_warn("Node %s will be fenced for deadman", node->details->uname); node->details->unclean = TRUE; } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (g_list_length(rsc_lh->running_on) > 0) { clear_bit(rsc_lh->flags, pe_rsc_managed); set_bit(rsc_lh->flags, pe_rsc_block); } break; } } else if (rsc_ticket->ticket->granted == FALSE){ if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if (rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(action_t * action, node_t * node) { return action->flags; } enum pe_graph_flags native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, 
enum pe_ordering type) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; if (type & pe_order_asymmetrical) { resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0; if (!then_rsc) { /* ignore */ } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) { /* ignore... if 'then' is supposed to be stopped after 'first', but * then is already stopped, there is nothing to be done when non-symmetrical. */ } else if ((then_rsc_role == RSC_ROLE_STARTED) && safe_str_eq(then->task, RSC_START)) { /* ignore... if 'then' is supposed to be started after 'first', but * then is already started, there is nothing to be done when non-symmetrical. */ } else if (!(first->flags & pe_action_runnable)) { /* prevent 'then' action from happening if 'first' is not runnable and * 'then' has not yet occurred. */ pe_clear_action_bit(then, pe_action_runnable); pe_clear_action_bit(then, pe_action_optional); pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid); } else { /* ignore... then is allowed to start/stop if it wants to. 
*/ } } if (type & pe_order_implies_first) { if ((filter & pe_action_optional) && (flags & pe_action_optional) == 0) { pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); pe_clear_action_bit(first, pe_action_optional); } } if (type & pe_order_implies_first_master) { if ((filter & pe_action_optional) && ((then->flags & pe_action_optional) == FALSE) && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) { clear_bit(first->flags, pe_action_optional); } } if (is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(then->flags, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); pe_clear_action_bit(then, pe_action_runnable); } if (is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(then->flags, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); pe_clear_action_bit(then, pe_action_optional); } if (is_set(type, pe_order_restart)) { const char *reason = NULL; CRM_ASSERT(first->rsc && first->rsc->variant == pe_native); CRM_ASSERT(then->rsc && then->rsc->variant == pe_native); if ((filter & pe_action_runnable) && (then->flags & pe_action_runnable) == 0) { reason = "shutdown"; } if ((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) { reason = "recover"; } if (reason && is_set(first->flags, pe_action_optional) && is_set(first->flags, pe_action_runnable)) { pe_rsc_trace(first->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_clear_action_bit(first, pe_action_optional); } if (reason && is_not_set(first->flags, pe_action_optional) && is_not_set(first->flags, pe_action_runnable)) { pe_rsc_trace(then->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_clear_action_bit(then, pe_action_runnable); } } if (then_flags != 
then->flags) { changed |= pe_graph_updated_then; pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); } if (first_flags != first->flags) { changed |= pe_graph_updated_first; pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; if (constraint == NULL) { pe_err("Constraint is NULL"); return; } else if (rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if (constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if (is_active(constraint) == FALSE) { pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id); return; } for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *other_node = NULL; other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights(other_node->weight, node->weight); } else { node_t *new_node = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, 
(gpointer) new_node->details->id, new_node); } } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } void #define log_change(fmt, args...) do { \ if(terminal) { \ printf(" * "fmt"\n", ##args); \ } else { \ crm_notice(fmt, ##args); \ } \ } while(0) LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { node_t *next = NULL; node_t *current = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *demote = NULL; action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { if (g_list_length(rsc->running_on) > 1 && rsc->partial_migration_source) { current = rsc->partial_migration_source; } else { current = rsc->running_on->data; } if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (is_not_set(rsc->flags, pe_rsc_managed) || 
(current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed) ? " unmanaged" : ""); return; } if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } key = start_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = stop_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } key = promote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } key = demote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { key = generate_op_key(rsc->id, RSC_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); free(key); CRM_CHECK(next != NULL,); if (next == NULL) { } else if (possible_matches && current) { log_change("Migrate %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start == NULL || is_set(start->flags, pe_action_optional)) { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (moving && current) { log_change("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if 
(start && is_set(start->flags, pe_action_runnable) == FALSE) { log_change("Stop %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if (rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if (current != NULL) { gboolean allowed = FALSE; if (demote != NULL && (demote->flags & pe_action_runnable)) { allowed = TRUE; } log_change("Demote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname, allowed ? "" : " - blocked"); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->next_role > RSC_ROLE_STOPPED) { if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } } } else if (rsc->next_role == RSC_ROLE_STOPPED) { GListPtr gIter = NULL; CRM_CHECK(current != NULL,); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; log_change("Stop %s\t(%s)", rsc->id, node->details->uname); } } if (moving) { log_change("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->next_role), current->details->uname, next->details->uname); } if (rsc->role == RSC_ROLE_STOPPED) { gboolean allowed = FALSE; if(start && (start->flags & pe_action_runnable)) { allowed = TRUE; } CRM_CHECK(next != NULL,); if (next != NULL) { log_change("Start %s\t(%s%s)", rsc->id, next->details->uname, allowed?"":" - blocked"); } if(allowed == FALSE) { return; } } if (rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { gboolean allowed = FALSE; CRM_CHECK(next != NULL,); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) 
&& rsc->role > RSC_ROLE_STOPPED) { if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } if (promote && (promote->flags & pe_action_runnable)) { allowed = TRUE; } log_change("Promote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname, allowed ? "" : " - blocked"); } } gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "%s", rsc->id); if (rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && is_fencing_resource(rsc)) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(NULL, strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_optional | pe_order_stonith_stop, data_set); } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop; if (rsc->partial_migration_target) { if(rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if(is_not_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable|pe_action_clear); } if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } } return TRUE; } gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { action_t *start = 
NULL; pe_rsc_trace(rsc, "%s on %s %d", rsc->id, next?next->details->uname:"N/A", optional); start = start_action(rsc, next, TRUE); if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional | pe_action_clear); } return TRUE; } gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; pe_rsc_trace(rsc, "%s on %s", rsc->id, next?next->details->uname:"N/A"); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t *) gIter->data; if (is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if (runnable) { promote_action(rsc, next, optional); return TRUE; } pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t *) gIter->data; update_action_flags(promote, pe_action_runnable | pe_action_clear); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "%s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; pe_rsc_trace(rsc, "%s on %s", rsc->id, next?next->details->uname:"N/A"); demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { crm_err("%s on %s", rsc->id, next?next->details->uname:"N/A"); 
CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { pe_rsc_trace(rsc, "%s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? 
pe_order_implies_then : pe_order_optional, data_set); #if DELETE_THEN_REFRESH refresh = custom_action(NULL, strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; static char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if (last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len - 1; while (complete == FALSE && lpc > 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; last_rsc_id = calloc(1, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len + 1] = 0; complete = TRUE; free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); return NULL; break; } } return last_rsc_id; } static node_t * probe_grouped_clone(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { node_t *running = NULL; resource_t *top = uber_parent(rsc); if (running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. 
* * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while (peer && running == NULL) { running = pe_hash_table_lookup(peer->known_on, node->details->id); if (running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active clone: %s", rsc->id); free(clone_id); return running; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } free(clone_id); } return running; } gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { char *key = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_EXECRA_NOT_RUNNING); rc_master = crm_itoa(PCMK_EXECRA_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return 
any_created; } if (is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } if(node->details->pending && is_fencing_resource(rsc)) { crm_trace("Skipping probe for fencing resource %s on pending node %s", rsc->id, node->details->uname); return FALSE; } running = g_hash_table_lookup(rsc->known_on, node->details->id); if (running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) { /* Anonymous clones */ if (rsc->parent == top) { running = g_hash_table_lookup(rsc->parent->known_on, node->details->id); } else { /* Grouped anonymous clones need extra special handling */ running = probe_grouped_clone(rsc, node, data_set); } } if (force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active: %s on %s", rsc->id, node->details->uname); return FALSE; } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear); /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. */ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } pe_rsc_debug(rsc, "Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_then); return TRUE; } static void native_start_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, pe_working_set_t * data_set) { node_t *target = stonith_op ? 
stonith_op->node : NULL; if (is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); pe_rsc_trace(rsc, "Ordering %s action before stonith events", key); custom_action_order(rsc, key, NULL, NULL, strdup(ready->task), ready, pe_order_optional | pe_order_implies_then, data_set); } else { GListPtr gIter = NULL; action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_optional); } else if (target != NULL && safe_str_eq(action->task, RSC_START) && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { /* if known == NULL, then we dont know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * to with the resource * * its analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explaination is that the * DC died and took its status with it */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_optional | pe_order_runnable_left); } } } } static void native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); free(key); /* add the stonith OP as a stop pre-req and the mark the stop * as a pseudo op - since its now redundant */ for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if 
(action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if (is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); update_action_flags(action, pe_action_implied_by_stonith); if (is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if (is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, strdup("notify_stop_resource"), strdup(rsc->id)); g_hash_table_insert(n_data->keys, strdup("notify_stop_uname"), strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* From Bug #1601, successful fencing must be an input 
to a failed resources stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is" " implict after %s is fenced", rsc->id, action->node->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ crm_trace("here - 1"); update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); if (is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { gboolean is_stonith = FALSE; if 
(rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } return; } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if (stonith_op != NULL && is_fencing_resource(rsc)) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op, is_stonith, data_set); /* Stop constraints */ if(stonith_op) { native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t * rsc, resource_t * target, node_t * node, const char *type) { int mode = stack_stable; action_t *active = NULL; if (target->children) { GListPtr gIter = NULL; for (gIter = target->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; mode |= find_clone_activity_on(rsc, child, node, type); } return mode; } active = find_first_action(target->actions, NULL, RSC_START, NULL); if (active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { pe_rsc_debug(rsc, "%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, RSC_STOP, node); if (active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { pe_rsc_debug(rsc, "%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t * rsc, resource_t * other_rsc, const char *type) { resource_t *other_p = uber_parent(other_rsc); if (other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if 
(other_p->variant == pe_native) { crm_notice("Cannot migrate %s due to dependency on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if (other_rsc == rsc->parent) { int mode = 0; GListPtr gIter = NULL; for (gIter = other_rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } } return mode; } else if (other_p->variant == pe_group) { crm_notice("Cannot migrate %s due to dependency on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following constraints already exist in the system. The 'ok' and 'fail' column refers to whether they still hold for migration. 
a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stoping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as-per previous only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t * rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; GListPtr gIter = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); free(key); pe_rsc_trace(rsc, "%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; resource_t *target = constraint->rsc_rh; pe_rsc_trace(rsc, "Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if (constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if (mode & stack_middle) { return FALSE; } else if ((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } } for (gIter = start->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t *) gIter->data; other = other_w->action; if (other_w->type & pe_order_serialize_only) { pe_rsc_trace(rsc, "%s: depends on %s (serialize ordering)", rsc->id, other->uuid); continue; } pe_rsc_trace(rsc, "%s: Checking %s ordering", rsc->id, other->uuid); if (is_set(other->flags, pe_action_optional) == FALSE) 
{ mode |= check_stack_element(rsc, other->rsc, "order"); if (mode & stack_middle) { return FALSE; } else if ((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } } return TRUE; } static action_t * get_first_named_action(resource_t *rsc, const char *action, gboolean only_valid, node_t *current) { action_t *a = NULL; GListPtr action_list = NULL; char *key = generate_op_key(rsc->id, action, 0); action_list = find_actions(rsc->actions, key, current); if (action_list == NULL || action_list->data == NULL) { crm_trace("%s: no %s action", rsc->id, action); free(key); return NULL; } a = action_list->data; g_list_free(action_list); if(only_valid && is_set(a->flags, pe_action_pseudo)) { crm_trace("%s: pseudo", key); a = NULL; } else if(only_valid && is_not_set(a->flags, pe_action_runnable)) { crm_trace("%s: runnable", key); a = NULL; } free(key); return a; } static void MigrateRsc(resource_t * rsc, action_t *stop, action_t *start, pe_working_set_t * data_set, gboolean partial) { action_t *to = NULL; action_t *from = NULL; action_t *then = NULL; action_t *other = NULL; action_t *done = get_pseudo_op(STONITH_DONE, data_set); GListPtr gIter = NULL; const char *value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if (crm_is_true(value) == FALSE) { return; } if (rsc->next_role > RSC_ROLE_SLAVE) { pe_rsc_trace(rsc, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); return; } if(start == NULL || stop == NULL) { pe_rsc_trace(rsc, "%s: not exists %p -> %p", rsc->id, stop, start); return; } else if (start->node == NULL || stop->node == NULL) { pe_rsc_trace(rsc, "%s: no node %p -> %p", rsc->id, stop->node, start->node); return; } else if(is_set(stop->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: stop action", rsc->id); return; } else if(is_set(start->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: start action", rsc->id); return; } 
/* Turn a stop+start pair for rsc into a live migration (migrate_to on the
 * source node, migrate_from on the target), when the resource and its
 * dependency stack permit it.  'partial' indicates a previously begun
 * migration being completed, in which case only migrate_from is created.
 *
 * NOTE(review): comments below describe only what is visible here; the
 * precise semantics of pe_order_* flags live in the ordering engine.
 */
static void
MigrateRsc(resource_t * rsc, action_t *stop, action_t *start, pe_working_set_t * data_set,
           gboolean partial)
{
    action_t *to = NULL;
    action_t *from = NULL;
    action_t *then = NULL;
    action_t *other = NULL;
    action_t *done = get_pseudo_op(STONITH_DONE, data_set);

    GListPtr gIter = NULL;
    const char *value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE);

    /* Migration must be explicitly enabled via the allow-migrate meta attribute. */
    if (crm_is_true(value) == FALSE) {
        return;
    }

    /* Only stopped/slave roles are eligible; masters are never migrated here. */
    if (rsc->next_role > RSC_ROLE_SLAVE) {
        pe_rsc_trace(rsc, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role));
        return;
    }

    /* Guard clauses: both actions must exist, be mandatory, and be on
     * distinct nodes, and the dependency stack must allow migration.
     */
    if (start == NULL || stop == NULL) {
        pe_rsc_trace(rsc, "%s: not exists %p -> %p", rsc->id, stop, start);
        return;

    } else if (start->node == NULL || stop->node == NULL) {
        pe_rsc_trace(rsc, "%s: no node %p -> %p", rsc->id, stop->node, start->node);
        return;

    } else if (is_set(stop->flags, pe_action_optional)) {
        pe_rsc_trace(rsc, "%s: stop action", rsc->id);
        return;

    } else if (is_set(start->flags, pe_action_optional)) {
        pe_rsc_trace(rsc, "%s: start action", rsc->id);
        return;

    } else if (stop->node->details == start->node->details) {
        /* Same node: a restart, not a move. */
        pe_rsc_trace(rsc, "%s: not moving %p -> %p", rsc->id, stop->node, start->node);
        return;

    } else if (at_stack_bottom(rsc) == FALSE) {
        pe_rsc_trace(rsc, "%s: not at stack bottom", rsc->id);
        return;
    }

    pe_rsc_trace(rsc, "%s %s -> %s", rsc->id, stop->node->details->uname,
                 start->node->details->uname);

    if (partial) {
        pe_rsc_info(rsc, "Completing partial migration of %s from %s to %s", rsc->id,
                    stop->node ? stop->node->details->uname : "unknown",
                    start->node ? start->node->details->uname : "unknown");
    } else {
        pe_rsc_info(rsc, "Migrating %s from %s to %s", rsc->id,
                    stop->node ? stop->node->details->uname : "unknown",
                    start->node ? start->node->details->uname : "unknown");
    }

    /* Preserve the stop to ensure the end state is sane on that node,
     * Make the start a pseudo op
     * Create migrate_to, have it depend on everything the stop did
     * Create migrate_from
     *
     * -> migrate_to -> migrate_from -> stop -> start
     */

    update_action_flags(start, pe_action_pseudo);       /* easier than trying to delete it from the graph
                                                         * but perhaps we should have it run anyway
                                                         */

    if (!partial) {
        to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, stop->node,
                           FALSE, TRUE, data_set);
    }
    from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, start->node,
                         FALSE, TRUE, data_set);

    /* This is slightly sub-optimal if 'to' fails, but always
     * run both halves of the migration before terminating the
     * transition.
     *
     * This can be removed if/when we update unpack_rsc_op() to
     * 'correctly' handle partial migrations.
     *
     * Without this, we end up stopping both sides
     */
    from->priority = INFINITY;

    if (!partial) {
        order_actions(to, from, pe_order_optional);
        add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname);
        add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname);
    }

    /* 'then' is the first half of the migration actually scheduled. */
    then = to ? to : from;

    order_actions(from, stop, pe_order_optional);
    order_actions(done, then, pe_order_optional);
    add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname);
    add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname);

    /* Create the correct ordering adjustments based on find_clone_activity_on(); */

    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
        resource_t *target = constraint->rsc_rh;

        pe_rsc_info(rsc, "Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id,
                    constraint->score);

        if (constraint->score > 0) {
            int mode = check_stack_element(rsc, target, "coloc");
            action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL);
            action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL);

            CRM_ASSERT(clone_stop != NULL);
            CRM_ASSERT(clone_start != NULL);
            /* at_stack_bottom() already rejected these combinations. */
            CRM_ASSERT((mode & stack_middle) == 0);
            CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0);

            if (mode & stack_stopping) {
#if 0
                crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id);
                order_actions(from, clone_stop, pe_order_optional);
#endif
                GListPtr lpc2 = NULL;

                for (lpc2 = start->actions_before; lpc2 != NULL; lpc2 = lpc2->next) {
                    action_wrapper_t *other_w = (action_wrapper_t *) lpc2->data;

                    /* Needed if the clone's started pseudo-action ever gets printed in the graph */
                    if (other_w->action == clone_start) {
                        crm_debug("Breaking %s -> %s ordering", other_w->action->uuid,
                                  start->uuid);
                        other_w->type = pe_order_none;
                    }
                }

            } else if (mode & stack_starting) {
#if 0
                crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id);
                order_actions(clone_start, to, pe_order_optional);
#endif
                GListPtr lpc2 = NULL;

                for (lpc2 = clone_stop->actions_before; lpc2 != NULL; lpc2 = lpc2->next) {
                    action_wrapper_t *other_w = (action_wrapper_t *) lpc2->data;

                    /* Needed if the clone's stop pseudo-action ever gets printed in the graph */
                    if (other_w->action == stop) {
                        crm_debug("Breaking %s -> %s ordering", other_w->action->uuid,
                                  clone_stop->uuid);
                        other_w->type = pe_order_none;
                    }
                }
            }
        }
    }

#if 0
    /* Implied now that start/stop are not morphed into migrate ops */

    /* Anything that needed stop to complete, now also needs start to have completed */
    for (gIter = stop->actions_after; gIter != NULL; gIter = gIter->next) {
        action_wrapper_t *other_w = (action_wrapper_t *) gIter->data;

        other = other_w->action;
        if (is_set(other->flags, pe_action_optional) || other->rsc != NULL) {
            continue;
        }
        crm_debug("Ordering %s before %s (stop)", from->uuid, other->uuid);
        order_actions(from, other, other_w->type);
    }
#endif

    /* migrate 'then' action also needs anything that the stop needed to have completed too */
    for (gIter = stop->actions_before; gIter != NULL; gIter = gIter->next) {
        action_wrapper_t *other_w = (action_wrapper_t *) gIter->data;

        other = other_w->action;
        if (other->rsc == NULL) {
            /* nothing */
        } else if (is_set(other->flags, pe_action_optional) || other->rsc == rsc
                   || other->rsc == rsc->parent) {
            continue;
        }
        crm_debug("Ordering %s before %s (stop)", other_w->action->uuid, stop->uuid);
        order_actions(other, then, other_w->type);
    }

    /* migrate 'then' action also needs anything that the start needed to have completed too */
    for (gIter = start->actions_before; gIter != NULL; gIter = gIter->next) {
        action_wrapper_t *other_w = (action_wrapper_t *) gIter->data;

        other = other_w->action;
        if (other->rsc == NULL) {
            /* nothing */
        } else if (is_set(other->flags, pe_action_optional) || other->rsc == rsc
                   || other->rsc == rsc->parent) {
            continue;
        }
        /* NOTE(review): this message prints stop->uuid although it is the
         * start's dependencies being processed — looks like a copy/paste in
         * the trace text; behaviour is unaffected.
         */
        crm_debug("Ordering %s before %s (start)", other_w->action->uuid, stop->uuid);
        order_actions(other, then, other_w->type);
    }
}
if(is_not_set(stop->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: stop action", rsc->id); return; } else if(is_not_set(start->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: start action", rsc->id); return; } pe_rsc_trace(rsc, "%s on %s", rsc->id, stop->node->details->uname); action = get_first_named_action(rsc, RSC_PROMOTE, TRUE, NULL); if (action && is_set(action->flags, pe_action_optional) == FALSE) { update_action_flags(action, pe_action_pseudo); } action = get_first_named_action(rsc, RSC_DEMOTE, TRUE, NULL); if (action && is_set(action->flags, pe_action_optional) == FALSE) { rewrite = action; update_action_flags(stop, pe_action_pseudo); } else { rewrite = start; } pe_rsc_info(rsc, "Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); set_bit(rsc->flags, pe_rsc_reload); update_action_flags(rewrite, pe_action_optional|pe_action_clear); free(rewrite->uuid); free(rewrite->task); rewrite->task = strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } void rsc_migrate_reload(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *stop = NULL; action_t *start = NULL; gboolean partial = FALSE; if (rsc->children) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_migrate_reload(child_rsc, data_set); } return; } else if (rsc->variant > pe_native) { return; } pe_rsc_trace(rsc, "Processing %s", rsc->id); if (rsc->partial_migration_target) { start = get_first_named_action(rsc, RSC_START, TRUE, rsc->partial_migration_target); stop = get_first_named_action(rsc, RSC_STOP, TRUE, rsc->partial_migration_source); if (start && stop) { partial = TRUE; } } pe_rsc_trace(rsc, "%s %s %p", rsc->id, partial?"partial":"full", stop); if (!partial) { stop = get_first_named_action(rsc, RSC_STOP, TRUE, rsc->running_on ? 
rsc->running_on->data : NULL); start = get_first_named_action(rsc, RSC_START, TRUE, NULL); } if (is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || ((g_list_length(rsc->running_on) != 1) && !partial)) { pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } if(stop == NULL) { return; } else if (is_set(stop->flags, pe_action_optional) && is_set(rsc->flags, pe_rsc_try_reload)) { ReloadRsc(rsc, stop, start, data_set); } else if(is_not_set(stop->flags, pe_action_optional)) { MigrateRsc(rsc, stop, start, data_set, partial); } } void native_append_meta(resource_t * rsc, xmlNode * xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } } diff --git a/pengine/regression.sh b/pengine/regression.sh index 65d508f008..55b5c5cc2c 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,656 +1,657 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # core=`dirname $0` . $core/regression.core.sh create_mode="true" info Generating test outputs for these tests... 
# do_test file description info Done. echo "" info Performing the following tests from $io_dir create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test 
bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test domain "Failover domains" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" --rc 4 do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (manditory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependant clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test 
clone-order-primitive "Order clone start after a primitive" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings." do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependant must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." 
echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover 
- CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before 
node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependancy on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Dont shuffle clones due to colocation" do_test 
bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test 
master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Dont retry failed demote actions" do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion." 
echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependancy restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL #994 - with a dependant resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop 
- 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test stonith-4 "Stonith node state" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Dont promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." 
do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test failcount "Ensure failcounts are correctly expired" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure wiht on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is disabled." do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc." 
do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)." do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs." echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progessive) #1" do_test systemhealthp2 "System Health (Progessive) #2" do_test systemhealthp3 "System Health (Progessive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity 
"Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Mirgration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" +do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test stopped-monitor-09 "Stopped Monitor - unmanage started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (targer-role="Stopped")" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up 
(target-role="Started")" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo"" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy-stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - 
Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy-stop, revoked)" do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, 
initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy-stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master (loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master 
(loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 
tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test template-rsc-sets-4 "Template - Resource Sets 4" echo "" test_results diff --git a/pengine/test10/load-stopped-loop.dot b/pengine/test10/load-stopped-loop.dot new file mode 100644 index 0000000000..ed8ae48c46 --- /dev/null +++ b/pengine/test10/load-stopped-loop.dot @@ -0,0 +1,62 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange"] +"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style = bold] +"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" -> "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style = bold] 
+"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_start_0 v03-a" -> "license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold] +"license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="orange"] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "all_stopped" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "load_stopped_v03-b v03-b" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style=bold color="green" fontcolor="black"] +"load_stopped_mgmt01 mgmt01" [ style=bold color="green" fontcolor="orange"] +"load_stopped_v03-a v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"load_stopped_v03-a v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"load_stopped_v03-a v03-a" [ style=bold color="green" fontcolor="orange"] +"load_stopped_v03-b v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold] +"load_stopped_v03-b v03-b" [ style=bold color="green" fontcolor="orange"] +"stonith-v03-a_monitor_60000 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-a_start_0 v03-b" -> "stonith-v03-a_monitor_60000 v03-b" [ style = bold] +"stonith-v03-a_start_0 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-a_stop_0 v03-b" -> "stonith-v03-a_start_0 v03-b" [ style = bold] +"stonith-v03-a_stop_0 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_monitor_60000 v03-a" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_start_0 v03-a" -> "stonith-v03-b_monitor_60000 v03-a" [ style = bold] +"stonith-v03-b_start_0 v03-a" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_stop_0 v03-a" -> "stonith-v03-b_start_0 v03-a" 
[ style = bold] +"stonith-v03-b_stop_0 v03-a" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style=bold color="green" fontcolor="orange"] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "all_stopped" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "load_stopped_v03-a v03-a" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style = bold] +"vds-ok-pool-0-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" -> 
"vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style = bold] +"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style = bold] +"vds-ok-pool-0-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style = bold] +"vds-ok-pool-1-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style = bold] +"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style = bold] +"vds-ok-pool-1-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"] +} diff --git a/pengine/test10/load-stopped-loop.exp b/pengine/test10/load-stopped-loop.exp new file mode 100644 index 0000000000..75aa2e8687 --- /dev/null +++ b/pengine/test10/load-stopped-loop.exp @@ -0,0 +1,404 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/load-stopped-loop.scores b/pengine/test10/load-stopped-loop.scores new file mode 100644 index 0000000000..dc9f5f5a3b --- /dev/null +++ b/pengine/test10/load-stopped-loop.scores @@ -0,0 +1,1765 @@ +Allocation scores: +clone_color: cl-clvmd allocation score on mgmt01: 0 +clone_color: cl-clvmd allocation score on v03-a: 0 +clone_color: cl-clvmd allocation score on v03-b: 0 +clone_color: cl-dlm allocation score on mgmt01: 0 +clone_color: cl-dlm allocation score on v03-a: 0 +clone_color: cl-dlm allocation score on v03-b: 0 +clone_color: cl-iscsid allocation score on mgmt01: 0 +clone_color: cl-iscsid allocation score on v03-a: 0 +clone_color: cl-iscsid allocation score on v03-b: 0 +clone_color: cl-libvirt-images-fs allocation score on mgmt01: 0 +clone_color: cl-libvirt-images-fs allocation score on v03-a: 0 +clone_color: cl-libvirt-images-fs allocation score on v03-b: 0 +clone_color: cl-libvirt-images-pool allocation score on mgmt01: -INFINITY +clone_color: cl-libvirt-images-pool allocation score on v03-a: 0 +clone_color: cl-libvirt-images-pool allocation score on v03-b: 0 +clone_color: cl-libvirt-install-fs allocation score on mgmt01: 0 +clone_color: cl-libvirt-install-fs allocation score on v03-a: 0 +clone_color: cl-libvirt-install-fs allocation score on v03-b: 0 +clone_color: cl-libvirt-qpid allocation score on mgmt01: -INFINITY +clone_color: cl-libvirt-qpid allocation score on v03-a: 0 +clone_color: cl-libvirt-qpid allocation score on v03-b: 0 +clone_color: cl-libvirtd allocation score on mgmt01: -INFINITY +clone_color: cl-libvirtd allocation score on v03-a: 0 +clone_color: cl-libvirtd allocation score on v03-b: 0 +clone_color: cl-mcast-anbriz-net 
allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-anbriz-net allocation score on v03-a: 0 +clone_color: cl-mcast-anbriz-net allocation score on v03-b: 0 +clone_color: cl-mcast-gleb-net allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-gleb-net allocation score on v03-a: 0 +clone_color: cl-mcast-gleb-net allocation score on v03-b: 0 +clone_color: cl-mcast-test-net allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-test-net allocation score on v03-a: 0 +clone_color: cl-mcast-test-net allocation score on v03-b: 0 +clone_color: cl-multipathd allocation score on mgmt01: 0 +clone_color: cl-multipathd allocation score on v03-a: 0 +clone_color: cl-multipathd allocation score on v03-b: 0 +clone_color: cl-node-params allocation score on mgmt01: -INFINITY +clone_color: cl-node-params allocation score on v03-a: 0 +clone_color: cl-node-params allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-pool allocation score on mgmt01: -INFINITY +clone_color: cl-vds-ok-pool-0-pool allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-pool allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-pool allocation score on mgmt01: -INFINITY +clone_color: cl-vds-ok-pool-1-pool allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-pool allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-vg allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-1-vg 
allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-vg allocation score on v03-b: 0 +clone_color: cl-vlan1-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan1-if allocation score on v03-a: 0 +clone_color: cl-vlan1-if allocation score on v03-b: 0 +clone_color: cl-vlan101-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan101-if allocation score on v03-a: 0 +clone_color: cl-vlan101-if allocation score on v03-b: 0 +clone_color: cl-vlan102-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan102-if allocation score on v03-a: 0 +clone_color: cl-vlan102-if allocation score on v03-b: 0 +clone_color: cl-vlan103-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan103-if allocation score on v03-a: 0 +clone_color: cl-vlan103-if allocation score on v03-b: 0 +clone_color: cl-vlan104-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan104-if allocation score on v03-a: 0 +clone_color: cl-vlan104-if allocation score on v03-b: 0 +clone_color: cl-vlan200-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan200-if allocation score on v03-a: 0 +clone_color: cl-vlan200-if allocation score on v03-b: 0 +clone_color: cl-vlan3-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan3-if allocation score on v03-a: 0 +clone_color: cl-vlan3-if allocation score on v03-b: 0 +clone_color: cl-vlan4-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan4-if allocation score on v03-a: 0 +clone_color: cl-vlan4-if allocation score on v03-b: 0 +clone_color: cl-vlan5-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan5-if allocation score on v03-a: 0 +clone_color: cl-vlan5-if allocation score on v03-b: 0 +clone_color: cl-vlan900-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan900-if allocation score on v03-a: 0 +clone_color: cl-vlan900-if allocation score on v03-b: 0 +clone_color: cl-vlan909-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan909-if allocation score on v03-a: 0 
+clone_color: cl-vlan909-if allocation score on v03-b: 0 +clone_color: clvmd:0 allocation score on mgmt01: 1 +clone_color: clvmd:0 allocation score on v03-a: 0 +clone_color: clvmd:0 allocation score on v03-b: 0 +clone_color: clvmd:1 allocation score on mgmt01: 0 +clone_color: clvmd:1 allocation score on v03-a: 0 +clone_color: clvmd:1 allocation score on v03-b: 1 +clone_color: clvmd:2 allocation score on mgmt01: 0 +clone_color: clvmd:2 allocation score on v03-a: 1 +clone_color: clvmd:2 allocation score on v03-b: 0 +clone_color: clvmd:3 allocation score on mgmt01: 0 +clone_color: clvmd:3 allocation score on v03-a: 0 +clone_color: clvmd:3 allocation score on v03-b: 0 +clone_color: clvmd:4 allocation score on mgmt01: 0 +clone_color: clvmd:4 allocation score on v03-a: 0 +clone_color: clvmd:4 allocation score on v03-b: 0 +clone_color: clvmd:5 allocation score on mgmt01: 0 +clone_color: clvmd:5 allocation score on v03-a: 0 +clone_color: clvmd:5 allocation score on v03-b: 0 +clone_color: clvmd:6 allocation score on mgmt01: 0 +clone_color: clvmd:6 allocation score on v03-a: 0 +clone_color: clvmd:6 allocation score on v03-b: 0 +clone_color: clvmd:7 allocation score on mgmt01: 0 +clone_color: clvmd:7 allocation score on v03-a: 0 +clone_color: clvmd:7 allocation score on v03-b: 0 +clone_color: clvmd:8 allocation score on mgmt01: 0 +clone_color: clvmd:8 allocation score on v03-a: 0 +clone_color: clvmd:8 allocation score on v03-b: 0 +clone_color: dlm:0 allocation score on mgmt01: 1 +clone_color: dlm:0 allocation score on v03-a: 0 +clone_color: dlm:0 allocation score on v03-b: 0 +clone_color: dlm:1 allocation score on mgmt01: 0 +clone_color: dlm:1 allocation score on v03-a: 0 +clone_color: dlm:1 allocation score on v03-b: 1 +clone_color: dlm:2 allocation score on mgmt01: 0 +clone_color: dlm:2 allocation score on v03-a: 1 +clone_color: dlm:2 allocation score on v03-b: 0 +clone_color: dlm:3 allocation score on mgmt01: 0 +clone_color: dlm:3 allocation score on v03-a: 0 +clone_color: 
dlm:3 allocation score on v03-b: 0 +clone_color: dlm:4 allocation score on mgmt01: 0 +clone_color: dlm:4 allocation score on v03-a: 0 +clone_color: dlm:4 allocation score on v03-b: 0 +clone_color: dlm:5 allocation score on mgmt01: 0 +clone_color: dlm:5 allocation score on v03-a: 0 +clone_color: dlm:5 allocation score on v03-b: 0 +clone_color: dlm:6 allocation score on mgmt01: 0 +clone_color: dlm:6 allocation score on v03-a: 0 +clone_color: dlm:6 allocation score on v03-b: 0 +clone_color: dlm:7 allocation score on mgmt01: 0 +clone_color: dlm:7 allocation score on v03-a: 0 +clone_color: dlm:7 allocation score on v03-b: 0 +clone_color: dlm:8 allocation score on mgmt01: 0 +clone_color: dlm:8 allocation score on v03-a: 0 +clone_color: dlm:8 allocation score on v03-b: 0 +clone_color: iscsid:0 allocation score on mgmt01: 1 +clone_color: iscsid:0 allocation score on v03-a: 0 +clone_color: iscsid:0 allocation score on v03-b: 0 +clone_color: iscsid:1 allocation score on mgmt01: 0 +clone_color: iscsid:1 allocation score on v03-a: 0 +clone_color: iscsid:1 allocation score on v03-b: 1 +clone_color: iscsid:2 allocation score on mgmt01: 0 +clone_color: iscsid:2 allocation score on v03-a: 1 +clone_color: iscsid:2 allocation score on v03-b: 0 +clone_color: iscsid:3 allocation score on mgmt01: 0 +clone_color: iscsid:3 allocation score on v03-a: 0 +clone_color: iscsid:3 allocation score on v03-b: 0 +clone_color: iscsid:4 allocation score on mgmt01: 0 +clone_color: iscsid:4 allocation score on v03-a: 0 +clone_color: iscsid:4 allocation score on v03-b: 0 +clone_color: iscsid:5 allocation score on mgmt01: 0 +clone_color: iscsid:5 allocation score on v03-a: 0 +clone_color: iscsid:5 allocation score on v03-b: 0 +clone_color: iscsid:6 allocation score on mgmt01: 0 +clone_color: iscsid:6 allocation score on v03-a: 0 +clone_color: iscsid:6 allocation score on v03-b: 0 +clone_color: iscsid:7 allocation score on mgmt01: 0 +clone_color: iscsid:7 allocation score on v03-a: 0 +clone_color: 
iscsid:7 allocation score on v03-b: 0 +clone_color: iscsid:8 allocation score on mgmt01: 0 +clone_color: iscsid:8 allocation score on v03-a: 0 +clone_color: iscsid:8 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:0 allocation score on mgmt01: 1 +clone_color: libvirt-images-fs:0 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:0 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:1 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:1 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:1 allocation score on v03-b: 1 +clone_color: libvirt-images-fs:2 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:2 allocation score on v03-a: 1 +clone_color: libvirt-images-fs:2 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:3 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:3 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:3 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:4 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:4 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:4 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:5 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:5 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:5 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:6 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:6 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:6 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:7 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:7 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:7 allocation score on v03-b: 0 +clone_color: libvirt-images-fs:8 allocation score on mgmt01: 0 +clone_color: libvirt-images-fs:8 allocation score on v03-a: 0 +clone_color: libvirt-images-fs:8 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:0 allocation score on mgmt01: -INFINITY +clone_color: 
libvirt-images-pool:0 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:0 allocation score on v03-b: 1 +clone_color: libvirt-images-pool:1 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:1 allocation score on v03-a: 1 +clone_color: libvirt-images-pool:1 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:2 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:2 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:2 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:3 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:3 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:3 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:4 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:4 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:4 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:5 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:5 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:5 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:6 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:6 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:6 allocation score on v03-b: 0 +clone_color: libvirt-images-pool:7 allocation score on mgmt01: -INFINITY +clone_color: libvirt-images-pool:7 allocation score on v03-a: 0 +clone_color: libvirt-images-pool:7 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:0 allocation score on mgmt01: 1 +clone_color: libvirt-install-fs:0 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:0 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:1 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:1 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:1 allocation score on v03-b: 1 +clone_color: libvirt-install-fs:2 allocation score on mgmt01: 0 
+clone_color: libvirt-install-fs:2 allocation score on v03-a: 1 +clone_color: libvirt-install-fs:2 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:3 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:3 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:3 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:4 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:4 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:4 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:5 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:5 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:5 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:6 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:6 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:6 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:7 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:7 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:7 allocation score on v03-b: 0 +clone_color: libvirt-install-fs:8 allocation score on mgmt01: 0 +clone_color: libvirt-install-fs:8 allocation score on v03-a: 0 +clone_color: libvirt-install-fs:8 allocation score on v03-b: 0 +clone_color: libvirt-qpid:0 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:0 allocation score on v03-a: 0 +clone_color: libvirt-qpid:0 allocation score on v03-b: 1 +clone_color: libvirt-qpid:1 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:1 allocation score on v03-a: 1 +clone_color: libvirt-qpid:1 allocation score on v03-b: 0 +clone_color: libvirt-qpid:2 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:2 allocation score on v03-a: 0 +clone_color: libvirt-qpid:2 allocation score on v03-b: 0 +clone_color: libvirt-qpid:3 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:3 allocation score on v03-a: 0 +clone_color: libvirt-qpid:3 allocation 
score on v03-b: 0 +clone_color: libvirt-qpid:4 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:4 allocation score on v03-a: 0 +clone_color: libvirt-qpid:4 allocation score on v03-b: 0 +clone_color: libvirt-qpid:5 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:5 allocation score on v03-a: 0 +clone_color: libvirt-qpid:5 allocation score on v03-b: 0 +clone_color: libvirt-qpid:6 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:6 allocation score on v03-a: 0 +clone_color: libvirt-qpid:6 allocation score on v03-b: 0 +clone_color: libvirt-qpid:7 allocation score on mgmt01: -INFINITY +clone_color: libvirt-qpid:7 allocation score on v03-a: 0 +clone_color: libvirt-qpid:7 allocation score on v03-b: 0 +clone_color: libvirtd:0 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:0 allocation score on v03-a: 0 +clone_color: libvirtd:0 allocation score on v03-b: 1 +clone_color: libvirtd:1 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:1 allocation score on v03-a: 1 +clone_color: libvirtd:1 allocation score on v03-b: 0 +clone_color: libvirtd:2 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:2 allocation score on v03-a: 0 +clone_color: libvirtd:2 allocation score on v03-b: 0 +clone_color: libvirtd:3 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:3 allocation score on v03-a: 0 +clone_color: libvirtd:3 allocation score on v03-b: 0 +clone_color: libvirtd:4 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:4 allocation score on v03-a: 0 +clone_color: libvirtd:4 allocation score on v03-b: 0 +clone_color: libvirtd:5 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:5 allocation score on v03-a: 0 +clone_color: libvirtd:5 allocation score on v03-b: 0 +clone_color: libvirtd:6 allocation score on mgmt01: -INFINITY +clone_color: libvirtd:6 allocation score on v03-a: 0 +clone_color: libvirtd:6 allocation score on v03-b: 0 +clone_color: libvirtd:7 allocation score on mgmt01: 
-INFINITY +clone_color: libvirtd:7 allocation score on v03-a: 0 +clone_color: libvirtd:7 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:0 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:0 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:0 allocation score on v03-b: 1 +clone_color: mcast-anbriz-net:1 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:1 allocation score on v03-a: 1 +clone_color: mcast-anbriz-net:1 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:2 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:2 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:2 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:3 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:3 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:3 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:4 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:4 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:4 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:5 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:5 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:5 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:6 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:6 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:6 allocation score on v03-b: 0 +clone_color: mcast-anbriz-net:7 allocation score on mgmt01: -INFINITY +clone_color: mcast-anbriz-net:7 allocation score on v03-a: 0 +clone_color: mcast-anbriz-net:7 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:0 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:0 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:0 allocation score on v03-b: 1 +clone_color: mcast-gleb-net:1 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:1 allocation score on v03-a: 1 
+clone_color: mcast-gleb-net:1 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:2 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:2 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:2 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:3 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:3 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:3 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:4 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:4 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:4 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:5 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:5 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:5 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:6 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:6 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:6 allocation score on v03-b: 0 +clone_color: mcast-gleb-net:7 allocation score on mgmt01: -INFINITY +clone_color: mcast-gleb-net:7 allocation score on v03-a: 0 +clone_color: mcast-gleb-net:7 allocation score on v03-b: 0 +clone_color: mcast-test-net:0 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:0 allocation score on v03-a: 0 +clone_color: mcast-test-net:0 allocation score on v03-b: 1 +clone_color: mcast-test-net:1 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:1 allocation score on v03-a: 1 +clone_color: mcast-test-net:1 allocation score on v03-b: 0 +clone_color: mcast-test-net:2 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:2 allocation score on v03-a: 0 +clone_color: mcast-test-net:2 allocation score on v03-b: 0 +clone_color: mcast-test-net:3 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:3 allocation score on v03-a: 0 +clone_color: mcast-test-net:3 allocation score on v03-b: 0 +clone_color: mcast-test-net:4 allocation score 
on mgmt01: -INFINITY +clone_color: mcast-test-net:4 allocation score on v03-a: 0 +clone_color: mcast-test-net:4 allocation score on v03-b: 0 +clone_color: mcast-test-net:5 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:5 allocation score on v03-a: 0 +clone_color: mcast-test-net:5 allocation score on v03-b: 0 +clone_color: mcast-test-net:6 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:6 allocation score on v03-a: 0 +clone_color: mcast-test-net:6 allocation score on v03-b: 0 +clone_color: mcast-test-net:7 allocation score on mgmt01: -INFINITY +clone_color: mcast-test-net:7 allocation score on v03-a: 0 +clone_color: mcast-test-net:7 allocation score on v03-b: 0 +clone_color: multipathd:0 allocation score on mgmt01: 1 +clone_color: multipathd:0 allocation score on v03-a: 0 +clone_color: multipathd:0 allocation score on v03-b: 0 +clone_color: multipathd:1 allocation score on mgmt01: 0 +clone_color: multipathd:1 allocation score on v03-a: 0 +clone_color: multipathd:1 allocation score on v03-b: 1 +clone_color: multipathd:2 allocation score on mgmt01: 0 +clone_color: multipathd:2 allocation score on v03-a: 1 +clone_color: multipathd:2 allocation score on v03-b: 0 +clone_color: multipathd:3 allocation score on mgmt01: 0 +clone_color: multipathd:3 allocation score on v03-a: 0 +clone_color: multipathd:3 allocation score on v03-b: 0 +clone_color: multipathd:4 allocation score on mgmt01: 0 +clone_color: multipathd:4 allocation score on v03-a: 0 +clone_color: multipathd:4 allocation score on v03-b: 0 +clone_color: multipathd:5 allocation score on mgmt01: 0 +clone_color: multipathd:5 allocation score on v03-a: 0 +clone_color: multipathd:5 allocation score on v03-b: 0 +clone_color: multipathd:6 allocation score on mgmt01: 0 +clone_color: multipathd:6 allocation score on v03-a: 0 +clone_color: multipathd:6 allocation score on v03-b: 0 +clone_color: multipathd:7 allocation score on mgmt01: 0 +clone_color: multipathd:7 allocation score on 
v03-a: 0 +clone_color: multipathd:7 allocation score on v03-b: 0 +clone_color: multipathd:8 allocation score on mgmt01: 0 +clone_color: multipathd:8 allocation score on v03-a: 0 +clone_color: multipathd:8 allocation score on v03-b: 0 +clone_color: node-params:0 allocation score on mgmt01: -INFINITY +clone_color: node-params:0 allocation score on v03-a: 0 +clone_color: node-params:0 allocation score on v03-b: 1 +clone_color: node-params:1 allocation score on mgmt01: -INFINITY +clone_color: node-params:1 allocation score on v03-a: 1 +clone_color: node-params:1 allocation score on v03-b: 0 +clone_color: node-params:2 allocation score on mgmt01: -INFINITY +clone_color: node-params:2 allocation score on v03-a: 0 +clone_color: node-params:2 allocation score on v03-b: 0 +clone_color: node-params:3 allocation score on mgmt01: -INFINITY +clone_color: node-params:3 allocation score on v03-a: 0 +clone_color: node-params:3 allocation score on v03-b: 0 +clone_color: node-params:4 allocation score on mgmt01: -INFINITY +clone_color: node-params:4 allocation score on v03-a: 0 +clone_color: node-params:4 allocation score on v03-b: 0 +clone_color: node-params:5 allocation score on mgmt01: -INFINITY +clone_color: node-params:5 allocation score on v03-a: 0 +clone_color: node-params:5 allocation score on v03-b: 0 +clone_color: node-params:6 allocation score on mgmt01: -INFINITY +clone_color: node-params:6 allocation score on v03-a: 0 +clone_color: node-params:6 allocation score on v03-b: 0 +clone_color: node-params:7 allocation score on mgmt01: -INFINITY +clone_color: node-params:7 allocation score on v03-a: 0 +clone_color: node-params:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:0 allocation score on mgmt01: 1 +clone_color: vds-ok-pool-0-iscsi:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:0 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:1 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:1 allocation score on v03-a: 
0 +clone_color: vds-ok-pool-0-iscsi:1 allocation score on v03-b: 1 +clone_color: vds-ok-pool-0-iscsi:2 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:2 allocation score on v03-a: 1 +clone_color: vds-ok-pool-0-iscsi:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:3 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:4 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:4 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:5 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:6 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:7 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-iscsi:8 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-iscsi:8 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-iscsi:8 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:0 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:0 allocation score on v03-b: 1 +clone_color: vds-ok-pool-0-pool:1 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:1 allocation score on v03-a: 1 +clone_color: vds-ok-pool-0-pool:1 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:2 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:2 allocation score on v03-a: 0 +clone_color: 
vds-ok-pool-0-pool:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:3 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:4 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:4 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:5 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:6 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-pool:7 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-0-pool:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-pool:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:0 allocation score on mgmt01: 1 +clone_color: vds-ok-pool-0-vg:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:0 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:1 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:1 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:1 allocation score on v03-b: 1 +clone_color: vds-ok-pool-0-vg:2 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:2 allocation score on v03-a: 1 +clone_color: vds-ok-pool-0-vg:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:3 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:4 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:4 allocation score on v03-b: 0 
+clone_color: vds-ok-pool-0-vg:5 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:6 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:7 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-0-vg:8 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-0-vg:8 allocation score on v03-a: 0 +clone_color: vds-ok-pool-0-vg:8 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:0 allocation score on mgmt01: 1 +clone_color: vds-ok-pool-1-iscsi:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:0 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:1 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:1 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:1 allocation score on v03-b: 1 +clone_color: vds-ok-pool-1-iscsi:2 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:2 allocation score on v03-a: 1 +clone_color: vds-ok-pool-1-iscsi:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:3 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:4 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:4 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:5 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:6 allocation score on mgmt01: 0 
+clone_color: vds-ok-pool-1-iscsi:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:7 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-iscsi:8 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-iscsi:8 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-iscsi:8 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:0 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:0 allocation score on v03-b: 1 +clone_color: vds-ok-pool-1-pool:1 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:1 allocation score on v03-a: 1 +clone_color: vds-ok-pool-1-pool:1 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:2 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:2 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:3 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:4 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:4 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:5 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:6 allocation score on mgmt01: -INFINITY +clone_color: vds-ok-pool-1-pool:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-pool:7 allocation score on mgmt01: 
-INFINITY +clone_color: vds-ok-pool-1-pool:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-pool:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:0 allocation score on mgmt01: 1 +clone_color: vds-ok-pool-1-vg:0 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:0 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:1 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:1 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:1 allocation score on v03-b: 1 +clone_color: vds-ok-pool-1-vg:2 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:2 allocation score on v03-a: 1 +clone_color: vds-ok-pool-1-vg:2 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:3 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:3 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:3 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:4 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:4 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:4 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:5 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:5 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:5 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:6 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:6 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:6 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:7 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:7 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:7 allocation score on v03-b: 0 +clone_color: vds-ok-pool-1-vg:8 allocation score on mgmt01: 0 +clone_color: vds-ok-pool-1-vg:8 allocation score on v03-a: 0 +clone_color: vds-ok-pool-1-vg:8 allocation score on v03-b: 0 +clone_color: vlan1-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:0 allocation score on v03-a: 0 +clone_color: vlan1-if:0 allocation score on v03-b: 1 
+clone_color: vlan1-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:1 allocation score on v03-a: 1 +clone_color: vlan1-if:1 allocation score on v03-b: 0 +clone_color: vlan1-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:2 allocation score on v03-a: 0 +clone_color: vlan1-if:2 allocation score on v03-b: 0 +clone_color: vlan1-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:3 allocation score on v03-a: 0 +clone_color: vlan1-if:3 allocation score on v03-b: 0 +clone_color: vlan1-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:4 allocation score on v03-a: 0 +clone_color: vlan1-if:4 allocation score on v03-b: 0 +clone_color: vlan1-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:5 allocation score on v03-a: 0 +clone_color: vlan1-if:5 allocation score on v03-b: 0 +clone_color: vlan1-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:6 allocation score on v03-a: 0 +clone_color: vlan1-if:6 allocation score on v03-b: 0 +clone_color: vlan1-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan1-if:7 allocation score on v03-a: 0 +clone_color: vlan1-if:7 allocation score on v03-b: 0 +clone_color: vlan101-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:0 allocation score on v03-a: 0 +clone_color: vlan101-if:0 allocation score on v03-b: 1 +clone_color: vlan101-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:1 allocation score on v03-a: 1 +clone_color: vlan101-if:1 allocation score on v03-b: 0 +clone_color: vlan101-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:2 allocation score on v03-a: 0 +clone_color: vlan101-if:2 allocation score on v03-b: 0 +clone_color: vlan101-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:3 allocation score on v03-a: 0 +clone_color: vlan101-if:3 allocation score on v03-b: 0 +clone_color: vlan101-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:4 
allocation score on v03-a: 0 +clone_color: vlan101-if:4 allocation score on v03-b: 0 +clone_color: vlan101-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:5 allocation score on v03-a: 0 +clone_color: vlan101-if:5 allocation score on v03-b: 0 +clone_color: vlan101-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:6 allocation score on v03-a: 0 +clone_color: vlan101-if:6 allocation score on v03-b: 0 +clone_color: vlan101-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan101-if:7 allocation score on v03-a: 0 +clone_color: vlan101-if:7 allocation score on v03-b: 0 +clone_color: vlan102-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:0 allocation score on v03-a: 0 +clone_color: vlan102-if:0 allocation score on v03-b: 1 +clone_color: vlan102-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:1 allocation score on v03-a: 1 +clone_color: vlan102-if:1 allocation score on v03-b: 0 +clone_color: vlan102-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:2 allocation score on v03-a: 0 +clone_color: vlan102-if:2 allocation score on v03-b: 0 +clone_color: vlan102-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:3 allocation score on v03-a: 0 +clone_color: vlan102-if:3 allocation score on v03-b: 0 +clone_color: vlan102-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:4 allocation score on v03-a: 0 +clone_color: vlan102-if:4 allocation score on v03-b: 0 +clone_color: vlan102-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:5 allocation score on v03-a: 0 +clone_color: vlan102-if:5 allocation score on v03-b: 0 +clone_color: vlan102-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:6 allocation score on v03-a: 0 +clone_color: vlan102-if:6 allocation score on v03-b: 0 +clone_color: vlan102-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan102-if:7 allocation score on v03-a: 0 +clone_color: 
vlan102-if:7 allocation score on v03-b: 0 +clone_color: vlan103-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:0 allocation score on v03-a: 0 +clone_color: vlan103-if:0 allocation score on v03-b: 1 +clone_color: vlan103-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:1 allocation score on v03-a: 1 +clone_color: vlan103-if:1 allocation score on v03-b: 0 +clone_color: vlan103-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:2 allocation score on v03-a: 0 +clone_color: vlan103-if:2 allocation score on v03-b: 0 +clone_color: vlan103-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:3 allocation score on v03-a: 0 +clone_color: vlan103-if:3 allocation score on v03-b: 0 +clone_color: vlan103-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:4 allocation score on v03-a: 0 +clone_color: vlan103-if:4 allocation score on v03-b: 0 +clone_color: vlan103-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:5 allocation score on v03-a: 0 +clone_color: vlan103-if:5 allocation score on v03-b: 0 +clone_color: vlan103-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:6 allocation score on v03-a: 0 +clone_color: vlan103-if:6 allocation score on v03-b: 0 +clone_color: vlan103-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan103-if:7 allocation score on v03-a: 0 +clone_color: vlan103-if:7 allocation score on v03-b: 0 +clone_color: vlan104-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:0 allocation score on v03-a: 0 +clone_color: vlan104-if:0 allocation score on v03-b: 1 +clone_color: vlan104-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:1 allocation score on v03-a: 1 +clone_color: vlan104-if:1 allocation score on v03-b: 0 +clone_color: vlan104-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:2 allocation score on v03-a: 0 +clone_color: vlan104-if:2 allocation score on v03-b: 0 
+clone_color: vlan104-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:3 allocation score on v03-a: 0 +clone_color: vlan104-if:3 allocation score on v03-b: 0 +clone_color: vlan104-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:4 allocation score on v03-a: 0 +clone_color: vlan104-if:4 allocation score on v03-b: 0 +clone_color: vlan104-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:5 allocation score on v03-a: 0 +clone_color: vlan104-if:5 allocation score on v03-b: 0 +clone_color: vlan104-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:6 allocation score on v03-a: 0 +clone_color: vlan104-if:6 allocation score on v03-b: 0 +clone_color: vlan104-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan104-if:7 allocation score on v03-a: 0 +clone_color: vlan104-if:7 allocation score on v03-b: 0 +clone_color: vlan200-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:0 allocation score on v03-a: 0 +clone_color: vlan200-if:0 allocation score on v03-b: 1 +clone_color: vlan200-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:1 allocation score on v03-a: 1 +clone_color: vlan200-if:1 allocation score on v03-b: 0 +clone_color: vlan200-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:2 allocation score on v03-a: 0 +clone_color: vlan200-if:2 allocation score on v03-b: 0 +clone_color: vlan200-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:3 allocation score on v03-a: 0 +clone_color: vlan200-if:3 allocation score on v03-b: 0 +clone_color: vlan200-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:4 allocation score on v03-a: 0 +clone_color: vlan200-if:4 allocation score on v03-b: 0 +clone_color: vlan200-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:5 allocation score on v03-a: 0 +clone_color: vlan200-if:5 allocation score on v03-b: 0 +clone_color: vlan200-if:6 allocation score on 
mgmt01: -INFINITY +clone_color: vlan200-if:6 allocation score on v03-a: 0 +clone_color: vlan200-if:6 allocation score on v03-b: 0 +clone_color: vlan200-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan200-if:7 allocation score on v03-a: 0 +clone_color: vlan200-if:7 allocation score on v03-b: 0 +clone_color: vlan3-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:0 allocation score on v03-a: 0 +clone_color: vlan3-if:0 allocation score on v03-b: 1 +clone_color: vlan3-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:1 allocation score on v03-a: 1 +clone_color: vlan3-if:1 allocation score on v03-b: 0 +clone_color: vlan3-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:2 allocation score on v03-a: 0 +clone_color: vlan3-if:2 allocation score on v03-b: 0 +clone_color: vlan3-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:3 allocation score on v03-a: 0 +clone_color: vlan3-if:3 allocation score on v03-b: 0 +clone_color: vlan3-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:4 allocation score on v03-a: 0 +clone_color: vlan3-if:4 allocation score on v03-b: 0 +clone_color: vlan3-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:5 allocation score on v03-a: 0 +clone_color: vlan3-if:5 allocation score on v03-b: 0 +clone_color: vlan3-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:6 allocation score on v03-a: 0 +clone_color: vlan3-if:6 allocation score on v03-b: 0 +clone_color: vlan3-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan3-if:7 allocation score on v03-a: 0 +clone_color: vlan3-if:7 allocation score on v03-b: 0 +clone_color: vlan4-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:0 allocation score on v03-a: 0 +clone_color: vlan4-if:0 allocation score on v03-b: 1 +clone_color: vlan4-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:1 allocation score on v03-a: 1 +clone_color: vlan4-if:1 allocation 
score on v03-b: 0 +clone_color: vlan4-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:2 allocation score on v03-a: 0 +clone_color: vlan4-if:2 allocation score on v03-b: 0 +clone_color: vlan4-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:3 allocation score on v03-a: 0 +clone_color: vlan4-if:3 allocation score on v03-b: 0 +clone_color: vlan4-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:4 allocation score on v03-a: 0 +clone_color: vlan4-if:4 allocation score on v03-b: 0 +clone_color: vlan4-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:5 allocation score on v03-a: 0 +clone_color: vlan4-if:5 allocation score on v03-b: 0 +clone_color: vlan4-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:6 allocation score on v03-a: 0 +clone_color: vlan4-if:6 allocation score on v03-b: 0 +clone_color: vlan4-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan4-if:7 allocation score on v03-a: 0 +clone_color: vlan4-if:7 allocation score on v03-b: 0 +clone_color: vlan5-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:0 allocation score on v03-a: 0 +clone_color: vlan5-if:0 allocation score on v03-b: 1 +clone_color: vlan5-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:1 allocation score on v03-a: 1 +clone_color: vlan5-if:1 allocation score on v03-b: 0 +clone_color: vlan5-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:2 allocation score on v03-a: 0 +clone_color: vlan5-if:2 allocation score on v03-b: 0 +clone_color: vlan5-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:3 allocation score on v03-a: 0 +clone_color: vlan5-if:3 allocation score on v03-b: 0 +clone_color: vlan5-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:4 allocation score on v03-a: 0 +clone_color: vlan5-if:4 allocation score on v03-b: 0 +clone_color: vlan5-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:5 allocation 
score on v03-a: 0 +clone_color: vlan5-if:5 allocation score on v03-b: 0 +clone_color: vlan5-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:6 allocation score on v03-a: 0 +clone_color: vlan5-if:6 allocation score on v03-b: 0 +clone_color: vlan5-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan5-if:7 allocation score on v03-a: 0 +clone_color: vlan5-if:7 allocation score on v03-b: 0 +clone_color: vlan900-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:0 allocation score on v03-a: 0 +clone_color: vlan900-if:0 allocation score on v03-b: 1 +clone_color: vlan900-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:1 allocation score on v03-a: 1 +clone_color: vlan900-if:1 allocation score on v03-b: 0 +clone_color: vlan900-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:2 allocation score on v03-a: 0 +clone_color: vlan900-if:2 allocation score on v03-b: 0 +clone_color: vlan900-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:3 allocation score on v03-a: 0 +clone_color: vlan900-if:3 allocation score on v03-b: 0 +clone_color: vlan900-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:4 allocation score on v03-a: 0 +clone_color: vlan900-if:4 allocation score on v03-b: 0 +clone_color: vlan900-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:5 allocation score on v03-a: 0 +clone_color: vlan900-if:5 allocation score on v03-b: 0 +clone_color: vlan900-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:6 allocation score on v03-a: 0 +clone_color: vlan900-if:6 allocation score on v03-b: 0 +clone_color: vlan900-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan900-if:7 allocation score on v03-a: 0 +clone_color: vlan900-if:7 allocation score on v03-b: 0 +clone_color: vlan909-if:0 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:0 allocation score on v03-a: 0 +clone_color: vlan909-if:0 allocation score on 
v03-b: 1 +clone_color: vlan909-if:1 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:1 allocation score on v03-a: 1 +clone_color: vlan909-if:1 allocation score on v03-b: 0 +clone_color: vlan909-if:2 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:2 allocation score on v03-a: 0 +clone_color: vlan909-if:2 allocation score on v03-b: 0 +clone_color: vlan909-if:3 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:3 allocation score on v03-a: 0 +clone_color: vlan909-if:3 allocation score on v03-b: 0 +clone_color: vlan909-if:4 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:4 allocation score on v03-a: 0 +clone_color: vlan909-if:4 allocation score on v03-b: 0 +clone_color: vlan909-if:5 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:5 allocation score on v03-a: 0 +clone_color: vlan909-if:5 allocation score on v03-b: 0 +clone_color: vlan909-if:6 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:6 allocation score on v03-a: 0 +clone_color: vlan909-if:6 allocation score on v03-b: 0 +clone_color: vlan909-if:7 allocation score on mgmt01: -INFINITY +clone_color: vlan909-if:7 allocation score on v03-a: 0 +clone_color: vlan909-if:7 allocation score on v03-b: 0 +native_color: c5-x64-devel.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: c5-x64-devel.vds-ok.com-vm allocation score on v03-a: 0 +native_color: c5-x64-devel.vds-ok.com-vm allocation score on v03-b: 0 +native_color: c6-x64-devel.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: c6-x64-devel.vds-ok.com-vm allocation score on v03-a: 0 +native_color: c6-x64-devel.vds-ok.com-vm allocation score on v03-b: 0 +native_color: cloudsrv.credo-dialogue.com-vm allocation score on mgmt01: -INFINITY +native_color: cloudsrv.credo-dialogue.com-vm allocation score on v03-a: 0 +native_color: cloudsrv.credo-dialogue.com-vm allocation score on v03-b: 0 +native_color: clvmd:0 allocation score on mgmt01: 1 +native_color: 
clvmd:0 allocation score on v03-a: -INFINITY +native_color: clvmd:0 allocation score on v03-b: -INFINITY +native_color: clvmd:1 allocation score on mgmt01: -INFINITY +native_color: clvmd:1 allocation score on v03-a: -INFINITY +native_color: clvmd:1 allocation score on v03-b: 1 +native_color: clvmd:2 allocation score on mgmt01: -INFINITY +native_color: clvmd:2 allocation score on v03-a: 1 +native_color: clvmd:2 allocation score on v03-b: -INFINITY +native_color: clvmd:3 allocation score on mgmt01: -INFINITY +native_color: clvmd:3 allocation score on v03-a: -INFINITY +native_color: clvmd:3 allocation score on v03-b: -INFINITY +native_color: clvmd:4 allocation score on mgmt01: -INFINITY +native_color: clvmd:4 allocation score on v03-a: -INFINITY +native_color: clvmd:4 allocation score on v03-b: -INFINITY +native_color: clvmd:5 allocation score on mgmt01: -INFINITY +native_color: clvmd:5 allocation score on v03-a: -INFINITY +native_color: clvmd:5 allocation score on v03-b: -INFINITY +native_color: clvmd:6 allocation score on mgmt01: -INFINITY +native_color: clvmd:6 allocation score on v03-a: -INFINITY +native_color: clvmd:6 allocation score on v03-b: -INFINITY +native_color: clvmd:7 allocation score on mgmt01: -INFINITY +native_color: clvmd:7 allocation score on v03-a: -INFINITY +native_color: clvmd:7 allocation score on v03-b: -INFINITY +native_color: clvmd:8 allocation score on mgmt01: -INFINITY +native_color: clvmd:8 allocation score on v03-a: -INFINITY +native_color: clvmd:8 allocation score on v03-b: -INFINITY +native_color: dist.express-consult.org-vm allocation score on mgmt01: -INFINITY +native_color: dist.express-consult.org-vm allocation score on v03-a: -INFINITY +native_color: dist.express-consult.org-vm allocation score on v03-b: -INFINITY +native_color: dist.fly-uni.org-vm allocation score on mgmt01: -INFINITY +native_color: dist.fly-uni.org-vm allocation score on v03-a: -INFINITY +native_color: dist.fly-uni.org-vm allocation score on v03-b: -INFINITY 
+native_color: dlm:0 allocation score on mgmt01: 1 +native_color: dlm:0 allocation score on v03-a: -INFINITY +native_color: dlm:0 allocation score on v03-b: -INFINITY +native_color: dlm:1 allocation score on mgmt01: 0 +native_color: dlm:1 allocation score on v03-a: -INFINITY +native_color: dlm:1 allocation score on v03-b: 1 +native_color: dlm:2 allocation score on mgmt01: 0 +native_color: dlm:2 allocation score on v03-a: 1 +native_color: dlm:2 allocation score on v03-b: 0 +native_color: dlm:3 allocation score on mgmt01: -INFINITY +native_color: dlm:3 allocation score on v03-a: -INFINITY +native_color: dlm:3 allocation score on v03-b: -INFINITY +native_color: dlm:4 allocation score on mgmt01: -INFINITY +native_color: dlm:4 allocation score on v03-a: -INFINITY +native_color: dlm:4 allocation score on v03-b: -INFINITY +native_color: dlm:5 allocation score on mgmt01: -INFINITY +native_color: dlm:5 allocation score on v03-a: -INFINITY +native_color: dlm:5 allocation score on v03-b: -INFINITY +native_color: dlm:6 allocation score on mgmt01: -INFINITY +native_color: dlm:6 allocation score on v03-a: -INFINITY +native_color: dlm:6 allocation score on v03-b: -INFINITY +native_color: dlm:7 allocation score on mgmt01: -INFINITY +native_color: dlm:7 allocation score on v03-a: -INFINITY +native_color: dlm:7 allocation score on v03-b: -INFINITY +native_color: dlm:8 allocation score on mgmt01: -INFINITY +native_color: dlm:8 allocation score on v03-a: -INFINITY +native_color: dlm:8 allocation score on v03-b: -INFINITY +native_color: eu1.ca-pages.com-vm allocation score on mgmt01: -INFINITY +native_color: eu1.ca-pages.com-vm allocation score on v03-a: -INFINITY +native_color: eu1.ca-pages.com-vm allocation score on v03-b: -INFINITY +native_color: eu2.ca-pages.com-vm allocation score on mgmt01: -INFINITY +native_color: eu2.ca-pages.com-vm allocation score on v03-a: -INFINITY +native_color: eu2.ca-pages.com-vm allocation score on v03-b: -INFINITY +native_color: 
f13-x64-devel.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: f13-x64-devel.vds-ok.com-vm allocation score on v03-a: 0 +native_color: f13-x64-devel.vds-ok.com-vm allocation score on v03-b: 0 +native_color: git.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: git.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: git.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: gotin-bbb-vm allocation score on mgmt01: -INFINITY +native_color: gotin-bbb-vm allocation score on v03-a: -INFINITY +native_color: gotin-bbb-vm allocation score on v03-b: -INFINITY +native_color: gw.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: gw.anbriz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: gw.anbriz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: gw.gleb.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: gw.gleb.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: gw.gleb.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: gw.gotin.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: gw.gotin.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: gw.gotin.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: iscsid:0 allocation score on mgmt01: 1 +native_color: iscsid:0 allocation score on v03-a: -INFINITY +native_color: iscsid:0 allocation score on v03-b: -INFINITY +native_color: iscsid:1 allocation score on mgmt01: 0 +native_color: iscsid:1 allocation score on v03-a: -INFINITY +native_color: iscsid:1 allocation score on v03-b: 1 +native_color: iscsid:2 allocation score on mgmt01: 0 +native_color: iscsid:2 allocation score on v03-a: 1 +native_color: iscsid:2 allocation score on v03-b: 0 +native_color: iscsid:3 allocation score on mgmt01: -INFINITY +native_color: iscsid:3 allocation score on v03-a: -INFINITY +native_color: iscsid:3 allocation score on v03-b: -INFINITY +native_color: iscsid:4 
allocation score on mgmt01: -INFINITY +native_color: iscsid:4 allocation score on v03-a: -INFINITY +native_color: iscsid:4 allocation score on v03-b: -INFINITY +native_color: iscsid:5 allocation score on mgmt01: -INFINITY +native_color: iscsid:5 allocation score on v03-a: -INFINITY +native_color: iscsid:5 allocation score on v03-b: -INFINITY +native_color: iscsid:6 allocation score on mgmt01: -INFINITY +native_color: iscsid:6 allocation score on v03-a: -INFINITY +native_color: iscsid:6 allocation score on v03-b: -INFINITY +native_color: iscsid:7 allocation score on mgmt01: -INFINITY +native_color: iscsid:7 allocation score on v03-a: -INFINITY +native_color: iscsid:7 allocation score on v03-b: -INFINITY +native_color: iscsid:8 allocation score on mgmt01: -INFINITY +native_color: iscsid:8 allocation score on v03-a: -INFINITY +native_color: iscsid:8 allocation score on v03-b: -INFINITY +native_color: ktstudio.net-vm allocation score on mgmt01: -INFINITY +native_color: ktstudio.net-vm allocation score on v03-a: 0 +native_color: ktstudio.net-vm allocation score on v03-b: 0 +native_color: lenny-x32-devel-vm allocation score on mgmt01: -INFINITY +native_color: lenny-x32-devel-vm allocation score on v03-a: 0 +native_color: lenny-x32-devel-vm allocation score on v03-b: 0 +native_color: libvirt-images-fs:0 allocation score on mgmt01: 1 +native_color: libvirt-images-fs:0 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:0 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:1 allocation score on mgmt01: 0 +native_color: libvirt-images-fs:1 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:1 allocation score on v03-b: 1 +native_color: libvirt-images-fs:2 allocation score on mgmt01: 0 +native_color: libvirt-images-fs:2 allocation score on v03-a: 1 +native_color: libvirt-images-fs:2 allocation score on v03-b: 0 +native_color: libvirt-images-fs:3 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:3 
allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:3 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:4 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:4 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:4 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:5 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:5 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:5 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:6 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:6 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:6 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:7 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:7 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:7 allocation score on v03-b: -INFINITY +native_color: libvirt-images-fs:8 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-fs:8 allocation score on v03-a: -INFINITY +native_color: libvirt-images-fs:8 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:0 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:0 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:0 allocation score on v03-b: 1 +native_color: libvirt-images-pool:1 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:1 allocation score on v03-a: 1 +native_color: libvirt-images-pool:1 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:2 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:2 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:2 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:3 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:3 allocation score on v03-a: -INFINITY 
+native_color: libvirt-images-pool:3 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:4 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:4 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:4 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:5 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:5 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:5 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:6 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:6 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:6 allocation score on v03-b: -INFINITY +native_color: libvirt-images-pool:7 allocation score on mgmt01: -INFINITY +native_color: libvirt-images-pool:7 allocation score on v03-a: -INFINITY +native_color: libvirt-images-pool:7 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:0 allocation score on mgmt01: 1 +native_color: libvirt-install-fs:0 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:0 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:1 allocation score on mgmt01: 0 +native_color: libvirt-install-fs:1 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:1 allocation score on v03-b: 1 +native_color: libvirt-install-fs:2 allocation score on mgmt01: 0 +native_color: libvirt-install-fs:2 allocation score on v03-a: 1 +native_color: libvirt-install-fs:2 allocation score on v03-b: 0 +native_color: libvirt-install-fs:3 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:3 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:3 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:4 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:4 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:4 allocation score 
on v03-b: -INFINITY +native_color: libvirt-install-fs:5 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:5 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:5 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:6 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:6 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:6 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:7 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:7 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:7 allocation score on v03-b: -INFINITY +native_color: libvirt-install-fs:8 allocation score on mgmt01: -INFINITY +native_color: libvirt-install-fs:8 allocation score on v03-a: -INFINITY +native_color: libvirt-install-fs:8 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:0 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:0 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:0 allocation score on v03-b: 1 +native_color: libvirt-qpid:1 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:1 allocation score on v03-a: 1 +native_color: libvirt-qpid:1 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:2 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:2 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:2 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:3 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:3 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:3 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:4 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:4 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:4 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:5 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:5 
allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:5 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:6 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:6 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:6 allocation score on v03-b: -INFINITY +native_color: libvirt-qpid:7 allocation score on mgmt01: -INFINITY +native_color: libvirt-qpid:7 allocation score on v03-a: -INFINITY +native_color: libvirt-qpid:7 allocation score on v03-b: -INFINITY +native_color: libvirtd:0 allocation score on mgmt01: -INFINITY +native_color: libvirtd:0 allocation score on v03-a: -INFINITY +native_color: libvirtd:0 allocation score on v03-b: 1 +native_color: libvirtd:1 allocation score on mgmt01: -INFINITY +native_color: libvirtd:1 allocation score on v03-a: 1 +native_color: libvirtd:1 allocation score on v03-b: -INFINITY +native_color: libvirtd:2 allocation score on mgmt01: -INFINITY +native_color: libvirtd:2 allocation score on v03-a: -INFINITY +native_color: libvirtd:2 allocation score on v03-b: -INFINITY +native_color: libvirtd:3 allocation score on mgmt01: -INFINITY +native_color: libvirtd:3 allocation score on v03-a: -INFINITY +native_color: libvirtd:3 allocation score on v03-b: -INFINITY +native_color: libvirtd:4 allocation score on mgmt01: -INFINITY +native_color: libvirtd:4 allocation score on v03-a: -INFINITY +native_color: libvirtd:4 allocation score on v03-b: -INFINITY +native_color: libvirtd:5 allocation score on mgmt01: -INFINITY +native_color: libvirtd:5 allocation score on v03-a: -INFINITY +native_color: libvirtd:5 allocation score on v03-b: -INFINITY +native_color: libvirtd:6 allocation score on mgmt01: -INFINITY +native_color: libvirtd:6 allocation score on v03-a: -INFINITY +native_color: libvirtd:6 allocation score on v03-b: -INFINITY +native_color: libvirtd:7 allocation score on mgmt01: -INFINITY +native_color: libvirtd:7 allocation score on v03-a: -INFINITY +native_color: libvirtd:7 allocation score on 
v03-b: -INFINITY +native_color: license.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: license.anbriz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: license.anbriz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre01-left.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre01-left.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre01-left.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre01-right.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre01-right.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre01-right.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre02-left.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre02-left.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre02-left.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre02-right.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre02-right.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre02-right.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre03-left.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre03-left.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre03-left.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre03-right.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre03-right.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre03-right.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre04-left.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre04-left.vds-ok.com-vm allocation score on v03-a: 0 +native_color: lustre04-left.vds-ok.com-vm allocation score on v03-b: 0 +native_color: lustre04-right.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: lustre04-right.vds-ok.com-vm allocation score on v03-a: 0 +native_color: 
lustre04-right.vds-ok.com-vm allocation score on v03-b: 0 +native_color: maxb-c55-vm allocation score on mgmt01: -INFINITY +native_color: maxb-c55-vm allocation score on v03-a: -INFINITY +native_color: maxb-c55-vm allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:0 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:0 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:0 allocation score on v03-b: 1 +native_color: mcast-anbriz-net:1 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:1 allocation score on v03-a: 1 +native_color: mcast-anbriz-net:1 allocation score on v03-b: 0 +native_color: mcast-anbriz-net:2 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:2 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:2 allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:3 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:3 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:3 allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:4 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:4 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:4 allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:5 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:5 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:5 allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:6 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:6 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:6 allocation score on v03-b: -INFINITY +native_color: mcast-anbriz-net:7 allocation score on mgmt01: -INFINITY +native_color: mcast-anbriz-net:7 allocation score on v03-a: -INFINITY +native_color: mcast-anbriz-net:7 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:0 allocation score on mgmt01: 
-INFINITY +native_color: mcast-gleb-net:0 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:0 allocation score on v03-b: 1 +native_color: mcast-gleb-net:1 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:1 allocation score on v03-a: 1 +native_color: mcast-gleb-net:1 allocation score on v03-b: 0 +native_color: mcast-gleb-net:2 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:2 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:2 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:3 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:3 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:3 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:4 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:4 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:4 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:5 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:5 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:5 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:6 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:6 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:6 allocation score on v03-b: -INFINITY +native_color: mcast-gleb-net:7 allocation score on mgmt01: -INFINITY +native_color: mcast-gleb-net:7 allocation score on v03-a: -INFINITY +native_color: mcast-gleb-net:7 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:0 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:0 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:0 allocation score on v03-b: 1 +native_color: mcast-test-net:1 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:1 allocation score on v03-a: 1 +native_color: mcast-test-net:1 allocation score on v03-b: 0 +native_color: 
mcast-test-net:2 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:2 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:2 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:3 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:3 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:3 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:4 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:4 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:4 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:5 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:5 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:5 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:6 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:6 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:6 allocation score on v03-b: -INFINITY +native_color: mcast-test-net:7 allocation score on mgmt01: -INFINITY +native_color: mcast-test-net:7 allocation score on v03-a: -INFINITY +native_color: mcast-test-net:7 allocation score on v03-b: -INFINITY +native_color: metae.ru-vm allocation score on mgmt01: -INFINITY +native_color: metae.ru-vm allocation score on v03-a: -INFINITY +native_color: metae.ru-vm allocation score on v03-b: -INFINITY +native_color: multipathd:0 allocation score on mgmt01: 1 +native_color: multipathd:0 allocation score on v03-a: -INFINITY +native_color: multipathd:0 allocation score on v03-b: -INFINITY +native_color: multipathd:1 allocation score on mgmt01: 0 +native_color: multipathd:1 allocation score on v03-a: -INFINITY +native_color: multipathd:1 allocation score on v03-b: 1 +native_color: multipathd:2 allocation score on mgmt01: 0 +native_color: multipathd:2 allocation score on v03-a: 1 +native_color: multipathd:2 allocation score on v03-b: 0 +native_color: multipathd:3 
allocation score on mgmt01: -INFINITY +native_color: multipathd:3 allocation score on v03-a: -INFINITY +native_color: multipathd:3 allocation score on v03-b: -INFINITY +native_color: multipathd:4 allocation score on mgmt01: -INFINITY +native_color: multipathd:4 allocation score on v03-a: -INFINITY +native_color: multipathd:4 allocation score on v03-b: -INFINITY +native_color: multipathd:5 allocation score on mgmt01: -INFINITY +native_color: multipathd:5 allocation score on v03-a: -INFINITY +native_color: multipathd:5 allocation score on v03-b: -INFINITY +native_color: multipathd:6 allocation score on mgmt01: -INFINITY +native_color: multipathd:6 allocation score on v03-a: -INFINITY +native_color: multipathd:6 allocation score on v03-b: -INFINITY +native_color: multipathd:7 allocation score on mgmt01: -INFINITY +native_color: multipathd:7 allocation score on v03-a: -INFINITY +native_color: multipathd:7 allocation score on v03-b: -INFINITY +native_color: multipathd:8 allocation score on mgmt01: -INFINITY +native_color: multipathd:8 allocation score on v03-a: -INFINITY +native_color: multipathd:8 allocation score on v03-b: -INFINITY +native_color: node-params:0 allocation score on mgmt01: -INFINITY +native_color: node-params:0 allocation score on v03-a: -INFINITY +native_color: node-params:0 allocation score on v03-b: 1 +native_color: node-params:1 allocation score on mgmt01: -INFINITY +native_color: node-params:1 allocation score on v03-a: 1 +native_color: node-params:1 allocation score on v03-b: 0 +native_color: node-params:2 allocation score on mgmt01: -INFINITY +native_color: node-params:2 allocation score on v03-a: -INFINITY +native_color: node-params:2 allocation score on v03-b: -INFINITY +native_color: node-params:3 allocation score on mgmt01: -INFINITY +native_color: node-params:3 allocation score on v03-a: -INFINITY +native_color: node-params:3 allocation score on v03-b: -INFINITY +native_color: node-params:4 allocation score on mgmt01: -INFINITY 
+native_color: node-params:4 allocation score on v03-a: -INFINITY +native_color: node-params:4 allocation score on v03-b: -INFINITY +native_color: node-params:5 allocation score on mgmt01: -INFINITY +native_color: node-params:5 allocation score on v03-a: -INFINITY +native_color: node-params:5 allocation score on v03-b: -INFINITY +native_color: node-params:6 allocation score on mgmt01: -INFINITY +native_color: node-params:6 allocation score on v03-a: -INFINITY +native_color: node-params:6 allocation score on v03-b: -INFINITY +native_color: node-params:7 allocation score on mgmt01: -INFINITY +native_color: node-params:7 allocation score on v03-a: -INFINITY +native_color: node-params:7 allocation score on v03-b: -INFINITY +native_color: rodovoepomestie.ru-vm allocation score on mgmt01: -INFINITY +native_color: rodovoepomestie.ru-vm allocation score on v03-a: -INFINITY +native_color: rodovoepomestie.ru-vm allocation score on v03-b: -INFINITY +native_color: stonith-mgmt01 allocation score on mgmt01: -INFINITY +native_color: stonith-mgmt01 allocation score on v03-a: 0 +native_color: stonith-mgmt01 allocation score on v03-b: 0 +native_color: stonith-mgmt02 allocation score on mgmt01: 0 +native_color: stonith-mgmt02 allocation score on v03-a: 0 +native_color: stonith-mgmt02 allocation score on v03-b: 0 +native_color: stonith-v02-a allocation score on mgmt01: -INFINITY +native_color: stonith-v02-a allocation score on v03-a: -INFINITY +native_color: stonith-v02-a allocation score on v03-b: -INFINITY +native_color: stonith-v02-b allocation score on mgmt01: -INFINITY +native_color: stonith-v02-b allocation score on v03-a: -INFINITY +native_color: stonith-v02-b allocation score on v03-b: -INFINITY +native_color: stonith-v02-c allocation score on mgmt01: -INFINITY +native_color: stonith-v02-c allocation score on v03-a: -INFINITY +native_color: stonith-v02-c allocation score on v03-b: -INFINITY +native_color: stonith-v02-d allocation score on mgmt01: -INFINITY +native_color: 
stonith-v02-d allocation score on v03-a: -INFINITY +native_color: stonith-v02-d allocation score on v03-b: -INFINITY +native_color: stonith-v03-a allocation score on mgmt01: 0 +native_color: stonith-v03-a allocation score on v03-a: -INFINITY +native_color: stonith-v03-a allocation score on v03-b: 0 +native_color: stonith-v03-b allocation score on mgmt01: 0 +native_color: stonith-v03-b allocation score on v03-a: 0 +native_color: stonith-v03-b allocation score on v03-b: -INFINITY +native_color: stonith-v03-c allocation score on mgmt01: -INFINITY +native_color: stonith-v03-c allocation score on v03-a: -INFINITY +native_color: stonith-v03-c allocation score on v03-b: -INFINITY +native_color: stonith-v03-d allocation score on mgmt01: -INFINITY +native_color: stonith-v03-d allocation score on v03-a: -INFINITY +native_color: stonith-v03-d allocation score on v03-b: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: test-01.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: test-01.vds-ok.com-vm allocation score on v03-a: 0 +native_color: test-01.vds-ok.com-vm allocation score on v03-b: 0 +native_color: ubuntu9.10-gotin-vm allocation score on mgmt01: -INFINITY +native_color: ubuntu9.10-gotin-vm allocation score on v03-a: -INFINITY +native_color: ubuntu9.10-gotin-vm allocation score on v03-b: -INFINITY +native_color: vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: 
vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: 
vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on 
mgmt01: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-0-iscsi:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-iscsi:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-0-iscsi:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 
allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-pool:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:1 allocation score on v03-a: 1 +native_color: vds-ok-pool-0-pool:1 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:0 allocation score on mgmt01: 1 
+native_color: vds-ok-pool-0-vg:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-vg:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-0-vg:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-1-iscsi:0 allocation score on v03-a: -INFINITY +native_color: 
vds-ok-pool-1-iscsi:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-1-iscsi:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-iscsi:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:0 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:0 allocation score on v03-a: -INFINITY +native_color: 
vds-ok-pool-1-pool:0 allocation score on v03-b: 1 +native_color: vds-ok-pool-1-pool:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:1 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-pool:1 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-1-vg:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on v03-b: 1 
+native_color: vds-ok-pool-1-vg:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-vg:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on v03-b: -INFINITY +native_color: vlan1-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:0 allocation score on v03-a: -INFINITY +native_color: vlan1-if:0 allocation score on v03-b: 1 +native_color: vlan1-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:1 allocation score on v03-a: 1 +native_color: vlan1-if:1 allocation score on v03-b: 0 +native_color: vlan1-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:2 allocation score on v03-a: -INFINITY +native_color: vlan1-if:2 
allocation score on v03-b: -INFINITY +native_color: vlan1-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:3 allocation score on v03-a: -INFINITY +native_color: vlan1-if:3 allocation score on v03-b: -INFINITY +native_color: vlan1-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:4 allocation score on v03-a: -INFINITY +native_color: vlan1-if:4 allocation score on v03-b: -INFINITY +native_color: vlan1-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:5 allocation score on v03-a: -INFINITY +native_color: vlan1-if:5 allocation score on v03-b: -INFINITY +native_color: vlan1-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:6 allocation score on v03-a: -INFINITY +native_color: vlan1-if:6 allocation score on v03-b: -INFINITY +native_color: vlan1-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:7 allocation score on v03-a: -INFINITY +native_color: vlan1-if:7 allocation score on v03-b: -INFINITY +native_color: vlan101-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:0 allocation score on v03-a: -INFINITY +native_color: vlan101-if:0 allocation score on v03-b: 1 +native_color: vlan101-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:1 allocation score on v03-a: 1 +native_color: vlan101-if:1 allocation score on v03-b: 0 +native_color: vlan101-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:2 allocation score on v03-a: -INFINITY +native_color: vlan101-if:2 allocation score on v03-b: -INFINITY +native_color: vlan101-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:3 allocation score on v03-a: -INFINITY +native_color: vlan101-if:3 allocation score on v03-b: -INFINITY +native_color: vlan101-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:4 allocation score on v03-a: -INFINITY +native_color: vlan101-if:4 allocation score on v03-b: -INFINITY +native_color: vlan101-if:5 allocation score on mgmt01: 
-INFINITY +native_color: vlan101-if:5 allocation score on v03-a: -INFINITY +native_color: vlan101-if:5 allocation score on v03-b: -INFINITY +native_color: vlan101-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:6 allocation score on v03-a: -INFINITY +native_color: vlan101-if:6 allocation score on v03-b: -INFINITY +native_color: vlan101-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:7 allocation score on v03-a: -INFINITY +native_color: vlan101-if:7 allocation score on v03-b: -INFINITY +native_color: vlan102-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:0 allocation score on v03-a: -INFINITY +native_color: vlan102-if:0 allocation score on v03-b: 1 +native_color: vlan102-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:1 allocation score on v03-a: 1 +native_color: vlan102-if:1 allocation score on v03-b: 0 +native_color: vlan102-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:2 allocation score on v03-a: -INFINITY +native_color: vlan102-if:2 allocation score on v03-b: -INFINITY +native_color: vlan102-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:3 allocation score on v03-a: -INFINITY +native_color: vlan102-if:3 allocation score on v03-b: -INFINITY +native_color: vlan102-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:4 allocation score on v03-a: -INFINITY +native_color: vlan102-if:4 allocation score on v03-b: -INFINITY +native_color: vlan102-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:5 allocation score on v03-a: -INFINITY +native_color: vlan102-if:5 allocation score on v03-b: -INFINITY +native_color: vlan102-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:6 allocation score on v03-a: -INFINITY +native_color: vlan102-if:6 allocation score on v03-b: -INFINITY +native_color: vlan102-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:7 allocation score on 
v03-a: -INFINITY +native_color: vlan102-if:7 allocation score on v03-b: -INFINITY +native_color: vlan103-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:0 allocation score on v03-a: -INFINITY +native_color: vlan103-if:0 allocation score on v03-b: 1 +native_color: vlan103-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:1 allocation score on v03-a: 1 +native_color: vlan103-if:1 allocation score on v03-b: 0 +native_color: vlan103-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:2 allocation score on v03-a: -INFINITY +native_color: vlan103-if:2 allocation score on v03-b: -INFINITY +native_color: vlan103-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:3 allocation score on v03-a: -INFINITY +native_color: vlan103-if:3 allocation score on v03-b: -INFINITY +native_color: vlan103-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:4 allocation score on v03-a: -INFINITY +native_color: vlan103-if:4 allocation score on v03-b: -INFINITY +native_color: vlan103-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:5 allocation score on v03-a: -INFINITY +native_color: vlan103-if:5 allocation score on v03-b: -INFINITY +native_color: vlan103-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:6 allocation score on v03-a: -INFINITY +native_color: vlan103-if:6 allocation score on v03-b: -INFINITY +native_color: vlan103-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:7 allocation score on v03-a: -INFINITY +native_color: vlan103-if:7 allocation score on v03-b: -INFINITY +native_color: vlan104-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:0 allocation score on v03-a: -INFINITY +native_color: vlan104-if:0 allocation score on v03-b: 1 +native_color: vlan104-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:1 allocation score on v03-a: 1 +native_color: vlan104-if:1 allocation score on v03-b: 0 
+native_color: vlan104-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:2 allocation score on v03-a: -INFINITY +native_color: vlan104-if:2 allocation score on v03-b: -INFINITY +native_color: vlan104-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:3 allocation score on v03-a: -INFINITY +native_color: vlan104-if:3 allocation score on v03-b: -INFINITY +native_color: vlan104-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:4 allocation score on v03-a: -INFINITY +native_color: vlan104-if:4 allocation score on v03-b: -INFINITY +native_color: vlan104-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:5 allocation score on v03-a: -INFINITY +native_color: vlan104-if:5 allocation score on v03-b: -INFINITY +native_color: vlan104-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:6 allocation score on v03-a: -INFINITY +native_color: vlan104-if:6 allocation score on v03-b: -INFINITY +native_color: vlan104-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:7 allocation score on v03-a: -INFINITY +native_color: vlan104-if:7 allocation score on v03-b: -INFINITY +native_color: vlan200-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:0 allocation score on v03-a: -INFINITY +native_color: vlan200-if:0 allocation score on v03-b: 1 +native_color: vlan200-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:1 allocation score on v03-a: 1 +native_color: vlan200-if:1 allocation score on v03-b: 0 +native_color: vlan200-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:2 allocation score on v03-a: -INFINITY +native_color: vlan200-if:2 allocation score on v03-b: -INFINITY +native_color: vlan200-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:3 allocation score on v03-a: -INFINITY +native_color: vlan200-if:3 allocation score on v03-b: -INFINITY +native_color: vlan200-if:4 allocation score on mgmt01: 
-INFINITY +native_color: vlan200-if:4 allocation score on v03-a: -INFINITY +native_color: vlan200-if:4 allocation score on v03-b: -INFINITY +native_color: vlan200-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:5 allocation score on v03-a: -INFINITY +native_color: vlan200-if:5 allocation score on v03-b: -INFINITY +native_color: vlan200-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:6 allocation score on v03-a: -INFINITY +native_color: vlan200-if:6 allocation score on v03-b: -INFINITY +native_color: vlan200-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:7 allocation score on v03-a: -INFINITY +native_color: vlan200-if:7 allocation score on v03-b: -INFINITY +native_color: vlan3-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:0 allocation score on v03-a: -INFINITY +native_color: vlan3-if:0 allocation score on v03-b: 1 +native_color: vlan3-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:1 allocation score on v03-a: 1 +native_color: vlan3-if:1 allocation score on v03-b: 0 +native_color: vlan3-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:2 allocation score on v03-a: -INFINITY +native_color: vlan3-if:2 allocation score on v03-b: -INFINITY +native_color: vlan3-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:3 allocation score on v03-a: -INFINITY +native_color: vlan3-if:3 allocation score on v03-b: -INFINITY +native_color: vlan3-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:4 allocation score on v03-a: -INFINITY +native_color: vlan3-if:4 allocation score on v03-b: -INFINITY +native_color: vlan3-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:5 allocation score on v03-a: -INFINITY +native_color: vlan3-if:5 allocation score on v03-b: -INFINITY +native_color: vlan3-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:6 allocation score on v03-a: -INFINITY +native_color: vlan3-if:6 
allocation score on v03-b: -INFINITY +native_color: vlan3-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:7 allocation score on v03-a: -INFINITY +native_color: vlan3-if:7 allocation score on v03-b: -INFINITY +native_color: vlan4-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:0 allocation score on v03-a: -INFINITY +native_color: vlan4-if:0 allocation score on v03-b: 1 +native_color: vlan4-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:1 allocation score on v03-a: 1 +native_color: vlan4-if:1 allocation score on v03-b: 0 +native_color: vlan4-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:2 allocation score on v03-a: -INFINITY +native_color: vlan4-if:2 allocation score on v03-b: -INFINITY +native_color: vlan4-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:3 allocation score on v03-a: -INFINITY +native_color: vlan4-if:3 allocation score on v03-b: -INFINITY +native_color: vlan4-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:4 allocation score on v03-a: -INFINITY +native_color: vlan4-if:4 allocation score on v03-b: -INFINITY +native_color: vlan4-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:5 allocation score on v03-a: -INFINITY +native_color: vlan4-if:5 allocation score on v03-b: -INFINITY +native_color: vlan4-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:6 allocation score on v03-a: -INFINITY +native_color: vlan4-if:6 allocation score on v03-b: -INFINITY +native_color: vlan4-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:7 allocation score on v03-a: -INFINITY +native_color: vlan4-if:7 allocation score on v03-b: -INFINITY +native_color: vlan5-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:0 allocation score on v03-a: -INFINITY +native_color: vlan5-if:0 allocation score on v03-b: 1 +native_color: vlan5-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:1 
allocation score on v03-a: 1 +native_color: vlan5-if:1 allocation score on v03-b: 0 +native_color: vlan5-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:2 allocation score on v03-a: -INFINITY +native_color: vlan5-if:2 allocation score on v03-b: -INFINITY +native_color: vlan5-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:3 allocation score on v03-a: -INFINITY +native_color: vlan5-if:3 allocation score on v03-b: -INFINITY +native_color: vlan5-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:4 allocation score on v03-a: -INFINITY +native_color: vlan5-if:4 allocation score on v03-b: -INFINITY +native_color: vlan5-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:5 allocation score on v03-a: -INFINITY +native_color: vlan5-if:5 allocation score on v03-b: -INFINITY +native_color: vlan5-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:6 allocation score on v03-a: -INFINITY +native_color: vlan5-if:6 allocation score on v03-b: -INFINITY +native_color: vlan5-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:7 allocation score on v03-a: -INFINITY +native_color: vlan5-if:7 allocation score on v03-b: -INFINITY +native_color: vlan900-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:0 allocation score on v03-a: -INFINITY +native_color: vlan900-if:0 allocation score on v03-b: 1 +native_color: vlan900-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:1 allocation score on v03-a: 1 +native_color: vlan900-if:1 allocation score on v03-b: 0 +native_color: vlan900-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:2 allocation score on v03-a: -INFINITY +native_color: vlan900-if:2 allocation score on v03-b: -INFINITY +native_color: vlan900-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:3 allocation score on v03-a: -INFINITY +native_color: vlan900-if:3 allocation score on v03-b: -INFINITY +native_color: 
vlan900-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:4 allocation score on v03-a: -INFINITY +native_color: vlan900-if:4 allocation score on v03-b: -INFINITY +native_color: vlan900-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:5 allocation score on v03-a: -INFINITY +native_color: vlan900-if:5 allocation score on v03-b: -INFINITY +native_color: vlan900-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:6 allocation score on v03-a: -INFINITY +native_color: vlan900-if:6 allocation score on v03-b: -INFINITY +native_color: vlan900-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:7 allocation score on v03-a: -INFINITY +native_color: vlan900-if:7 allocation score on v03-b: -INFINITY +native_color: vlan909-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:0 allocation score on v03-a: -INFINITY +native_color: vlan909-if:0 allocation score on v03-b: 1 +native_color: vlan909-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:1 allocation score on v03-a: 1 +native_color: vlan909-if:1 allocation score on v03-b: 0 +native_color: vlan909-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:2 allocation score on v03-a: -INFINITY +native_color: vlan909-if:2 allocation score on v03-b: -INFINITY +native_color: vlan909-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:3 allocation score on v03-a: -INFINITY +native_color: vlan909-if:3 allocation score on v03-b: -INFINITY +native_color: vlan909-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:4 allocation score on v03-a: -INFINITY +native_color: vlan909-if:4 allocation score on v03-b: -INFINITY +native_color: vlan909-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:5 allocation score on v03-a: -INFINITY +native_color: vlan909-if:5 allocation score on v03-b: -INFINITY +native_color: vlan909-if:6 allocation score on mgmt01: -INFINITY 
+native_color: vlan909-if:6 allocation score on v03-a: -INFINITY +native_color: vlan909-if:6 allocation score on v03-b: -INFINITY +native_color: vlan909-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:7 allocation score on v03-a: -INFINITY +native_color: vlan909-if:7 allocation score on v03-b: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on mgmt01: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on v03-a: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on v03-b: -INFINITY diff --git a/pengine/test10/load-stopped-loop.summary b/pengine/test10/load-stopped-loop.summary new file mode 100644 index 0000000000..c14e05ddce --- /dev/null +++ b/pengine/test10/load-stopped-loop.summary @@ -0,0 +1,354 @@ + +Current cluster status: +Online: [ mgmt01 v03-a v03-b ] + + stonith-v02-a (stonith:fence_ipmilan): Stopped + stonith-v02-b (stonith:fence_ipmilan): Stopped + stonith-v02-c (stonith:fence_ipmilan): Stopped + stonith-v02-d (stonith:fence_ipmilan): Stopped + stonith-mgmt01 (stonith:fence_xvm): Started v03-b + stonith-mgmt02 (stonith:meatware): Started mgmt01 + stonith-v03-c (stonith:fence_ipmilan): Stopped + stonith-v03-a (stonith:fence_ipmilan): Started v03-b + stonith-v03-b (stonith:fence_ipmilan): Started v03-a + stonith-v03-d (stonith:fence_ipmilan): Stopped + Clone Set: cl-clvmd [clvmd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ clvmd:3 clvmd:4 clvmd:5 clvmd:6 clvmd:7 clvmd:8 ] + Clone Set: cl-dlm [dlm] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ dlm:3 dlm:4 dlm:5 dlm:6 dlm:7 dlm:8 ] + Clone Set: cl-iscsid [iscsid] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ iscsid:3 iscsid:4 iscsid:5 iscsid:6 iscsid:7 iscsid:8 ] + Clone Set: cl-libvirtd [libvirtd] + Started: [ v03-a v03-b ] + Stopped: [ libvirtd:2 libvirtd:3 libvirtd:4 libvirtd:5 libvirtd:6 libvirtd:7 ] + Clone Set: cl-multipathd [multipathd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ multipathd:3 multipathd:4 multipathd:5 
multipathd:6 multipathd:7 multipathd:8 ] + Clone Set: cl-node-params [node-params] + Started: [ v03-a v03-b ] + Stopped: [ node-params:2 node-params:3 node-params:4 node-params:5 node-params:6 node-params:7 ] + Clone Set: cl-vlan1-if [vlan1-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan1-if:2 vlan1-if:3 vlan1-if:4 vlan1-if:5 vlan1-if:6 vlan1-if:7 ] + Clone Set: cl-vlan101-if [vlan101-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan101-if:2 vlan101-if:3 vlan101-if:4 vlan101-if:5 vlan101-if:6 vlan101-if:7 ] + Clone Set: cl-vlan102-if [vlan102-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan102-if:2 vlan102-if:3 vlan102-if:4 vlan102-if:5 vlan102-if:6 vlan102-if:7 ] + Clone Set: cl-vlan103-if [vlan103-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan103-if:2 vlan103-if:3 vlan103-if:4 vlan103-if:5 vlan103-if:6 vlan103-if:7 ] + Clone Set: cl-vlan104-if [vlan104-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan104-if:2 vlan104-if:3 vlan104-if:4 vlan104-if:5 vlan104-if:6 vlan104-if:7 ] + Clone Set: cl-vlan3-if [vlan3-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan3-if:2 vlan3-if:3 vlan3-if:4 vlan3-if:5 vlan3-if:6 vlan3-if:7 ] + Clone Set: cl-vlan4-if [vlan4-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan4-if:2 vlan4-if:3 vlan4-if:4 vlan4-if:5 vlan4-if:6 vlan4-if:7 ] + Clone Set: cl-vlan5-if [vlan5-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan5-if:2 vlan5-if:3 vlan5-if:4 vlan5-if:5 vlan5-if:6 vlan5-if:7 ] + Clone Set: cl-vlan900-if [vlan900-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan900-if:2 vlan900-if:3 vlan900-if:4 vlan900-if:5 vlan900-if:6 vlan900-if:7 ] + Clone Set: cl-vlan909-if [vlan909-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan909-if:2 vlan909-if:3 vlan909-if:4 vlan909-if:5 vlan909-if:6 vlan909-if:7 ] + Clone Set: cl-libvirt-images-fs [libvirt-images-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-images-fs:3 libvirt-images-fs:4 libvirt-images-fs:5 libvirt-images-fs:6 libvirt-images-fs:7 libvirt-images-fs:8 ] + Clone Set: 
cl-libvirt-install-fs [libvirt-install-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-install-fs:3 libvirt-install-fs:4 libvirt-install-fs:5 libvirt-install-fs:6 libvirt-install-fs:7 libvirt-install-fs:8 ] + Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-iscsi:3 vds-ok-pool-0-iscsi:4 vds-ok-pool-0-iscsi:5 vds-ok-pool-0-iscsi:6 vds-ok-pool-0-iscsi:7 vds-ok-pool-0-iscsi:8 ] + Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-vg:3 vds-ok-pool-0-vg:4 vds-ok-pool-0-vg:5 vds-ok-pool-0-vg:6 vds-ok-pool-0-vg:7 vds-ok-pool-0-vg:8 ] + Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-iscsi:3 vds-ok-pool-1-iscsi:4 vds-ok-pool-1-iscsi:5 vds-ok-pool-1-iscsi:6 vds-ok-pool-1-iscsi:7 vds-ok-pool-1-iscsi:8 ] + Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-vg:3 vds-ok-pool-1-vg:4 vds-ok-pool-1-vg:5 vds-ok-pool-1-vg:6 vds-ok-pool-1-vg:7 vds-ok-pool-1-vg:8 ] + Clone Set: cl-libvirt-images-pool [libvirt-images-pool] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-images-pool:2 libvirt-images-pool:3 libvirt-images-pool:4 libvirt-images-pool:5 libvirt-images-pool:6 libvirt-images-pool:7 ] + Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-0-pool:2 vds-ok-pool-0-pool:3 vds-ok-pool-0-pool:4 vds-ok-pool-0-pool:5 vds-ok-pool-0-pool:6 vds-ok-pool-0-pool:7 ] + Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-1-pool:2 vds-ok-pool-1-pool:3 vds-ok-pool-1-pool:4 vds-ok-pool-1-pool:5 vds-ok-pool-1-pool:6 vds-ok-pool-1-pool:7 ] + git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm 
(ocf::vds-ok:VirtualDomain): Started v03-a + vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + Clone Set: cl-vlan200-if [vlan200-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan200-if:2 vlan200-if:3 vlan200-if:4 vlan200-if:5 vlan200-if:6 vlan200-if:7 ] + lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a + dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped + eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped + maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped + metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped + 
c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-test-net [mcast-test-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-test-net:2 mcast-test-net:3 mcast-test-net:4 mcast-test-net:5 mcast-test-net:6 mcast-test-net:7 ] + dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped + ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a + cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-anbriz-net:2 mcast-anbriz-net:3 mcast-anbriz-net:4 mcast-anbriz-net:5 mcast-anbriz-net:6 mcast-anbriz-net:7 ] + gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-libvirt-qpid [libvirt-qpid] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-qpid:2 libvirt-qpid:3 libvirt-qpid:4 libvirt-qpid:5 libvirt-qpid:6 libvirt-qpid:7 ] + gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: 
cl-mcast-gleb-net [mcast-gleb-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-gleb-net:2 mcast-gleb-net:3 mcast-gleb-net:4 mcast-gleb-net:5 mcast-gleb-net:6 mcast-gleb-net:7 ] + +Transition Summary: + * Reload vds-ok-pool-0-iscsi:0 (Started mgmt01) + * Reload vds-ok-pool-0-iscsi:1 (Started v03-b) + * Reload vds-ok-pool-0-iscsi:2 (Started v03-a) + * Reload vds-ok-pool-1-iscsi:0 (Started mgmt01) + * Reload vds-ok-pool-1-iscsi:1 (Started v03-b) + * Reload vds-ok-pool-1-iscsi:2 (Started v03-a) + * Restart stonith-v03-b (Started v03-a) + * Restart stonith-v03-a (Started v03-b) + * Migrate license.anbriz.vds-ok.com-vm (Started v03-b -> v03-a) + * Migrate terminal0.anbriz.vds-ok.com-vm (Started v03-a -> v03-b) + * Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (v03-a) + +Executing cluster transition: + * Resource action: vds-ok-pool-0-iscsi:1 reload on mgmt01 + * Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01 + * Resource action: vds-ok-pool-0-iscsi:0 reload on v03-b + * Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b + * Resource action: vds-ok-pool-0-iscsi:2 reload on v03-a + * Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a + * Resource action: vds-ok-pool-1-iscsi:1 reload on mgmt01 + * Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01 + * Resource action: vds-ok-pool-1-iscsi:0 reload on v03-b + * Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b + * Resource action: vds-ok-pool-1-iscsi:2 reload on v03-a + * Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a + * Resource action: stonith-v03-b stop on v03-a + * Resource action: stonith-v03-b start on v03-a + * Resource action: stonith-v03-b monitor=60000 on v03-a + * Resource action: stonith-v03-a stop on v03-b + * Resource action: stonith-v03-a start on v03-b + * Resource action: stonith-v03-a monitor=60000 on v03-b + * Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm 
migrate_to on v03-a + * Pseudo action: load_stopped_mgmt01 + * Resource action: license.anbriz.vds-ok.com-vm migrate_from on v03-a + * Resource action: license.anbriz.vds-ok.com-vm stop on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a + * Pseudo action: load_stopped_v03-b + * Pseudo action: load_stopped_v03-a + * Pseudo action: all_stopped + * Pseudo action: license.anbriz.vds-ok.com-vm_start_0 + * Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0 + * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a + * Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a + * Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b + * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a + +Revised cluster status: +Online: [ mgmt01 v03-a v03-b ] + + stonith-v02-a (stonith:fence_ipmilan): Stopped + stonith-v02-b (stonith:fence_ipmilan): Stopped + stonith-v02-c (stonith:fence_ipmilan): Stopped + stonith-v02-d (stonith:fence_ipmilan): Stopped + stonith-mgmt01 (stonith:fence_xvm): Started v03-b + stonith-mgmt02 (stonith:meatware): Started mgmt01 + stonith-v03-c (stonith:fence_ipmilan): Stopped + stonith-v03-a (stonith:fence_ipmilan): Started v03-b + stonith-v03-b (stonith:fence_ipmilan): Started v03-a + stonith-v03-d (stonith:fence_ipmilan): Stopped + Clone Set: cl-clvmd [clvmd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ clvmd:3 clvmd:4 clvmd:5 clvmd:6 clvmd:7 clvmd:8 ] + Clone Set: cl-dlm [dlm] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ dlm:3 dlm:4 dlm:5 dlm:6 dlm:7 dlm:8 ] + Clone Set: cl-iscsid [iscsid] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ iscsid:3 iscsid:4 iscsid:5 iscsid:6 iscsid:7 iscsid:8 ] + Clone Set: cl-libvirtd [libvirtd] + Started: [ v03-a v03-b ] + Stopped: [ libvirtd:2 libvirtd:3 libvirtd:4 libvirtd:5 libvirtd:6 libvirtd:7 ] + Clone Set: cl-multipathd [multipathd] + 
Started: [ mgmt01 v03-a v03-b ] + Stopped: [ multipathd:3 multipathd:4 multipathd:5 multipathd:6 multipathd:7 multipathd:8 ] + Clone Set: cl-node-params [node-params] + Started: [ v03-a v03-b ] + Stopped: [ node-params:2 node-params:3 node-params:4 node-params:5 node-params:6 node-params:7 ] + Clone Set: cl-vlan1-if [vlan1-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan1-if:2 vlan1-if:3 vlan1-if:4 vlan1-if:5 vlan1-if:6 vlan1-if:7 ] + Clone Set: cl-vlan101-if [vlan101-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan101-if:2 vlan101-if:3 vlan101-if:4 vlan101-if:5 vlan101-if:6 vlan101-if:7 ] + Clone Set: cl-vlan102-if [vlan102-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan102-if:2 vlan102-if:3 vlan102-if:4 vlan102-if:5 vlan102-if:6 vlan102-if:7 ] + Clone Set: cl-vlan103-if [vlan103-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan103-if:2 vlan103-if:3 vlan103-if:4 vlan103-if:5 vlan103-if:6 vlan103-if:7 ] + Clone Set: cl-vlan104-if [vlan104-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan104-if:2 vlan104-if:3 vlan104-if:4 vlan104-if:5 vlan104-if:6 vlan104-if:7 ] + Clone Set: cl-vlan3-if [vlan3-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan3-if:2 vlan3-if:3 vlan3-if:4 vlan3-if:5 vlan3-if:6 vlan3-if:7 ] + Clone Set: cl-vlan4-if [vlan4-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan4-if:2 vlan4-if:3 vlan4-if:4 vlan4-if:5 vlan4-if:6 vlan4-if:7 ] + Clone Set: cl-vlan5-if [vlan5-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan5-if:2 vlan5-if:3 vlan5-if:4 vlan5-if:5 vlan5-if:6 vlan5-if:7 ] + Clone Set: cl-vlan900-if [vlan900-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan900-if:2 vlan900-if:3 vlan900-if:4 vlan900-if:5 vlan900-if:6 vlan900-if:7 ] + Clone Set: cl-vlan909-if [vlan909-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan909-if:2 vlan909-if:3 vlan909-if:4 vlan909-if:5 vlan909-if:6 vlan909-if:7 ] + Clone Set: cl-libvirt-images-fs [libvirt-images-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-images-fs:3 libvirt-images-fs:4 libvirt-images-fs:5 
libvirt-images-fs:6 libvirt-images-fs:7 libvirt-images-fs:8 ] + Clone Set: cl-libvirt-install-fs [libvirt-install-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-install-fs:3 libvirt-install-fs:4 libvirt-install-fs:5 libvirt-install-fs:6 libvirt-install-fs:7 libvirt-install-fs:8 ] + Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-iscsi:3 vds-ok-pool-0-iscsi:4 vds-ok-pool-0-iscsi:5 vds-ok-pool-0-iscsi:6 vds-ok-pool-0-iscsi:7 vds-ok-pool-0-iscsi:8 ] + Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-vg:3 vds-ok-pool-0-vg:4 vds-ok-pool-0-vg:5 vds-ok-pool-0-vg:6 vds-ok-pool-0-vg:7 vds-ok-pool-0-vg:8 ] + Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-iscsi:3 vds-ok-pool-1-iscsi:4 vds-ok-pool-1-iscsi:5 vds-ok-pool-1-iscsi:6 vds-ok-pool-1-iscsi:7 vds-ok-pool-1-iscsi:8 ] + Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-vg:3 vds-ok-pool-1-vg:4 vds-ok-pool-1-vg:5 vds-ok-pool-1-vg:6 vds-ok-pool-1-vg:7 vds-ok-pool-1-vg:8 ] + Clone Set: cl-libvirt-images-pool [libvirt-images-pool] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-images-pool:2 libvirt-images-pool:3 libvirt-images-pool:4 libvirt-images-pool:5 libvirt-images-pool:6 libvirt-images-pool:7 ] + Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-0-pool:2 vds-ok-pool-0-pool:3 vds-ok-pool-0-pool:4 vds-ok-pool-0-pool:5 vds-ok-pool-0-pool:6 vds-ok-pool-0-pool:7 ] + Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-1-pool:2 vds-ok-pool-1-pool:3 vds-ok-pool-1-pool:4 vds-ok-pool-1-pool:5 vds-ok-pool-1-pool:6 vds-ok-pool-1-pool:7 ] + git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm 
(ocf::vds-ok:VirtualDomain): Started v03-a + vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + Clone Set: cl-vlan200-if [vlan200-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan200-if:2 vlan200-if:3 vlan200-if:4 vlan200-if:5 vlan200-if:6 vlan200-if:7 ] + lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a + dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped + eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped + maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped + metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): 
Stopped + ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped + c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-test-net [mcast-test-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-test-net:2 mcast-test-net:3 mcast-test-net:4 mcast-test-net:5 mcast-test-net:6 mcast-test-net:7 ] + dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped + ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a + cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-anbriz-net:2 mcast-anbriz-net:3 mcast-anbriz-net:4 mcast-anbriz-net:5 mcast-anbriz-net:6 mcast-anbriz-net:7 ] + gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-libvirt-qpid [libvirt-qpid] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-qpid:2 libvirt-qpid:3 libvirt-qpid:4 libvirt-qpid:5 libvirt-qpid:6 libvirt-qpid:7 ] + gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + 
terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-gleb-net [mcast-gleb-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-gleb-net:2 mcast-gleb-net:3 mcast-gleb-net:4 mcast-gleb-net:5 mcast-gleb-net:6 mcast-gleb-net:7 ] + diff --git a/pengine/test10/load-stopped-loop.xml b/pengine/test10/load-stopped-loop.xml new file mode 100644 index 0000000000..31bd172f88 --- /dev/null +++ b/pengine/test10/load-stopped-loop.xml @@ -0,0 +1,3959 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pengine/test10/migrate-begin.dot b/pengine/test10/migrate-begin.dot index 36c3acd0b8..b7424a8e5d 100644 --- a/pengine/test10/migrate-begin.dot +++ b/pengine/test10/migrate-begin.dot @@ -1,16 +1,15 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] -"load_stopped_hex-13 hex-13" -> "test-vm_migrate_to_0 hex-14" [ style = bold] "load_stopped_hex-13 hex-13" -> "test-vm_start_0 hex-13" [ style = bold] "load_stopped_hex-13 hex-13" [ style=bold color="green" fontcolor="orange" ] "load_stopped_hex-14 hex-14" [ style=bold color="green" fontcolor="orange" ] "test-vm_migrate_from_0 hex-13" -> "test-vm_stop_0 hex-14" [ style = bold] "test-vm_migrate_from_0 hex-13" [ style=bold color="green" fontcolor="black" ] "test-vm_migrate_to_0 hex-14" -> "test-vm_migrate_from_0 hex-13" [ style = bold] "test-vm_migrate_to_0 hex-14" [ style=bold color="green" fontcolor="black" ] "test-vm_start_0 hex-13" [ style=bold color="green" fontcolor="orange" ] "test-vm_stop_0 hex-14" -> "all_stopped" [ style = bold] "test-vm_stop_0 hex-14" -> "load_stopped_hex-14 hex-14" [ style = bold] "test-vm_stop_0 hex-14" -> "test-vm_start_0 hex-13" [ style = bold] "test-vm_stop_0 hex-14" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/migrate-begin.exp b/pengine/test10/migrate-begin.exp index fa817f3353..a19bb7b09a 100644 --- a/pengine/test10/migrate-begin.exp +++ b/pengine/test10/migrate-begin.exp @@ -1,89 +1,85 @@ - - - - - + diff --git a/pengine/test10/migrate-begin.summary b/pengine/test10/migrate-begin.summary index 
b4b58703f1..3dea03a7fb 100644 --- a/pengine/test10/migrate-begin.summary +++ b/pengine/test10/migrate-begin.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-14 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] Transition Summary: * Migrate test-vm (Started hex-14 -> hex-13) Executing cluster transition: - * Pseudo action: load_stopped_hex-13 * Resource action: test-vm migrate_to on hex-14 + * Pseudo action: load_stopped_hex-13 * Resource action: test-vm migrate_from on hex-13 * Resource action: test-vm stop on hex-14 * Pseudo action: load_stopped_hex-14 * Pseudo action: all_stopped * Pseudo action: test-vm_start_0 Revised cluster status: Online: [ hex-13 hex-14 ] test-vm (ocf::heartbeat:Xen): Started hex-13 Clone Set: c-clusterfs [dlm] Started: [ hex-13 hex-14 ] diff --git a/pengine/test10/utilization-order3.dot b/pengine/test10/utilization-order3.dot index 9d9c4d2646..84659beccf 100644 --- a/pengine/test10/utilization-order3.dot +++ b/pengine/test10/utilization-order3.dot @@ -1,20 +1,19 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "load_stopped_node1 node1" -> "rsc2_start_0 node1" [ style = bold] "load_stopped_node1 node1" [ style=bold color="green" fontcolor="orange" ] -"load_stopped_node2 node2" -> "rsc1_migrate_to_0 node1" [ style = bold] "load_stopped_node2 node2" -> "rsc1_start_0 node2" [ style = bold] "load_stopped_node2 node2" [ style=bold color="green" fontcolor="orange" ] "probe_complete node1" [ style=bold color="green" fontcolor="black" ] "probe_complete node2" [ style=bold color="green" fontcolor="black" ] "rsc1_migrate_from_0 node2" -> "rsc1_stop_0 node1" [ style = bold] "rsc1_migrate_from_0 node2" [ style=bold color="green" fontcolor="black" ] "rsc1_migrate_to_0 node1" -> "rsc1_migrate_from_0 node2" [ style = bold] "rsc1_migrate_to_0 node1" [ style=bold color="green" fontcolor="black" ] "rsc1_start_0 node2" [ style=bold color="green" 
fontcolor="orange" ] "rsc1_stop_0 node1" -> "all_stopped" [ style = bold] "rsc1_stop_0 node1" -> "load_stopped_node1 node1" [ style = bold] "rsc1_stop_0 node1" -> "rsc1_start_0 node2" [ style = bold] "rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "rsc2_start_0 node1" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/utilization-order3.exp b/pengine/test10/utilization-order3.exp index d8e0dfc057..2975e70050 100644 --- a/pengine/test10/utilization-order3.exp +++ b/pengine/test10/utilization-order3.exp @@ -1,118 +1,114 @@ - - - - - + diff --git a/pengine/test10/utilization-order3.summary b/pengine/test10/utilization-order3.summary index cfeea48924..23411ee578 100644 --- a/pengine/test10/utilization-order3.summary +++ b/pengine/test10/utilization-order3.summary @@ -1,27 +1,27 @@ Current cluster status: Online: [ node1 node2 ] rsc2 (ocf::pacemaker:Dummy): Stopped rsc1 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Start rsc2 (node1) * Migrate rsc1 (Started node1 -> node2) Executing cluster transition: - * Pseudo action: load_stopped_node2 * Resource action: rsc1 migrate_to on node1 + * Pseudo action: load_stopped_node2 * Resource action: rsc1 migrate_from on node2 * Resource action: rsc1 stop on node1 * Pseudo action: load_stopped_node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 * Pseudo action: rsc1_start_0 Revised cluster status: Online: [ node1 node2 ] rsc2 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 diff --git a/pengine/test10/utilization-order4.dot b/pengine/test10/utilization-order4.dot index 8f4ee501e5..7439bd9846 100644 --- a/pengine/test10/utilization-order4.dot +++ b/pengine/test10/utilization-order4.dot @@ -1,54 +1,53 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "clone-nfs_stop_0" -> "clone-nfs_stopped_0" [ style = bold] "clone-nfs_stop_0" -> "grp-nfs:1_stop_0" [ style = bold] "clone-nfs_stop_0" [ 
style=bold color="green" fontcolor="orange" ] "clone-nfs_stopped_0" -> "clone-ping_stop_0" [ style = bold] "clone-nfs_stopped_0" [ style=bold color="green" fontcolor="orange" ] "clone-ping_stop_0" -> "clone-ping_stopped_0" [ style = bold] "clone-ping_stop_0" -> "prim-ping:0_stop_0 deglxen002" [ style = bold] "clone-ping_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-ping_stopped_0" [ style=bold color="green" fontcolor="orange" ] "degllx61-vm_stop_0 deglxen001" -> "all_stopped" [ style = bold] "degllx61-vm_stop_0 deglxen001" -> "clone-nfs_stop_0" [ style = bold] "degllx61-vm_stop_0 deglxen001" -> "load_stopped_deglxen001 deglxen001" [ style = bold] "degllx61-vm_stop_0 deglxen001" [ style=bold color="green" fontcolor="black" ] "degllx62-vm_migrate_from_0 deglxen001" -> "degllx62-vm_stop_0 deglxen002" [ style = bold] "degllx62-vm_migrate_from_0 deglxen001" [ style=bold color="green" fontcolor="black" ] "degllx62-vm_migrate_to_0 deglxen002" -> "degllx62-vm_migrate_from_0 deglxen001" [ style = bold] "degllx62-vm_migrate_to_0 deglxen002" [ style=bold color="green" fontcolor="black" ] "degllx62-vm_monitor_30000 deglxen001" [ style=bold color="green" fontcolor="black" ] "degllx62-vm_start_0 deglxen001" -> "degllx62-vm_monitor_30000 deglxen001" [ style = bold] "degllx62-vm_start_0 deglxen001" [ style=bold color="green" fontcolor="orange" ] "degllx62-vm_stop_0 deglxen002" -> "all_stopped" [ style = bold] "degllx62-vm_stop_0 deglxen002" -> "clone-nfs_stop_0" [ style = bold] "degllx62-vm_stop_0 deglxen002" -> "degllx62-vm_start_0 deglxen001" [ style = bold] "degllx62-vm_stop_0 deglxen002" -> "load_stopped_deglxen002 deglxen002" [ style = bold] "degllx62-vm_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ] "grp-nfs:1_stop_0" -> "grp-nfs:1_stopped_0" [ style = bold] "grp-nfs:1_stop_0" -> "nfs-xen_config:1_stop_0 deglxen002" [ style = bold] "grp-nfs:1_stop_0" -> "nfs-xen_images:1_stop_0 deglxen002" [ style = bold] "grp-nfs:1_stop_0" -> 
"nfs-xen_swapfiles:1_stop_0 deglxen002" [ style = bold] "grp-nfs:1_stop_0" [ style=bold color="green" fontcolor="orange" ] "grp-nfs:1_stopped_0" -> "clone-nfs_stopped_0" [ style = bold] "grp-nfs:1_stopped_0" -> "prim-ping:0_stop_0 deglxen002" [ style = bold] "grp-nfs:1_stopped_0" [ style=bold color="green" fontcolor="orange" ] -"load_stopped_deglxen001 deglxen001" -> "degllx62-vm_migrate_to_0 deglxen002" [ style = bold] "load_stopped_deglxen001 deglxen001" -> "degllx62-vm_start_0 deglxen001" [ style = bold] "load_stopped_deglxen001 deglxen001" [ style=bold color="green" fontcolor="orange" ] "load_stopped_deglxen002 deglxen002" [ style=bold color="green" fontcolor="orange" ] "nfs-xen_config:1_stop_0 deglxen002" -> "all_stopped" [ style = bold] "nfs-xen_config:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold] "nfs-xen_config:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ] "nfs-xen_images:1_stop_0 deglxen002" -> "all_stopped" [ style = bold] "nfs-xen_images:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold] "nfs-xen_images:1_stop_0 deglxen002" -> "nfs-xen_swapfiles:1_stop_0 deglxen002" [ style = bold] "nfs-xen_images:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ] "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "all_stopped" [ style = bold] "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold] "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "nfs-xen_config:1_stop_0 deglxen002" [ style = bold] "nfs-xen_swapfiles:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ] "prim-ping:0_stop_0 deglxen002" -> "all_stopped" [ style = bold] "prim-ping:0_stop_0 deglxen002" -> "clone-ping_stopped_0" [ style = bold] "prim-ping:0_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/utilization-order4.exp b/pengine/test10/utilization-order4.exp index 8a3557ecf8..078b5733de 100644 --- a/pengine/test10/utilization-order4.exp +++ 
b/pengine/test10/utilization-order4.exp @@ -1,281 +1,277 @@ - - - - - + diff --git a/pengine/test10/utilization-order4.summary b/pengine/test10/utilization-order4.summary index 469bd4bf99..22a9610507 100644 --- a/pengine/test10/utilization-order4.summary +++ b/pengine/test10/utilization-order4.summary @@ -1,60 +1,60 @@ Current cluster status: Node deglxen002: standby Online: [ deglxen001 ] degllx62-vm (ocf::heartbeat:Xen): Started deglxen002 degllx63-vm (ocf::heartbeat:Xen): Stopped degllx61-vm (ocf::heartbeat:Xen): Started deglxen001 degllx64-vm (ocf::heartbeat:Xen): Stopped stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 deglxen002 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 deglxen002 ] Transition Summary: * Migrate degllx62-vm (Started deglxen002 -> deglxen001) * Stop degllx61-vm (deglxen001) * Stop nfs-xen_config:1 (deglxen002) * Stop nfs-xen_swapfiles:1 (deglxen002) * Stop nfs-xen_images:1 (deglxen002) * Stop prim-ping:1 (deglxen002) Executing cluster transition: + * Resource action: degllx62-vm migrate_to on deglxen002 * Resource action: degllx61-vm stop on deglxen001 * Pseudo action: load_stopped_deglxen001 - * Resource action: degllx62-vm migrate_to on deglxen002 * Resource action: degllx62-vm migrate_from on deglxen001 * Resource action: degllx62-vm stop on deglxen002 * Pseudo action: clone-nfs_stop_0 * Pseudo action: load_stopped_deglxen002 * Pseudo action: degllx62-vm_start_0 * Pseudo action: grp-nfs:1_stop_0 * Resource action: nfs-xen_images:1 stop on deglxen002 * Resource action: degllx62-vm monitor=30000 on deglxen001 * Resource action: nfs-xen_swapfiles:1 stop on deglxen002 * Resource action: nfs-xen_config:1 stop on deglxen002 * Pseudo action: grp-nfs:1_stopped_0 * Pseudo action: clone-nfs_stopped_0 * Pseudo action: clone-ping_stop_0 * Resource action: prim-ping:0 stop on deglxen002 * Pseudo action: clone-ping_stopped_0 * Pseudo action: all_stopped Revised cluster status: 
Node deglxen002: standby Online: [ deglxen001 ] degllx62-vm (ocf::heartbeat:Xen): Started deglxen001 degllx63-vm (ocf::heartbeat:Xen): Stopped degllx61-vm (ocf::heartbeat:Xen): Stopped deglxen002 degllx64-vm (ocf::heartbeat:Xen): Stopped stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 ] Stopped: [ grp-nfs:1 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 ] Stopped: [ prim-ping:1 ]