diff --git a/pengine/native.c b/pengine/native.c index 89ff52b51a..e79bca0f4e 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,2158 +1,2172 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void pe_post_notify( resource_t *rsc, node_t *node, action_t *op, notify_data_t *n_data, pe_working_set_t *data_set); void NoRoleChange (resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set); gboolean DeleteRsc (resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set); gboolean StopRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean StartRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean DemoteRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean RoleError (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean NullOp (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, 
RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, RoleError, RoleError, DemoteRsc, NullOp, }, }; static gboolean native_choose_node(resource_t *rsc) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest weight) with the fewest resources 3. remove color.chosen_node from all other colors */ GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = g_list_length(rsc->allowed_nodes); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to?TRUE:FALSE; } crm_debug_3("Choosing node for %s from %d candidates", rsc->id, length); if(rsc->allowed_nodes) { rsc->allowed_nodes = g_list_sort(rsc->allowed_nodes, sort_node_weight); nodes = rsc->allowed_nodes; chosen = g_list_nth_data(nodes, 0); if(chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if(can_run_resources(running) == FALSE) { running = NULL; } for(lpc = 1; lpc < length; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if(tmp->weight == chosen->weight) { multiple++; if(running && tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if(multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if(chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. Chose %s.", multiple, score, rsc->id, chosen->details->uname); crm_free(score); } return native_assign_node(rsc, nodes, chosen, FALSE); } int node_list_attr_score(GListPtr list, const char *attr, const char *value) { int best_score = -INFINITY; const char *best_node = NULL; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter(node, node_t, list, lpc, int weight = node->weight; if(can_run_resources(node) == FALSE) { weight = -INFINITY; } if(weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if(safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } ); if(safe_str_neq(attr, "#"XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node?best_node:"", best_score); } return best_score; } static void node_list_update(GListPtr list1, GListPtr list2, const char *attr, int factor) { int score = 0; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter( node, node_t, list1, lpc, CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); if(factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO: Decide if we want to filter only if weight == -INFINITY * */ continue; } crm_debug_2("%s: %d + %d*%d", node->details->uname, node->weight, factor, score); node->weight = merge_weights(factor*score, node->weight); ); } GListPtr native_merge_weights( resource_t *rsc, const char *rhs, GListPtr nodes, const char *attr, int factor, gboolean allow_rollback) { GListPtr archive = NULL; int multiplier = 1; if(factor < 0) { multiplier = -1; } if(is_set(rsc->flags, pe_rsc_merging)) { crm_info("%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); crm_debug_2("%s: Combining scores from %s", rhs, rsc->id); if(allow_rollback) { archive = node_list_dup(nodes, FALSE, FALSE); } node_list_update(nodes, rsc->allowed_nodes, attr, factor); if(can_run_any(nodes) == FALSE) { if(archive) { crm_info("%s: 
Rolling back scores from %s", rhs, rsc->id); pe_free_shallow_adv(nodes, TRUE); nodes = archive; } goto bail; } pe_free_shallow_adv(archive, TRUE); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, crm_info("%s: Rolling back scores from %s", rhs, rsc->id); nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rhs, nodes, constraint->node_attribute, multiplier*constraint->score/INFINITY, allow_rollback); ); bail: clear_bit(rsc->flags, pe_rsc_merging); return nodes; } node_t * native_color(resource_t *rsc, pe_working_set_t *data_set) { int alloc_details = scores_log_level+1; if(rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ crm_debug("Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->color(rsc->parent, data_set); } if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *rsc_rh = constraint->rsc_rh; crm_debug_2("%s: Pre-Processing %s (%s)", rsc->id, constraint->id, rsc_rh->id); rsc_rh->cmds->color(rsc_rh, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); ); dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE); ); print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if(rsc->next_role == RSC_ROLE_STOPPED) { crm_debug_2("Making sure %s doesn't get allocated", rsc->id); /* make sure it doesn't come up again */ resource_location( rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } dump_node_scores(show_scores?0:scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); if(is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if(is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; if(rsc->running_on == NULL) { reason = "inactive"; } else if(rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if(is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } crm_info("Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to?assign_to->details->uname:"'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if(is_set(data_set->flags, pe_flag_stop_everything)) { crm_debug("Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if(is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc) ) { crm_debug_3("Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if(rsc->allocated_to == NULL) { if(is_not_set(rsc->flags, pe_rsc_orphan)) { pe_warn("Resource %s cannot run anywhere", rsc->id); } else if(rsc->running_on != NULL) { crm_info("Stopping orphan resource %s", rsc->id); } } else { crm_debug("Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); }
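/*
 * A worked example of the merge_weights() calls above (the scores are
 * hypothetical, not taken from any test case): each rsc_cons_lhs
 * constraint contributes factor = constraint->score/INFINITY, and since
 * INFINITY is an integer (1000000) the division truncates:
 *
 *   score =  INFINITY  ->  factor =  1  (peer preferences added)
 *   score = -INFINITY  ->  factor = -1  (peer preferences subtracted)
 *   score =  500       ->  factor =  0  (merge_weights(0*s, w) == w)
 *
 * so only mandatory (+/-INFINITY) colocations pull a dependent's node
 * preferences into this resource's allowed_nodes at this point.
 */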
clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); return rsc->allocated_to; } static gboolean is_op_dup( resource_t *rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xml_child_iter_filter( rsc->ops_xml, operation, "op", value = crm_element_value(operation, "name"); if(safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if(value == NULL) { value = "0"; } if(safe_str_neq(value, interval)) { continue; } if(id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } ); return dup; } void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; crm_debug_2("Creating recurring action %s for %s in role %s", ID(operation), rsc->id, role2text(rsc->next_role)); if(node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if(interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if(is_op_dup(rsc, name, interval)) { return; } key = generate_op_key(rsc->id, name, interval_ms); if(find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ return; } if(start != NULL) { crm_debug_3("Marking %s %s due to %s", key, start->optional?"optional":"mandatory", start->uuid); is_optional = start->optional; } else { crm_debug_2("Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if(possible_matches == NULL) { is_optional = FALSE; crm_debug_3("Marking %s mandatory: not active", key); } else { g_list_free(possible_matches); } value = crm_element_value(operation, "role"); if((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if(is_optional) { char *local_key = crm_strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* it's running: cancel it */ mon = custom_action( rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); crm_free(mon->task); mon->task = crm_strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch(rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if(rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if(rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if(local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. 
%s)", result , key, value?value:role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); crm_free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if(is_optional) { crm_debug_2("%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if(start == NULL || start->runnable == FALSE) { crm_debug("%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(node == NULL || node->details->online == FALSE || node->details->unclean) { crm_debug("%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(mon->optional == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", mon->task, interval_ms/1000, rsc->id, crm_str(node_uname)); } if(rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); crm_free(running_master); } if(node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, crm_strdup(key), mon, pe_order_implies_right|pe_order_runnable_left, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { custom_action_order( rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } else if(rsc->role == RSC_ROLE_MASTER) { custom_action_order( rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } } } void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set) { if(is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xml_child_iter_filter( rsc->ops_xml, operation, "op", RecurringOp(rsc, start, node, operation, data_set); ); } } void native_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *start = NULL; node_t *chosen = NULL; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; crm_debug_2("Createing actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); chosen = rsc->allocated_to; if(chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; } else if(rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; } get_rsc_attributes(rsc->parameters, rsc, chosen, data_set); if(g_list_length(rsc->running_on) > 1) { if(rsc->recovery_type == recovery_stop_start) { pe_proc_warn("Attempting recovery of resource %s", rsc->id); if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, NULL, FALSE, data_set); } StopRsc(rsc, NULL, FALSE, data_set); rsc->role = RSC_ROLE_STOPPED; } } else if(rsc->running_on != NULL) { node_t *current = rsc->running_on->data; NoRoleChange(rsc, current, chosen, data_set); } else if(rsc->role == RSC_ROLE_STOPPED && rsc->next_role == RSC_ROLE_STOPPED) { char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, NULL); slist_iter( action, action_t, possible_matches, lpc, action->optional = TRUE; /* action->pseudo = TRUE; */ ); g_list_free(possible_matches); crm_debug_2("Stopping a stopped resource"); crm_free(key); goto do_recurring; } else if(rsc->role != RSC_ROLE_STOPPED) { /* A cheap trick to account for the fact that Master/Slave groups may not be * completely running when we set their role to Slave */ crm_debug_2("Resetting %s.role = %s (was %s)", rsc->id, role2text(RSC_ROLE_STOPPED), role2text(rsc->role)); rsc->role = RSC_ROLE_STOPPED; } role = rsc->role; while(role != rsc->next_role) { next_role = 
rsc_state_matrix[role][rsc->next_role]; crm_debug_2("Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if(rsc_action_matrix[role][next_role]( rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } do_recurring: if(rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); } } void native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { int type = pe_order_optional; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); if(rsc->variant == pe_native) { type |= pe_order_implies_right; } if(rsc->parent == NULL || rsc->parent->variant == pe_group) { type |= pe_order_restart; } new_rsc_order(rsc, RSC_STOP, rsc, RSC_START, type, data_set); new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_STOP, pe_order_demote_stop, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, pe_order_optional, data_set); if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(rsc->variant == pe_native && safe_str_neq(class, "stonith")) { custom_action_order( rsc, stop_key(rsc), NULL, NULL, crm_strdup(all_stopped->task), all_stopped, pe_order_implies_right|pe_order_runnable_left, data_set); } } void native_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { if(rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } crm_debug_2("Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } static gboolean filter_colocation_constraint( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { int level = LOG_DEBUG_4; if(constraint->score == 0){ return FALSE; } if(constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } return TRUE; } static void colocation_match( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { const char *tmp = NULL; const char *value = NULL; - gboolean do_check = FALSE; const char *attribute = "#id"; + GListPtr archive = NULL; + gboolean do_check = FALSE; + if(constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if(rsc_rh->allocated_to) { value = g_hash_table_lookup( rsc_rh->allocated_to->details->attrs, attribute); 
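/*
 * At this point 'value' holds the attribute of the node that rsc_rh
 * was allocated to (attribute defaults to "#id", i.e. the node itself).
 * A sketch of the loop below, with hypothetical nodes and a finite
 * score of 200 where rsc_rh sits on node1:
 *
 *   node1 (attribute matches)  : weight += 200
 *   node2 (no match, finite)   : weight unchanged
 *
 * With score >= INFINITY the non-matching nodes get -INFINITY instead,
 * and when rsc_rh is unallocated (do_check stays FALSE) every node is
 * penalized by the score. The rollback added below by this patch only
 * snapshots allowed_nodes for finite scores, so mandatory (+/-INFINITY)
 * constraints are never rolled back.
 */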
do_check = TRUE; } else if(constraint->score < 0) { /* nothing to do: * anti-colocation with something that's not running */ return; } + + if(constraint->score > -INFINITY && constraint->score < INFINITY) { + archive = node_list_dup(rsc_lh->allowed_nodes, FALSE, FALSE); + } slist_iter( node, node_t, rsc_lh->allowed_nodes, lpc, tmp = g_hash_table_lookup(node->details->attrs, attribute); if(do_check && safe_str_eq(tmp, value)) { if(constraint->score < INFINITY) { crm_debug_2("%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights( constraint->score, node->weight); } } else if(do_check == FALSE || constraint->score >= INFINITY) { crm_debug_2("%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check?"failed":"unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } - ); + + if(can_run_any(rsc_lh->allowed_nodes) == FALSE) { + if(archive) { + crm_info("%s: Rolling back scores from %s (%d, %s)", + rsc_lh->id, rsc_rh->id, do_check, score2char(constraint->score)); + pe_free_shallow_adv(rsc_lh->allowed_nodes, TRUE); + rsc_lh->allowed_nodes = archive; + } + } } void native_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { crm_debug_2("%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0?"":"Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); if(filter_colocation_constraint(rsc_lh, rsc_rh, constraint) == FALSE) { return; } if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return; } details_rh = rsc_rh->allocated_to?rsc_rh->allocated_to->details:NULL; details_lh = rsc_lh->allocated_to?rsc_lh->allocated_to->details:NULL; if(constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. 
%s", rsc_lh->id, rsc_rh->id, details_lh?details_lh->uname:"n/a", details_rh?details_rh->uname:"n/a"); } else if(constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh?details_rh->uname:"n/a"); } return; } else { colocation_match(rsc_lh, rsc_rh, constraint); } } static GListPtr find_actions_by_task(GListPtr actions, resource_t *rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if(list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *tmp = NULL; char *task = NULL; int interval = 0; if(parse_op_key(original_key, &tmp, &task, &interval)) { key = generate_op_key(rsc->id, task, interval); /* crm_err("looking up %s instead of %s", key, original_key); */ /* slist_iter(action, action_t, actions, lpc, */ /* crm_err(" - %s", action->uuid)); */ list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } crm_free(key); crm_free(tmp); crm_free(task); } return list; } void native_rsc_order_lh(resource_t *lh_rsc, order_constraint_t *order, pe_working_set_t *data_set) { GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_debug_3("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if(lh_action != NULL) { lh_actions = g_list_append(NULL, lh_action); } else if(lh_action == NULL) { lh_actions = find_actions_by_task( lh_rsc->actions, lh_rsc, order->lh_action_task); } if(lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; crm_debug_4("No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); parse_op_key( order->lh_action_task, &rsc_id, &op_type, &interval); key = generate_op_key(lh_rsc->id, op_type, interval); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); if(lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { lh_action->pseudo = TRUE; lh_action->runnable = TRUE; } lh_actions = g_list_append(NULL, lh_action); crm_free(op_type); crm_free(rsc_id); } slist_iter( lh_action_iter, action_t, lh_actions, lpc, if(rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if(rh_rsc) { rh_rsc->cmds->rsc_order_rh( lh_action_iter, rh_rsc, order); } else if(order->rh_action) { order_actions( lh_action_iter, order->rh_action, order->type); } ); pe_free_shallow_adv(lh_actions, FALSE); } void native_rsc_order_rh( action_t *lh_action, resource_t *rsc, order_constraint_t *order) { GListPtr rh_actions = NULL; action_t *rh_action = NULL; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); rh_action = order->rh_action; crm_debug_3("Processing RH of ordering constraint %d", order->id); if(rh_action != NULL) { rh_actions = g_list_append(NULL, rh_action); } else if(rsc != NULL) { rh_actions = find_actions_by_task( rsc->actions, rsc, order->rh_action_task); } if(rh_actions == NULL) { crm_debug_4("No RH-Side (%s/%s) found for constraint..." 
" ignoring", rsc->id,order->rh_action_task); if(lh_action) { crm_debug_4("LH-Side was: %s", lh_action->uuid); } return; } slist_iter( rh_action_iter, action_t, rh_actions, lpc, if(lh_action) { order_actions(lh_action, rh_action_iter, order->type); } else if(order->type & pe_order_implies_right) { rh_action_iter->runnable = FALSE; crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, order->type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, order->type); } ); pe_free_shallow_adv(rh_actions, FALSE); } void native_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr or_list; crm_debug_2("Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if(constraint == NULL) { pe_err("Constraint is NULL"); return; } else if(rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } else if(constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { crm_debug("Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if(is_active(constraint) == FALSE) { crm_debug_2("Constraint (%s) is not active", constraint->id); return; } if(constraint->node_list_rh == NULL) { crm_debug_2("RHS of constraint %s is NULL", constraint->id); return; } or_list = node_list_or( rsc->allowed_nodes, constraint->node_list_rh, FALSE); pe_free_shallow(rsc->allowed_nodes); rsc->allowed_nodes = or_list; slist_iter(node, node_t, or_list, lpc, crm_debug_3("%s + %s : %d", rsc->id, node->details->uname, node->weight); ); } void native_expand(resource_t *rsc, pe_working_set_t *data_set) { crm_debug_3("Processing actions from %s", rsc->id); slist_iter( action, action_t, rsc->actions, lpc, crm_debug_4("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); ); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->expand(child_rsc, data_set); ); } void LogActions(resource_t *rsc, pe_working_set_t *data_set) { node_t *next = NULL; node_t *current = NULL; gboolean moving = FALSE; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, LogActions(child_rsc, data_set); ); return; } next = rsc->allocated_to; if(rsc->running_on) { current = rsc->running_on->data; if(rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if(current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if(is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { crm_notice("Leave resource %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed)?" 
unmanaged":""); return; } if(current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } if(rsc->role == rsc->next_role) { action_t *start = NULL; char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, next); crm_free(key); if(possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = generate_op_key(rsc->id, CRMD_ACTION_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); CRM_CHECK(next != NULL,); if(next == NULL) { } else if(possible_matches) { crm_notice("Migrate resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(start == NULL || start->optional) { crm_notice("Leave resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(moving && current) { crm_notice("Move resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(start && start->runnable == FALSE) { crm_notice("Stop resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if(rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if(current != NULL) { crm_notice("Demote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname); } } if(rsc->next_role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(current != NULL,); slist_iter(node, node_t, rsc->running_on, lpc, crm_notice("Stop resource %s\t(%s)", rsc->id, node->details->uname)); } if(rsc->role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(next != NULL,); if(next != NULL) { crm_notice("Start %s\t(%s)", rsc->id, next->details->uname); } } if(rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { CRM_CHECK(next != NULL,); crm_notice("Promote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname); } } void NoRoleChange(resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set) { action_t *stop = NULL; action_t *start = NULL; GListPtr possible_matches = NULL; crm_debug_2("Executing: %s (role=%s)", rsc->id, role2text(rsc->next_role)); if(current == NULL || next == NULL) { return; } if(is_set(rsc->flags, pe_rsc_failed) || safe_str_neq(current->details->id, next->details->id)) { if(rsc->next_role > RSC_ROLE_STARTED) { gboolean optional = TRUE; if(rsc->role == RSC_ROLE_MASTER) { optional = FALSE; } DemoteRsc(rsc, current, optional, data_set); } if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, current, FALSE, data_set); } StopRsc(rsc, current, FALSE, data_set); StartRsc(rsc, next, FALSE, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, FALSE, data_set); } possible_matches = find_recurring_actions(rsc->actions, next); slist_iter(match, action_t, possible_matches, lpc, if(match->optional == FALSE) { crm_debug("Fixing recurring action: %s", match->uuid); match->optional = TRUE; } ); g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, next, TRUE); if(start->runnable) { /* wait for StartRsc() to be called */ rsc->role = RSC_ROLE_STOPPED; } else { /* wait for StopRsc() 
to be called */ rsc->next_role = RSC_ROLE_STOPPED; } } else { stop = stop_action(rsc, current, TRUE); start = start_action(rsc, next, TRUE); stop->optional = start->optional; if(rsc->next_role > RSC_ROLE_STARTED) { DemoteRsc(rsc, current, start->optional, data_set); } StopRsc(rsc, current, start->optional, data_set); StartRsc(rsc, current, start->optional, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, start->optional, data_set); } if(start->runnable == FALSE) { rsc->next_role = RSC_ROLE_STOPPED; } } } gboolean StopRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *stop = NULL; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_debug_2("Executing: %s", rsc->id); if(rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && safe_str_eq(class, "stonith")) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order( NULL, crm_strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_implies_left|pe_order_stonith_stop, data_set); } slist_iter( current, node_t, rsc->running_on, lpc, stop = stop_action(rsc, current, optional); if(is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } ); return TRUE; } gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *start = NULL; crm_debug_2("Executing: %s", rsc->id); start = start_action(rsc, next, TRUE); if(start->runnable && optional == FALSE) { start->optional = FALSE; } return TRUE; } gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { char *key = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; crm_debug_2("Executing: %s", rsc->id); CRM_CHECK(rsc->next_role == RSC_ROLE_MASTER, crm_err("Next role: %s", role2text(rsc->next_role)); return FALSE); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(start, action_t, action_list, lpc, if(start->runnable == FALSE) { runnable = FALSE; } ); g_list_free(action_list); if(runnable) { promote_action(rsc, next, optional); return TRUE; } crm_debug("%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(promote, action_t, action_list, lpc, promote->runnable = FALSE; ); g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ slist_iter( current, node_t, rsc->running_on, lpc, demote_action(rsc, current, optional); ); return TRUE; } gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug("Executing: %s", rsc->id); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if(is_set(rsc->flags, pe_rsc_failed)) { crm_debug_2("Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if(node == NULL) { crm_debug_2("Resource %s not 
deleted: NULL node", rsc->id); return FALSE; } else if(node->details->unclean || node->details->online == FALSE) { crm_debug_2("Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional?pe_order_implies_right:pe_order_implies_left, data_set); #if DELETE_THEN_REFRESH refresh = custom_action( NULL, crm_strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { char *key = NULL; char *target_rc = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); CRM_CHECK(node != NULL, return FALSE); if(rsc->children) { gboolean any_created = FALSE; slist_iter( child_rsc, resource_t, rsc->children, lpc, any_created = child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set) || any_created; ); return any_created; } if(is_set(rsc->flags, pe_rsc_orphan)) { crm_debug_2("Skipping orphan: %s", rsc->id); return FALSE; } running = pe_find_node_id(rsc->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active: %s", rsc->id); return FALSE; } if(running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. * * An alternative would be to update known_on for every peer * during process_rsc_state() */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while(peer && running == NULL) { running = pe_find_node_id(peer->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active clone: %s", rsc->id); crm_free(clone_id); return FALSE; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } crm_free(clone_id); } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); probe->optional = FALSE; running = pe_find_node_id(rsc->running_on, node->details->id); if(running == NULL) { target_rc = crm_itoa(EXECRA_NOT_RUNNING); } else if(rsc->role == RSC_ROLE_MASTER) { target_rc = crm_itoa(EXECRA_RUNNING_MASTER); } if(target_rc != NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, target_rc); crm_free(target_rc); } crm_debug("Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_right); return TRUE; } static void native_start_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { node_t *target = stonith_op?stonith_op->node:NULL; if(is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); crm_debug_2("Ordering %s action before stonith events", key); custom_action_order( rsc, key, NULL, NULL, crm_strdup(ready->task), ready, pe_order_optional, data_set); } else { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = 
get_pseudo_op(STONITH_DONE, data_set); slist_iter(action, action_t, rsc->actions, lpc2, if(action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_implies_left); } else if(target != NULL && safe_str_eq(action->task, RSC_START) && NULL == pe_find_node_id( rsc->known_on, target->details->id)) { /* if known == NULL, then we don't know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * with the resource * * it's analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explanation is that the * DC died and took its status with it */ crm_info("Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_implies_left|pe_order_runnable_left); } ); } } static void native_stop_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { char *key = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); /* add the stonith OP as a stop pre-req and mark the stop * as a pseudo op - since it's now redundant */ slist_iter( action, action_t, action_list, lpc2, resource_t *parent = NULL; if(action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if(is_set(rsc->flags, pe_rsc_failed)) { crm_warn("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; action->implied_by_stonith = TRUE; if(is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if(is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_resource"), crm_strdup(rsc->id)); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_uname"), crm_strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* find the top-most resource */ parent = rsc->parent; while(parent != NULL && parent->parent != NULL) { parent = parent->parent; } if(parent) { crm_debug_2("Re-creating actions for %s", parent->id); parent->cmds->create_actions(parent, data_set); /* 
make sure we don't mess anything up in create_actions */ CRM_CHECK(action->pseudo, action->pseudo = TRUE); CRM_CHECK(action->runnable, action->runnable = TRUE); } /* From Bug #1601, successful fencing must be an input to a failed resource's stop action. However, given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,crm_strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ ); g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); slist_iter( action, action_t, action_list, lpc2, if(action->node->details->online == FALSE || is_set(rsc->flags, pe_rsc_failed)) { crm_info("Demote of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; if(is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } ); g_list_free(action_list); } void complex_stonith_ordering( resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set) { gboolean is_stonith = FALSE; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->stonith_ordering( child_rsc, stonith_op, data_set); ); return; } if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(stonith_op != NULL && safe_str_eq(class, "stonith")) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op, is_stonith, data_set); /* Stop constraints */ native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } #define ALLOW_WEAK_MIGRATION 0 enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t *rsc, resource_t *target, node_t *node, const char *type) { int mode = stack_stable; action_t *active = NULL; if(target->children) { slist_iter( child, resource_t, target->children, lpc, mode |= find_clone_activity_on(rsc, child, node, type); ); return mode; } active = find_first_action(target->actions, NULL, CRMD_ACTION_START, NULL); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, CRMD_ACTION_STOP, node); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t *rsc, resource_t 
*other_rsc, const char *type) { resource_t *other_p = uber_parent(other_rsc); if(other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if(other_p->variant == pe_native) { crm_notice("Cannot migrate %s due to dependency on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if(other_rsc == rsc->parent) { int mode = 0; slist_iter(constraint, rsc_colocation_t, other_rsc->rsc_cons, lpc, if(constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } ); return mode; } else if(other_p->variant == pe_group) { crm_notice("Cannot migrate %s due to dependency on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following constraints already exist in the system. The 'ok' and 'fail' annotations refer to whether they still hold for migration. a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stopping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as per the previous, only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t *rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); crm_debug_3("%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_debug_4("Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } ); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; #if ALLOW_WEAK_MIGRATION if((other_w->type & pe_order_implies_right) == 0) { crm_debug_3("%s: depends on %s (optional ordering)", rsc->id, other->uuid); continue; } #endif crm_debug_2("%s: Checking %s ordering", rsc->id, other->uuid); if(other->optional == FALSE) { mode |= check_stack_element(rsc, other->rsc, "order"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } ); return TRUE; } void complex_migrate_reload(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; int level = LOG_DEBUG; GListPtr action_list = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *other = 
NULL; action_t *action = NULL; const char *value = NULL; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->migrate_reload(child_rsc, data_set); ); other = NULL; return; } else if(rsc->variant > pe_native) { return; } do_crm_log_unlikely(level+1, "Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || g_list_length(rsc->running_on) != 1) { do_crm_log_unlikely( level+1, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if(crm_is_true(value)) { set_bit(rsc->flags, pe_rsc_can_migrate); } if(rsc->next_role > RSC_ROLE_SLAVE) { clear_bit(rsc->flags, pe_rsc_can_migrate); do_crm_log_unlikely( level+1, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); } key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no start action", rsc->id); return; } start = action_list->data; g_list_free(action_list); if(is_not_set(rsc->flags, pe_rsc_can_migrate) && start->allow_reload_conversion == FALSE) { do_crm_log_unlikely(level+1, "%s: no need to continue", rsc->id); return; } key = stop_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no stop action", rsc->id); return; } stop = action_list->data; g_list_free(action_list); action = start; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } action = stop; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } if(is_set(rsc->flags, pe_rsc_can_migrate)) { if(start->node == NULL || stop->node == NULL || stop->node->details == start->node->details) { clear_bit(rsc->flags, pe_rsc_can_migrate); } else if(at_stack_bottom(rsc) == FALSE) { clear_bit(rsc->flags, pe_rsc_can_migrate); } } if(is_set(rsc->flags, pe_rsc_can_migrate)) { crm_info("Migrating %s from %s to %s", rsc->id, stop->node->details->uname, start->node->details->uname); crm_free(stop->uuid); crm_free(stop->task); stop->task = crm_strdup(RSC_MIGRATE); stop->uuid = generate_op_key(rsc->id, stop->task, 0); add_hash_param(stop->meta, "migrate_source", stop->node->details->uname); add_hash_param(stop->meta, "migrate_target", start->node->details->uname); /* Create the correct ordering adjustments based on find_clone_activity_on(); */ slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_info("Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { int mode = check_stack_element(rsc, target, "coloc"); action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); CRM_ASSERT(clone_stop != NULL); CRM_ASSERT(clone_start != NULL); CRM_ASSERT((mode & stack_middle) == 0); CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0); if(mode & stack_stopping) { action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); 
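/*
 * A sketch of the two repairs below (A = the migrating primitive,
 * B = the colocated clone; the names are illustrative):
 *
 *   stack_stopping : add   A.start   -> B.stop
 *                    break B.started -> A.start
 *   stack_starting : add   B.started -> A.stop
 *                    break A.stop    -> B.stop
 *
 * matching the "stable, only starting or only stopping" rule that
 * at_stack_bottom() established via find_clone_activity_on().
 */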
crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id); order_actions(start, clone_stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc2, /* Needed if the clone's started pseudo-action ever gets printed in the graph */ if(other_w->action == clone_start) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, start->uuid); other_w->type = pe_order_none; } ); } else if(mode & stack_starting) { crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id); order_actions(clone_start, stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, clone_stop->actions_before, lpc2, /* Needed if the clone's stop pseudo-action ever gets printed in the graph */ if(other_w->action == stop) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, clone_stop->uuid); other_w->type = pe_order_none; } ); } } ); crm_free(start->uuid); crm_free(start->task); start->task = crm_strdup(RSC_MIGRATED); start->uuid = generate_op_key(rsc->id, start->task, 0); add_hash_param(start->meta, "migrate_source_uuid", stop->node->details->id); add_hash_param(start->meta, "migrate_source", stop->node->details->uname); add_hash_param(start->meta, "migrate_target", start->node->details->uname); /* Anything that needed stop to complete, now also needs start to have completed */ slist_iter( other_w, action_wrapper_t, stop->actions_after, lpc, other = other_w->action; if(other->optional || other->rsc != NULL) { continue; } crm_debug("Ordering %s before %s (stop)", start->uuid, other_w->action->uuid); order_actions(start, other, other_w->type); ); /* Stop also needs anything that the start needed to have completed too */ slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; if(other->rsc == NULL) { /* nothing */ } else if(other->optional || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (start)", other_w->action->uuid, stop->uuid); order_actions(other, stop, other_w->type); ); } else if(start && stop && start->allow_reload_conversion && stop->node->details == start->node->details) { action_t *rewrite = NULL; start->pseudo = TRUE; /* easier than trying to delete it from the graph */ action = NULL; key = promote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } if(action && action->optional == FALSE) { action->pseudo = TRUE; } g_list_free(action_list); crm_free(key); action = NULL; key = demote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } g_list_free(action_list); crm_free(key); if(action && action->optional == FALSE) { rewrite = action; stop->pseudo = TRUE; } else { rewrite = stop; } crm_info("Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); crm_free(rewrite->uuid); crm_free(rewrite->task); rewrite->task = crm_strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } else { do_crm_log_unlikely(level+1, "%s nothing to do", rsc->id); } } diff --git a/pengine/regression.sh b/pengine/regression.sh index 8cbaf1f8d9..b5cb9fe5e6 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,339 +1,340 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your 
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

if [ -x /usr/bin/valgrind ]; then
    export G_SLICE=always-malloc
    VALGRIND_CMD="valgrind -q --show-reachable=no --leak-check=full --trace-children=no --time-stamp=yes --num-callers=20 --suppressions=./ptest.supp"
fi

. regression.core.sh

create_mode="true"
echo Generating test outputs for these tests...
# do_test file description

echo Done.
echo ""

echo Performing the following tests...
create_mode="false"

echo ""
do_test simple1 "Offline "
do_test simple2 "Start "
do_test simple3 "Start 2 "
do_test simple4 "Start Failed"
do_test simple6 "Stop Start "
do_test simple7 "Shutdown "
#do_test simple8 "Stonith "
#do_test simple9 "Lower version"
#do_test simple10 "Higher version"
do_test simple11 "Priority (ne)"
do_test simple12 "Priority (eq)"
do_test simple8 "Stickiness"
echo ""
do_test params-0 "Params: No change"
do_test params-1 "Params: Changed"
do_test params-2 "Params: Resource definition"
do_test params-4 "Params: Reload"
do_test novell-251689 "Resource definition change + target_role=stopped"
do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
echo ""
do_test orphan-0 "Orphan ignore"
do_test orphan-1 "Orphan stop"
echo ""
do_test target-0 "Target Role : baseline"
do_test target-1 "Target Role : master"
do_test target-2 "Target Role : invalid"
echo ""
do_test date-1 "Dates" -d "2005-020"
do_test date-2 "Date Spec - Pass" -d "2005-020T12:30"
do_test date-3 "Date Spec - Fail" -d "2005-020T11:30"
do_test probe-0 "Probe (anon clone)"
do_test probe-1 "Pending Probe"
do_test probe-2 "Correctly re-probe cloned groups"
do_test probe-3 "Probe (pending node)"
do_test probe-4 "Probe (pending node + stopped resource)" --rc 5
do_test standby "Standby"
do_test comments "Comments"
echo ""
do_test rsc_dep1 "Must not "
do_test rsc_dep3 "Must "
do_test rsc_dep5 "Must not 3 "
do_test rsc_dep7 "Must 3 "
do_test rsc_dep10 "Must (but can't)"
do_test rsc_dep2 "Must (running) "
do_test rsc_dep8 "Must (running : alt) "
do_test rsc_dep4 "Must (running + move)"
do_test asymmetric "Asymmetric - require explicit location constraints"
echo ""
do_test order1 "Order start 1 "
do_test order2 "Order start 2 "
do_test order3 "Order stop "
do_test order4 "Order (multiple) "
do_test order5 "Order (move) "
do_test order6 "Order (move w/ restart) "
do_test order7 "Order (mandatory) "
do_test order-optional "Order (score=0) "
do_test order-required "Order (score=INFINITY) "
do_test bug-lf-2171 "Prevent group start when clone is stopped"
do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
do_test order-sets "Ordering for resource sets"
echo ""
do_test coloc-loop "Colocation - loop"
do_test coloc-many-one "Colocation - many-to-one"
do_test coloc-list "Colocation - many-to-one with list"
do_test coloc-group "Colocation - groups"
do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
do_test coloc-attr "Colocation based on node attributes"
do_test coloc-negative-group "Negative colocation with a group"
#echo ""
#do_test agent1 "version: lt (empty)"
#do_test agent2 "version: eq "
#do_test agent3 "version: gt "
echo ""
do_test attrs1 "string: eq (and) "
do_test attrs2 "string: lt / gt (and)"
do_test attrs3 "string: ne (or) "
do_test attrs4 "string: exists "
do_test attrs5 "string: not_exists "
do_test attrs6 "is_dc: true "
do_test attrs7 "is_dc: false "
do_test attrs8 "score_attribute "
echo ""
do_test mon-rsc-1 "Schedule Monitor - start"
do_test mon-rsc-2 "Schedule Monitor - move "
do_test mon-rsc-3 "Schedule Monitor - pending start "
do_test mon-rsc-4 "Schedule Monitor - move/pending start"
echo ""
do_test rec-rsc-0 "Resource Recover - no start "
do_test rec-rsc-1 "Resource Recover - start "
do_test rec-rsc-2 "Resource Recover - monitor "
do_test rec-rsc-3 "Resource Recover - stop - ignore"
do_test rec-rsc-4 "Resource Recover - stop - block "
do_test rec-rsc-5 "Resource Recover - stop - fence "
do_test rec-rsc-6 "Resource Recover - multiple - restart"
do_test rec-rsc-7 "Resource Recover - multiple - stop "
do_test rec-rsc-8 "Resource Recover - multiple - block "
do_test rec-rsc-9 "Resource Recover - group/group"
echo ""
do_test quorum-1 "No quorum - ignore"
do_test quorum-2 "No quorum - freeze"
do_test quorum-3 "No quorum - stop "
do_test quorum-4 "No quorum - start anyway"
do_test quorum-5 "No quorum - start anyway (group)"
do_test quorum-6 "No quorum - start anyway (clone)"
echo ""
do_test rec-node-1 "Node Recover - Startup - no fence"
do_test rec-node-2 "Node Recover - Startup - fence "
do_test rec-node-3 "Node Recover - HA down - no fence"
do_test rec-node-4 "Node Recover - HA down - fence "
do_test rec-node-5 "Node Recover - CRM down - no fence"
do_test rec-node-6 "Node Recover - CRM down - fence "
do_test rec-node-7 "Node Recover - no quorum - ignore "
do_test rec-node-8 "Node Recover - no quorum - freeze "
do_test rec-node-9 "Node Recover - no quorum - stop "
do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
do_test rec-node-11 "Node Recover - CRM down w/ group - fence "
do_test rec-node-12 "Node Recover - nothing active - fence "
do_test rec-node-13 "Node Recover - failed resource + shutdown - fence "
do_test rec-node-15 "Node Recover - unknown lrm section"
do_test rec-node-14 "Serialize all stonith actions"
echo ""
do_test multi1 "Multiple Active (stop/start)"
echo ""
do_test migrate-stop "Migration in a stopping stack"
do_test migrate-start "Migration in a starting stack"
do_test migrate-stop_start "Migration in a restarting stack"
do_test migrate-stop-complex "Migration in a complex stopping stack"
do_test migrate-start-complex "Migration in a complex starting stack"
do_test migrate-stop-start-complex "Migration in a complex moving stack"
do_test migrate-1 "Migrate (migrate)"
do_test migrate-2 "Migrate (stable)"
do_test migrate-3 "Migrate (failed migrate_to)"
do_test migrate-4 "Migrate (failed migrate_from)"
do_test novell-252693 "Migration in a stopping stack"
do_test novell-252693-2 "Migration in a starting stack"
do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
do_test bug-1820 "Migration in a group"
do_test bug-1820-1 "Non-migration in a group"
do_test migrate-5 "Primitive migration with a clone"
#echo ""
#do_test complex1 "Complex "
echo ""
do_test group1 "Group "
do_test group2 "Group + Native "
do_test group3 "Group + Group "
do_test group4 "Group + Native (nothing)"
do_test group5 "Group + Native (move) "
do_test group6 "Group + Group (move) "
do_test group7 "Group colocation"
do_test group13 "Group colocation (can't run)"
do_test group8 "Group anti-colocation"
do_test group9 "Group recovery"
do_test group10 "Group partial recovery"
do_test group11 "Group target_role"
do_test group14 "Group stop (graph terminated)"
do_test group15 "-ve group colocation"
do_test bug-1573 "Partial stop of a group with two children"
do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
echo ""
do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
do_test inc0 "Incarnation start"
do_test inc1 "Incarnation start order"
do_test inc2 "Incarnation silent restart, stop, move"
do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
do_test inc7 "Clone colocation"
do_test inc8 "Clone anti-colocation"
do_test inc9 "Non-unique clone"
do_test inc10 "Non-unique clone (stop)"
do_test inc11 "Primitive colocation with clones"
do_test inc12 "Clone shutdown"
do_test cloned-group "Make sure only the correct number of cloned groups are started"
do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved"
do_test clone-max-zero "Orphan processing with clone-max=0"
do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
do_test bug-lf-2160 "Don't shuffle clones due to colocation"
do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
do_test bug-lf-2153 "Clone ordering constraints"
echo ""
do_test master-0 "Stopped -> Slave"
do_test master-1 "Stopped -> Promote"
do_test master-2 "Stopped -> Promote : notify"
do_test master-3 "Stopped -> Promote : master location"
do_test master-4 "Started -> Promote : master location"
do_test master-5 "Promoted -> Promoted"
do_test master-6 "Promoted -> Promoted (2)"
do_test master-7 "Promoted -> Fenced"
do_test master-8 "Promoted -> Fenced -> Moved"
do_test master-9 "Stopped + Promotable + No quorum"
do_test master-10 "Stopped -> Promotable : notify with monitor"
do_test master-11 "Stopped -> Promote : colocation"
do_test novell-239082 "Demote/Promote ordering"
do_test novell-239087 "Stable master placement"
do_test master-12 "Promotion based solely on rsc_location constraints"
do_test master-13 "Include preferences of colocated resources when placing master"
do_test master-demote "Ordering when actions depend on demoting a slave resource"
do_test master-ordering "Prevent resources from starting that need a master"
do_test bug-1765 "Master-Master Colocation (don't stop the slaves)"
do_test master-group "Promotion of cloned groups"
do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
do_test master-failed-demote "Don't retry failed demote actions"
do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)"
do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
do_test master-reattach "Re-attach to a running master"
do_test master-allow-start "Don't include master score if it would prevent allocation"
do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
do_test master-role "Prevent target-role from promoting more than master-max instances"
echo ""
do_test managed-0 "Managed (reference)"
do_test managed-1 "Not managed - down "
do_test managed-2 "Not managed - up "
echo ""
do_test interleave-0 "Interleave (reference)"
do_test interleave-1 "coloc - not interleaved"
do_test interleave-2 "coloc - interleaved "
do_test interleave-3 "coloc - interleaved (2)"
do_test interleave-pseudo-stop "Interleaved clone during stonith"
do_test interleave-stop "Interleaved clone during stop"
do_test interleave-restart "Interleaved clone during dependency restart"
echo ""
do_test notify-0 "Notify reference"
do_test notify-1 "Notify simple"
do_test notify-2 "Notify simple, confirm"
do_test notify-3 "Notify move, confirm"
do_test novell-239079 "Notification priority"
#do_test notify-2 "Notify - 764"
echo ""
do_test 594 "OSDL #594"
do_test 662 "OSDL #662"
do_test 696 "OSDL #696"
do_test 726 "OSDL #726"
do_test 735 "OSDL #735"
do_test 764 "OSDL #764"
do_test 797 "OSDL #797"
do_test 829 "OSDL #829"
do_test 994 "OSDL #994"
do_test 994-2 "OSDL #994 - with a dependent resource"
do_test 1360 "OSDL #1360 - Clone stickiness"
do_test 1484 "OSDL #1484 - on_fail=stop"
do_test 1494 "OSDL #1494 - Clone stability"
do_test unrunnable-1 "Unrunnable"
do_test stonith-0 "Stonith loop - 1"
do_test stonith-1 "Stonith loop - 2"
do_test stonith-2 "Stonith loop - 3"
do_test stonith-3 "Stonith startup"
do_test bug-1572-1 "Recovery of groups depending on master/slave"
do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
do_test bug-1685 "Depends-on-master ordering"
do_test bug-1822 "Don't promote partially active groups"
do_test bug-pm-11 "New resource added to a m/s group"
do_test bug-pm-12 "Recover only the failed portion of a cloned group"
do_test bug-n-387749 "Don't shuffle clone instances"
do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
do_test bug-lf-1920 "Correctly handle probes that find active resources"
do_test bnc-515172 "Location constraint with multiple expressions"
+do_test colocate-primitive-with-clone "Optional colocation with a clone"
echo ""
do_test systemhealth1 "System Health () #1"
do_test systemhealth2 "System Health () #2"
do_test systemhealth3 "System Health () #3"
do_test systemhealthn1 "System Health (None) #1"
do_test systemhealthn2 "System Health (None) #2"
do_test systemhealthn3 "System Health (None) #3"
do_test systemhealthm1 "System Health (Migrate On Red) #1"
do_test systemhealthm2 "System Health (Migrate On Red) #2"
do_test systemhealthm3 "System Health (Migrate On Red) #3"
do_test systemhealtho1 "System Health (Only Green) #1"
do_test systemhealtho2 "System Health (Only Green) #2"
do_test systemhealtho3 "System Health (Only Green) #3"
do_test systemhealthp1 "System Health (Progressive) #1"
do_test systemhealthp2 "System Health (Progressive) #2"
do_test systemhealthp3 "System Health (Progressive) #3"
echo ""
test_results
"UmIPaddr_start_0 srv04" [ style = bold] +"UMgroup01_start_0" -> "UmVIPcheck_start_0 srv04" [ style = bold] +"UMgroup01_start_0" [ style=bold color="green" fontcolor="orange" ] +"UmDummy01_monitor_10000 srv04" [ style=bold color="green" fontcolor="black" ] +"UmDummy01_start_0 srv04" -> "UMgroup01_running_0" [ style = bold] +"UmDummy01_start_0 srv04" -> "UmDummy01_monitor_10000 srv04" [ style = bold] +"UmDummy01_start_0 srv04" -> "UmDummy02_start_0 srv04" [ style = bold] +"UmDummy01_start_0 srv04" [ style=bold color="green" fontcolor="black" ] +"UmDummy02_monitor_10000 srv04" [ style=bold color="green" fontcolor="black" ] +"UmDummy02_start_0 srv04" -> "UMgroup01_running_0" [ style = bold] +"UmDummy02_start_0 srv04" -> "UmDummy02_monitor_10000 srv04" [ style = bold] +"UmDummy02_start_0 srv04" [ style=bold color="green" fontcolor="black" ] +"UmIPaddr_monitor_10000 srv04" [ style=bold color="green" fontcolor="black" ] +"UmIPaddr_start_0 srv04" -> "UMgroup01_running_0" [ style = bold] +"UmIPaddr_start_0 srv04" -> "UmDummy01_start_0 srv04" [ style = bold] +"UmIPaddr_start_0 srv04" -> "UmIPaddr_monitor_10000 srv04" [ style = bold] +"UmIPaddr_start_0 srv04" [ style=bold color="green" fontcolor="black" ] +"UmVIPcheck_start_0 srv04" -> "UMgroup01_running_0" [ style = bold] +"UmVIPcheck_start_0 srv04" -> "UmIPaddr_start_0 srv04" [ style = bold] +"UmVIPcheck_start_0 srv04" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/colocate-primitive-with-clone.exp b/pengine/test10/colocate-primitive-with-clone.exp new file mode 100644 index 0000000000..dc1bfa1d64 --- /dev/null +++ b/pengine/test10/colocate-primitive-with-clone.exp @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/colocate-primitive-with-clone.scores b/pengine/test10/colocate-primitive-with-clone.scores new file mode 100644 index 0000000000..2a0e528c41 --- /dev/null +++ b/pengine/test10/colocate-primitive-with-clone.scores @@ -0,0 +1,453 @@ +Allocation scores: +group_color: UMgroup01 allocation score on srv01: -1000000 +group_color: UMgroup01 allocation score on srv02: -1000000 +group_color: UMgroup01 allocation score on srv03: -1000000 +group_color: UMgroup01 allocation score on srv04: 100 +group_color: UmVIPcheck allocation score on srv01: -1000000 +group_color: UmVIPcheck allocation score on srv02: -1000000 +group_color: UmVIPcheck allocation score on srv03: -1000000 +group_color: UmVIPcheck allocation score on srv04: 100 +group_color: UmIPaddr allocation score on srv01: 0 +group_color: UmIPaddr allocation score on srv02: 0 +group_color: UmIPaddr allocation score on srv03: 0 +group_color: UmIPaddr allocation score on srv04: 0 +group_color: UmDummy01 allocation score on srv01: 0 +group_color: UmDummy01 allocation score on srv02: 0 +group_color: UmDummy01 allocation score on srv03: 0 +group_color: UmDummy01 allocation score on srv04: 0 +group_color: UmDummy02 allocation score on srv01: 0 +group_color: UmDummy02 allocation score on srv02: 0 +group_color: UmDummy02 allocation score on srv03: 0 +group_color: UmDummy02 allocation score on srv04: 0 +clone_color: clnDiskd1 allocation score on srv01: -1000000 +clone_color: clnDiskd1 allocation score on srv02: 500 +clone_color: clnDiskd1 allocation score on srv03: 500 +clone_color: clnDiskd1 allocation score on 
srv04: 500 +clone_color: clnPrmDiskd1:0 allocation score on srv01: 0 +clone_color: clnPrmDiskd1:0 allocation score on srv02: 0 +clone_color: clnPrmDiskd1:0 allocation score on srv03: 0 +clone_color: clnPrmDiskd1:0 allocation score on srv04: 0 +clone_color: clnPrmDiskd1:1 allocation score on srv01: 0 +clone_color: clnPrmDiskd1:1 allocation score on srv02: 100 +clone_color: clnPrmDiskd1:1 allocation score on srv03: 0 +clone_color: clnPrmDiskd1:1 allocation score on srv04: 0 +clone_color: clnPrmDiskd1:2 allocation score on srv01: 0 +clone_color: clnPrmDiskd1:2 allocation score on srv02: 0 +clone_color: clnPrmDiskd1:2 allocation score on srv03: 100 +clone_color: clnPrmDiskd1:2 allocation score on srv04: 0 +clone_color: clnPrmDiskd1:3 allocation score on srv01: 0 +clone_color: clnPrmDiskd1:3 allocation score on srv02: 0 +clone_color: clnPrmDiskd1:3 allocation score on srv03: 0 +clone_color: clnPrmDiskd1:3 allocation score on srv04: 100 +native_color: clnPrmDiskd1:1 allocation score on srv01: -1000000 +native_color: clnPrmDiskd1:1 allocation score on srv02: 100 +native_color: clnPrmDiskd1:1 allocation score on srv03: 0 +native_color: clnPrmDiskd1:1 allocation score on srv04: 0 +native_color: clnPrmDiskd1:2 allocation score on srv01: -1000000 +native_color: clnPrmDiskd1:2 allocation score on srv02: -1000000 +native_color: clnPrmDiskd1:2 allocation score on srv03: 100 +native_color: clnPrmDiskd1:2 allocation score on srv04: 0 +native_color: clnPrmDiskd1:3 allocation score on srv01: -1000000 +native_color: clnPrmDiskd1:3 allocation score on srv02: -1000000 +native_color: clnPrmDiskd1:3 allocation score on srv03: -1000000 +native_color: clnPrmDiskd1:3 allocation score on srv04: 100 +native_color: clnPrmDiskd1:0 allocation score on srv01: -1000000 +native_color: clnPrmDiskd1:0 allocation score on srv02: -1000000 +native_color: clnPrmDiskd1:0 allocation score on srv03: -1000000 +native_color: clnPrmDiskd1:0 allocation score on srv04: -1000000 +clone_color: clnG3dummy1 allocation score on srv01: -1000000 +clone_color: clnG3dummy1 allocation score on srv02: 500 +clone_color: clnG3dummy1 allocation score on srv03: 500 +clone_color: clnG3dummy1 allocation score on srv04: 500 +clone_color: clnG3dummy01:0 allocation score on srv01: 0 +clone_color: clnG3dummy01:0 allocation score on srv02: 0 +clone_color: clnG3dummy01:0 allocation score on srv03: 0 +clone_color: clnG3dummy01:0 allocation score on srv04: 0 +clone_color: clnG3dummy01:1 allocation score on srv01: 0 +clone_color: clnG3dummy01:1 allocation score on srv02: 100 +clone_color: clnG3dummy01:1 allocation score on srv03: 0 +clone_color: clnG3dummy01:1 allocation score on srv04: 0 +clone_color: clnG3dummy01:2 allocation score on srv01: 0 +clone_color: clnG3dummy01:2 allocation score on srv02: 0 +clone_color: clnG3dummy01:2 allocation score on srv03: 100 +clone_color: clnG3dummy01:2 allocation score on srv04: 0 +clone_color: clnG3dummy01:3 allocation score on srv01: 0 +clone_color: clnG3dummy01:3 allocation score on srv02: 0 +clone_color: clnG3dummy01:3 allocation score on srv03: 0 +clone_color: clnG3dummy01:3 allocation score on srv04: 100 +native_color: clnG3dummy01:1 allocation score on srv01: -1000000 +native_color: clnG3dummy01:1 allocation score on srv02: 100 +native_color: clnG3dummy01:1 allocation score on srv03: 0 +native_color: clnG3dummy01:1 allocation score on srv04: 0 +native_color: clnG3dummy01:2 allocation score on srv01: -1000000 +native_color: clnG3dummy01:2 allocation score on srv02: -1000000 +native_color: clnG3dummy01:2 allocation 
score on srv03: 100 +native_color: clnG3dummy01:2 allocation score on srv04: 0 +native_color: clnG3dummy01:3 allocation score on srv01: -1000000 +native_color: clnG3dummy01:3 allocation score on srv02: -1000000 +native_color: clnG3dummy01:3 allocation score on srv03: -1000000 +native_color: clnG3dummy01:3 allocation score on srv04: 100 +native_color: clnG3dummy01:0 allocation score on srv01: -1000000 +native_color: clnG3dummy01:0 allocation score on srv02: -1000000 +native_color: clnG3dummy01:0 allocation score on srv03: -1000000 +native_color: clnG3dummy01:0 allocation score on srv04: -1000000 +clone_color: clnG3dummy2 allocation score on srv01: -1000000 +clone_color: clnG3dummy2 allocation score on srv02: 500 +clone_color: clnG3dummy2 allocation score on srv03: 500 +clone_color: clnG3dummy2 allocation score on srv04: 500 +clone_color: clnG3dummy02:0 allocation score on srv01: 0 +clone_color: clnG3dummy02:0 allocation score on srv02: 0 +clone_color: clnG3dummy02:0 allocation score on srv03: 0 +clone_color: clnG3dummy02:0 allocation score on srv04: 0 +clone_color: clnG3dummy02:1 allocation score on srv01: 0 +clone_color: clnG3dummy02:1 allocation score on srv02: 100 +clone_color: clnG3dummy02:1 allocation score on srv03: 0 +clone_color: clnG3dummy02:1 allocation score on srv04: 0 +clone_color: clnG3dummy02:2 allocation score on srv01: 0 +clone_color: clnG3dummy02:2 allocation score on srv02: 0 +clone_color: clnG3dummy02:2 allocation score on srv03: 100 +clone_color: clnG3dummy02:2 allocation score on srv04: 0 +clone_color: clnG3dummy02:3 allocation score on srv01: 0 +clone_color: clnG3dummy02:3 allocation score on srv02: 0 +clone_color: clnG3dummy02:3 allocation score on srv03: 0 +clone_color: clnG3dummy02:3 allocation score on srv04: 100 +native_color: clnG3dummy02:1 allocation score on srv01: -1000000 +native_color: clnG3dummy02:1 allocation score on srv02: 100 +native_color: clnG3dummy02:1 allocation score on srv03: 0 +native_color: clnG3dummy02:1 allocation score on srv04: 0 +native_color: clnG3dummy02:2 allocation score on srv01: -1000000 +native_color: clnG3dummy02:2 allocation score on srv02: -1000000 +native_color: clnG3dummy02:2 allocation score on srv03: 100 +native_color: clnG3dummy02:2 allocation score on srv04: 0 +native_color: clnG3dummy02:3 allocation score on srv01: -1000000 +native_color: clnG3dummy02:3 allocation score on srv02: -1000000 +native_color: clnG3dummy02:3 allocation score on srv03: -1000000 +native_color: clnG3dummy02:3 allocation score on srv04: 100 +native_color: clnG3dummy02:0 allocation score on srv01: -1000000 +native_color: clnG3dummy02:0 allocation score on srv02: -1000000 +native_color: clnG3dummy02:0 allocation score on srv03: -1000000 +native_color: clnG3dummy02:0 allocation score on srv04: -1000000 +clone_color: clnPingd allocation score on srv01: -1000000 +clone_color: clnPingd allocation score on srv02: 500 +clone_color: clnPingd allocation score on srv03: 500 +clone_color: clnPingd allocation score on srv04: 500 +clone_color: clnPrmPingd:0 allocation score on srv01: 0 +clone_color: clnPrmPingd:0 allocation score on srv02: 0 +clone_color: clnPrmPingd:0 allocation score on srv03: 0 +clone_color: clnPrmPingd:0 allocation score on srv04: 0 +clone_color: clnPrmPingd:1 allocation score on srv01: 0 +clone_color: clnPrmPingd:1 allocation score on srv02: 100 +clone_color: clnPrmPingd:1 allocation score on srv03: 0 +clone_color: clnPrmPingd:1 allocation score on srv04: 0 +clone_color: clnPrmPingd:2 allocation score on srv01: 0 +clone_color: clnPrmPingd:2 
allocation score on srv02: 0 +clone_color: clnPrmPingd:2 allocation score on srv03: 100 +clone_color: clnPrmPingd:2 allocation score on srv04: 0 +clone_color: clnPrmPingd:3 allocation score on srv01: 0 +clone_color: clnPrmPingd:3 allocation score on srv02: 0 +clone_color: clnPrmPingd:3 allocation score on srv03: 0 +clone_color: clnPrmPingd:3 allocation score on srv04: 100 +native_color: clnPrmPingd:1 allocation score on srv01: -1000000 +native_color: clnPrmPingd:1 allocation score on srv02: 100 +native_color: clnPrmPingd:1 allocation score on srv03: 0 +native_color: clnPrmPingd:1 allocation score on srv04: 0 +native_color: clnPrmPingd:2 allocation score on srv01: -1000000 +native_color: clnPrmPingd:2 allocation score on srv02: -1000000 +native_color: clnPrmPingd:2 allocation score on srv03: 100 +native_color: clnPrmPingd:2 allocation score on srv04: 0 +native_color: clnPrmPingd:3 allocation score on srv01: -1000000 +native_color: clnPrmPingd:3 allocation score on srv02: -1000000 +native_color: clnPrmPingd:3 allocation score on srv03: -1000000 +native_color: clnPrmPingd:3 allocation score on srv04: 100 +native_color: clnPrmPingd:0 allocation score on srv01: -1000000 +native_color: clnPrmPingd:0 allocation score on srv02: -1000000 +native_color: clnPrmPingd:0 allocation score on srv03: -1000000 +native_color: clnPrmPingd:0 allocation score on srv04: -1000000 +clone_color: clnUMgroup01 allocation score on srv01: -1000000 +clone_color: clnUMgroup01 allocation score on srv02: -1000000 +clone_color: clnUMgroup01 allocation score on srv03: -1000000 +clone_color: clnUMgroup01 allocation score on srv04: 0 +clone_color: clnUmResource:0 allocation score on srv01: 0 +clone_color: clnUmResource:0 allocation score on srv02: -1000000 +clone_color: clnUmResource:0 allocation score on srv03: -1000000 +clone_color: clnUmResource:0 allocation score on srv04: 0 +clone_color: clnUMdummy01:0 allocation score on srv01: 0 +clone_color: clnUMdummy01:0 allocation score on srv02: -1000000 +clone_color: clnUMdummy01:0 allocation score on srv03: -1000000 +clone_color: clnUMdummy01:0 allocation score on srv04: 100 +clone_color: clnUMdummy02:0 allocation score on srv01: 0 +clone_color: clnUMdummy02:0 allocation score on srv02: 0 +clone_color: clnUMdummy02:0 allocation score on srv03: 0 +clone_color: clnUMdummy02:0 allocation score on srv04: 100 +clone_color: clnUmResource:1 allocation score on srv01: 0 +clone_color: clnUmResource:1 allocation score on srv02: -1000000 +clone_color: clnUmResource:1 allocation score on srv03: -1000000 +clone_color: clnUmResource:1 allocation score on srv04: 0 +clone_color: clnUMdummy01:1 allocation score on srv01: 0 +clone_color: clnUMdummy01:1 allocation score on srv02: -1000000 +clone_color: clnUMdummy01:1 allocation score on srv03: -1000000 +clone_color: clnUMdummy01:1 allocation score on srv04: 0 +clone_color: clnUMdummy02:1 allocation score on srv01: 0 +clone_color: clnUMdummy02:1 allocation score on srv02: 0 +clone_color: clnUMdummy02:1 allocation score on srv03: 0 +clone_color: clnUMdummy02:1 allocation score on srv04: 0 +group_color: clnUmResource:0 allocation score on srv01: -1000000 +group_color: clnUmResource:0 allocation score on srv02: -1000000 +group_color: clnUmResource:0 allocation score on srv03: -1000000 +group_color: clnUmResource:0 allocation score on srv04: 0 +group_color: clnUMdummy01:0 allocation score on srv01: -1000000 +group_color: clnUMdummy01:0 allocation score on srv02: -1000000 +group_color: clnUMdummy01:0 allocation score on srv03: -1000000 +group_color: 
clnUMdummy01:0 allocation score on srv04: 100 +group_color: clnUMdummy02:0 allocation score on srv01: -1000000 +group_color: clnUMdummy02:0 allocation score on srv02: 0 +group_color: clnUMdummy02:0 allocation score on srv03: 0 +group_color: clnUMdummy02:0 allocation score on srv04: 100 +native_color: clnUMdummy01:0 allocation score on srv01: -1000000 +native_color: clnUMdummy01:0 allocation score on srv02: -1000000 +native_color: clnUMdummy01:0 allocation score on srv03: -1000000 +native_color: clnUMdummy01:0 allocation score on srv04: 200 +native_color: clnUMdummy02:0 allocation score on srv01: -1000000 +native_color: clnUMdummy02:0 allocation score on srv02: -1000000 +native_color: clnUMdummy02:0 allocation score on srv03: -1000000 +native_color: clnUMdummy02:0 allocation score on srv04: 100 +group_color: clnUmResource:1 allocation score on srv01: -1000000 +group_color: clnUmResource:1 allocation score on srv02: -1000000 +group_color: clnUmResource:1 allocation score on srv03: -1000000 +group_color: clnUmResource:1 allocation score on srv04: -1000000 +group_color: clnUMdummy01:1 allocation score on srv01: -1000000 +group_color: clnUMdummy01:1 allocation score on srv02: -1000000 +group_color: clnUMdummy01:1 allocation score on srv03: -1000000 +group_color: clnUMdummy01:1 allocation score on srv04: -1000000 +group_color: clnUMdummy02:1 allocation score on srv01: -1000000 +group_color: clnUMdummy02:1 allocation score on srv02: 0 +group_color: clnUMdummy02:1 allocation score on srv03: 0 +group_color: clnUMdummy02:1 allocation score on srv04: -1000000 +native_color: clnUMdummy01:1 allocation score on srv01: -1000000 +native_color: clnUMdummy01:1 allocation score on srv02: -1000000 +native_color: clnUMdummy01:1 allocation score on srv03: -1000000 +native_color: clnUMdummy01:1 allocation score on srv04: -1000000 +native_color: clnUMdummy02:1 allocation score on srv01: -1000000 +native_color: clnUMdummy02:1 allocation score on srv02: -1000000 +native_color: clnUMdummy02:1 allocation score on srv03: -1000000 +native_color: clnUMdummy02:1 allocation score on srv04: -1000000 +native_color: UmVIPcheck allocation score on srv01: -1000000 +native_color: UmVIPcheck allocation score on srv02: -1000000 +native_color: UmVIPcheck allocation score on srv03: -1000000 +native_color: UmVIPcheck allocation score on srv04: 100 +native_color: UmIPaddr allocation score on srv01: -1000000 +native_color: UmIPaddr allocation score on srv02: -1000000 +native_color: UmIPaddr allocation score on srv03: -1000000 +native_color: UmIPaddr allocation score on srv04: 0 +native_color: UmDummy01 allocation score on srv01: -1000000 +native_color: UmDummy01 allocation score on srv02: -1000000 +native_color: UmDummy01 allocation score on srv03: -1000000 +native_color: UmDummy01 allocation score on srv04: 0 +native_color: UmDummy02 allocation score on srv01: -1000000 +native_color: UmDummy02 allocation score on srv02: -1000000 +native_color: UmDummy02 allocation score on srv03: -1000000 +native_color: UmDummy02 allocation score on srv04: 0 +group_color: OVDBgroup02-1 allocation score on srv01: -1000000 +group_color: OVDBgroup02-1 allocation score on srv02: -1000000 +group_color: OVDBgroup02-1 allocation score on srv03: -1000000 +group_color: OVDBgroup02-1 allocation score on srv04: 100 +group_color: prmExPostgreSQLDB1 allocation score on srv01: -1000000 +group_color: prmExPostgreSQLDB1 allocation score on srv02: -1000000 +group_color: prmExPostgreSQLDB1 allocation score on srv03: -1000000 +group_color: prmExPostgreSQLDB1 allocation 
score on srv04: 200 +group_color: prmFsPostgreSQLDB1-1 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB1-1 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB1-1 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB1-1 allocation score on srv04: 100 +group_color: prmFsPostgreSQLDB1-2 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB1-2 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB1-2 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB1-2 allocation score on srv04: 100 +group_color: prmFsPostgreSQLDB1-3 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB1-3 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB1-3 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB1-3 allocation score on srv04: 100 +group_color: prmIpPostgreSQLDB1 allocation score on srv01: 0 +group_color: prmIpPostgreSQLDB1 allocation score on srv02: 0 +group_color: prmIpPostgreSQLDB1 allocation score on srv03: 0 +group_color: prmIpPostgreSQLDB1 allocation score on srv04: 100 +group_color: prmApPostgreSQLDB1 allocation score on srv01: 0 +group_color: prmApPostgreSQLDB1 allocation score on srv02: 0 +group_color: prmApPostgreSQLDB1 allocation score on srv03: 0 +group_color: prmApPostgreSQLDB1 allocation score on srv04: 100 +native_color: prmExPostgreSQLDB1 allocation score on srv01: -1000000 +native_color: prmExPostgreSQLDB1 allocation score on srv02: -1000000 +native_color: prmExPostgreSQLDB1 allocation score on srv03: -1000000 +native_color: prmExPostgreSQLDB1 allocation score on srv04: 700 +native_color: prmFsPostgreSQLDB1-1 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB1-1 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB1-1 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB1-1 allocation score on srv04: 500 +native_color: prmFsPostgreSQLDB1-2 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB1-2 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB1-2 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB1-2 allocation score on srv04: 400 +native_color: prmFsPostgreSQLDB1-3 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB1-3 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB1-3 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB1-3 allocation score on srv04: 300 +native_color: prmIpPostgreSQLDB1 allocation score on srv01: -1000000 +native_color: prmIpPostgreSQLDB1 allocation score on srv02: -1000000 +native_color: prmIpPostgreSQLDB1 allocation score on srv03: -1000000 +native_color: prmIpPostgreSQLDB1 allocation score on srv04: 200 +native_color: prmApPostgreSQLDB1 allocation score on srv01: -1000000 +native_color: prmApPostgreSQLDB1 allocation score on srv02: -1000000 +native_color: prmApPostgreSQLDB1 allocation score on srv03: -1000000 +native_color: prmApPostgreSQLDB1 allocation score on srv04: 100 +group_color: OVDBgroup02-2 allocation score on srv01: -1000000 +group_color: OVDBgroup02-2 allocation score on srv02: 200 +group_color: OVDBgroup02-2 allocation score on srv03: -1000000 +group_color: OVDBgroup02-2 allocation score on srv04: 100 +group_color: prmExPostgreSQLDB2 allocation score on srv01: -1000000 +group_color: prmExPostgreSQLDB2 allocation score on srv02: 300 +group_color: prmExPostgreSQLDB2 allocation score on srv03: -1000000 +group_color: prmExPostgreSQLDB2 allocation score on srv04: 100 +group_color: prmFsPostgreSQLDB2-1 allocation score on srv01: 0 
+group_color: prmFsPostgreSQLDB2-1 allocation score on srv02: 100 +group_color: prmFsPostgreSQLDB2-1 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB2-1 allocation score on srv04: 0 +group_color: prmFsPostgreSQLDB2-2 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB2-2 allocation score on srv02: 100 +group_color: prmFsPostgreSQLDB2-2 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB2-2 allocation score on srv04: 0 +group_color: prmFsPostgreSQLDB2-3 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB2-3 allocation score on srv02: 100 +group_color: prmFsPostgreSQLDB2-3 allocation score on srv03: 0 +group_color: prmFsPostgreSQLDB2-3 allocation score on srv04: 0 +group_color: prmIpPostgreSQLDB2 allocation score on srv01: 0 +group_color: prmIpPostgreSQLDB2 allocation score on srv02: 100 +group_color: prmIpPostgreSQLDB2 allocation score on srv03: 0 +group_color: prmIpPostgreSQLDB2 allocation score on srv04: 0 +group_color: prmApPostgreSQLDB2 allocation score on srv01: 0 +group_color: prmApPostgreSQLDB2 allocation score on srv02: 100 +group_color: prmApPostgreSQLDB2 allocation score on srv03: 0 +group_color: prmApPostgreSQLDB2 allocation score on srv04: 0 +native_color: prmExPostgreSQLDB2 allocation score on srv01: -1000000 +native_color: prmExPostgreSQLDB2 allocation score on srv02: 800 +native_color: prmExPostgreSQLDB2 allocation score on srv03: -1000000 +native_color: prmExPostgreSQLDB2 allocation score on srv04: 100 +native_color: prmFsPostgreSQLDB2-1 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB2-1 allocation score on srv02: 500 +native_color: prmFsPostgreSQLDB2-1 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB2-1 allocation score on srv04: -1000000 +native_color: prmFsPostgreSQLDB2-2 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB2-2 allocation score on srv02: 400 +native_color: prmFsPostgreSQLDB2-2 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB2-2 allocation score on srv04: -1000000 +native_color: prmFsPostgreSQLDB2-3 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB2-3 allocation score on srv02: 300 +native_color: prmFsPostgreSQLDB2-3 allocation score on srv03: -1000000 +native_color: prmFsPostgreSQLDB2-3 allocation score on srv04: -1000000 +native_color: prmIpPostgreSQLDB2 allocation score on srv01: -1000000 +native_color: prmIpPostgreSQLDB2 allocation score on srv02: 200 +native_color: prmIpPostgreSQLDB2 allocation score on srv03: -1000000 +native_color: prmIpPostgreSQLDB2 allocation score on srv04: -1000000 +native_color: prmApPostgreSQLDB2 allocation score on srv01: -1000000 +native_color: prmApPostgreSQLDB2 allocation score on srv02: 100 +native_color: prmApPostgreSQLDB2 allocation score on srv03: -1000000 +native_color: prmApPostgreSQLDB2 allocation score on srv04: -1000000 +group_color: OVDBgroup02-3 allocation score on srv01: -1000000 +group_color: OVDBgroup02-3 allocation score on srv02: -1000000 +group_color: OVDBgroup02-3 allocation score on srv03: 200 +group_color: OVDBgroup02-3 allocation score on srv04: 100 +group_color: prmExPostgreSQLDB3 allocation score on srv01: -1000000 +group_color: prmExPostgreSQLDB3 allocation score on srv02: -1000000 +group_color: prmExPostgreSQLDB3 allocation score on srv03: 300 +group_color: prmExPostgreSQLDB3 allocation score on srv04: 100 +group_color: prmFsPostgreSQLDB3-1 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB3-1 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB3-1 
allocation score on srv03: 100 +group_color: prmFsPostgreSQLDB3-1 allocation score on srv04: 0 +group_color: prmFsPostgreSQLDB3-2 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB3-2 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB3-2 allocation score on srv03: 100 +group_color: prmFsPostgreSQLDB3-2 allocation score on srv04: 0 +group_color: prmFsPostgreSQLDB3-3 allocation score on srv01: 0 +group_color: prmFsPostgreSQLDB3-3 allocation score on srv02: 0 +group_color: prmFsPostgreSQLDB3-3 allocation score on srv03: 100 +group_color: prmFsPostgreSQLDB3-3 allocation score on srv04: 0 +group_color: prmIpPostgreSQLDB3 allocation score on srv01: 0 +group_color: prmIpPostgreSQLDB3 allocation score on srv02: 0 +group_color: prmIpPostgreSQLDB3 allocation score on srv03: 100 +group_color: prmIpPostgreSQLDB3 allocation score on srv04: 0 +group_color: prmApPostgreSQLDB3 allocation score on srv01: 0 +group_color: prmApPostgreSQLDB3 allocation score on srv02: 0 +group_color: prmApPostgreSQLDB3 allocation score on srv03: 100 +group_color: prmApPostgreSQLDB3 allocation score on srv04: 0 +native_color: prmExPostgreSQLDB3 allocation score on srv01: -1000000 +native_color: prmExPostgreSQLDB3 allocation score on srv02: -1000000 +native_color: prmExPostgreSQLDB3 allocation score on srv03: 800 +native_color: prmExPostgreSQLDB3 allocation score on srv04: 100 +native_color: prmFsPostgreSQLDB3-1 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB3-1 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB3-1 allocation score on srv03: 500 +native_color: prmFsPostgreSQLDB3-1 allocation score on srv04: -1000000 +native_color: prmFsPostgreSQLDB3-2 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB3-2 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB3-2 allocation score on srv03: 400 +native_color: prmFsPostgreSQLDB3-2 allocation score on srv04: -1000000 +native_color: prmFsPostgreSQLDB3-3 allocation score on srv01: -1000000 +native_color: prmFsPostgreSQLDB3-3 allocation score on srv02: -1000000 +native_color: prmFsPostgreSQLDB3-3 allocation score on srv03: 300 +native_color: prmFsPostgreSQLDB3-3 allocation score on srv04: -1000000 +native_color: prmIpPostgreSQLDB3 allocation score on srv01: -1000000 +native_color: prmIpPostgreSQLDB3 allocation score on srv02: -1000000 +native_color: prmIpPostgreSQLDB3 allocation score on srv03: 200 +native_color: prmIpPostgreSQLDB3 allocation score on srv04: -1000000 +native_color: prmApPostgreSQLDB3 allocation score on srv01: -1000000 +native_color: prmApPostgreSQLDB3 allocation score on srv02: -1000000 +native_color: prmApPostgreSQLDB3 allocation score on srv03: 100 +native_color: prmApPostgreSQLDB3 allocation score on srv04: -1000000 +group_color: grpStonith1 allocation score on srv01: -1000000 +group_color: grpStonith1 allocation score on srv02: 100 +group_color: grpStonith1 allocation score on srv03: 100 +group_color: grpStonith1 allocation score on srv04: 200 +group_color: prmStonithN1 allocation score on srv01: -1000000 +group_color: prmStonithN1 allocation score on srv02: 100 +group_color: prmStonithN1 allocation score on srv03: 100 +group_color: prmStonithN1 allocation score on srv04: 300 +native_color: prmStonithN1 allocation score on srv01: -1000000 +native_color: prmStonithN1 allocation score on srv02: 100 +native_color: prmStonithN1 allocation score on srv03: 100 +native_color: prmStonithN1 allocation score on srv04: 300 +group_color: grpStonith2 allocation score on srv01: 200 
+group_color: grpStonith2 allocation score on srv02: -1000000 +group_color: grpStonith2 allocation score on srv03: 100 +group_color: grpStonith2 allocation score on srv04: 100 +group_color: prmStonithN2 allocation score on srv01: 200 +group_color: prmStonithN2 allocation score on srv02: -1000000 +group_color: prmStonithN2 allocation score on srv03: 200 +group_color: prmStonithN2 allocation score on srv04: 100 +native_color: prmStonithN2 allocation score on srv01: 200 +native_color: prmStonithN2 allocation score on srv02: -1000000 +native_color: prmStonithN2 allocation score on srv03: 200 +native_color: prmStonithN2 allocation score on srv04: 100 +group_color: grpStonith3 allocation score on srv01: 100 +group_color: grpStonith3 allocation score on srv02: 200 +group_color: grpStonith3 allocation score on srv03: -1000000 +group_color: grpStonith3 allocation score on srv04: 100 +group_color: prmStonithN3 allocation score on srv01: 100 +group_color: prmStonithN3 allocation score on srv02: 300 +group_color: prmStonithN3 allocation score on srv03: -1000000 +group_color: prmStonithN3 allocation score on srv04: 100 +native_color: prmStonithN3 allocation score on srv01: 100 +native_color: prmStonithN3 allocation score on srv02: 300 +native_color: prmStonithN3 allocation score on srv03: -1000000 +native_color: prmStonithN3 allocation score on srv04: 100 +group_color: grpStonith4 allocation score on srv01: 100 +group_color: grpStonith4 allocation score on srv02: 100 +group_color: grpStonith4 allocation score on srv03: 200 +group_color: grpStonith4 allocation score on srv04: -1000000 +group_color: prmStonithN4 allocation score on srv01: 100 +group_color: prmStonithN4 allocation score on srv02: 100 +group_color: prmStonithN4 allocation score on srv03: 300 +group_color: prmStonithN4 allocation score on srv04: -1000000 +native_color: prmStonithN4 allocation score on srv01: 100 +native_color: prmStonithN4 allocation score on srv02: 100 +native_color: prmStonithN4 allocation score on srv03: 300 +native_color: prmStonithN4 allocation score on srv04: -1000000
diff --git a/pengine/test10/colocate-primitive-with-clone.xml b/pengine/test10/colocate-primitive-with-clone.xml
new file mode 100644
index 0000000000..ca18b3f047
--- /dev/null
+++ b/pengine/test10/colocate-primitive-with-clone.xml
@@ -0,0 +1,1170 @@
+<!-- 1170 lines of CIB XML defining this test's cluster; the markup was lost in extraction -->
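A note on reading the .scores output above: the pengine prints -1000000 for -INFINITY, and a resource's final native_color score is a saturating sum of its contributions, which is consistent with clnUMdummy01:0 ending up at 200 on srv04 (100 from group_color plus 100 from the colocation with the clone) while any node at -1000000 stays banned no matter what is added. A sketch of that clamped addition, assuming the +/-1000000 bound shown in the output; score_merge is a hypothetical name, not the pengine API:

#define SCORE_INFINITY 1000000

/* Clamped score addition, sketched from the behaviour visible in the
 * .scores file: -INFINITY is treated as absorbing (a banned node stays
 * banned), +INFINITY saturates, and ordinary scores simply add. */
static int
score_merge(int w1, int w2)
{
    if(w1 <= -SCORE_INFINITY || w2 <= -SCORE_INFINITY) {
        return -SCORE_INFINITY;
    }
    if(w1 >= SCORE_INFINITY || w2 >= SCORE_INFINITY) {
        return SCORE_INFINITY;
    }
    if(w1 + w2 >= SCORE_INFINITY) {
        return SCORE_INFINITY;
    }
    if(w1 + w2 <= -SCORE_INFINITY) {
        return -SCORE_INFINITY;
    }
    return w1 + w2;
}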