diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h index ce02b1ba3b..4bfa3fecf0 100644 --- a/include/crm/pengine/status.h +++ b/include/crm/pengine/status.h @@ -1,405 +1,406 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PENGINE_STATUS__H # define PENGINE_STATUS__H # include # include # include typedef struct node_s pe_node_t; typedef struct node_s node_t; typedef struct pe_action_s action_t; typedef struct pe_action_s pe_action_t; typedef struct resource_s resource_t; typedef struct ticket_s ticket_t; typedef enum no_quorum_policy_e { no_quorum_freeze, no_quorum_stop, no_quorum_ignore, no_quorum_suicide } no_quorum_policy_t; enum node_type { node_ping, node_member, node_remote }; enum pe_restart { pe_restart_restart, pe_restart_ignore }; enum pe_find { pe_find_renamed = 0x001, pe_find_clone = 0x004, pe_find_current = 0x008, pe_find_inactive = 0x010, }; # define pe_flag_have_quorum 0x00000001ULL # define pe_flag_symmetric_cluster 0x00000002ULL # define pe_flag_is_managed_default 0x00000004ULL # define pe_flag_maintenance_mode 0x00000008ULL # define pe_flag_stonith_enabled 0x00000010ULL # define pe_flag_have_stonith_resource 0x00000020ULL # define pe_flag_enable_unfencing 0x00000040ULL # define pe_flag_stop_rsc_orphans 0x00000100ULL # define pe_flag_stop_action_orphans 0x00000200ULL # define pe_flag_stop_everything 0x00000400ULL # define pe_flag_start_failure_fatal 0x00001000ULL # define pe_flag_remove_after_stop 0x00002000ULL # define pe_flag_startup_probes 0x00010000ULL # define pe_flag_have_status 0x00020000ULL # define pe_flag_have_remote_nodes 0x00040000ULL # define pe_flag_quick_location 0x00100000ULL # define pe_flag_sanitized 0x00200000ULL typedef struct pe_working_set_s { xmlNode *input; crm_time_t *now; /* options extracted from the input */ char *dc_uuid; node_t *dc_node; const char *stonith_action; const char *placement_strategy; unsigned long long flags; int stonith_timeout; int default_resource_stickiness; no_quorum_policy_t no_quorum_policy; GHashTable *config_hash; GHashTable *tickets; GHashTable *singletons; /* Actions for which there can be only one - ie. 
fence nodeX */ GListPtr nodes; GListPtr resources; GListPtr placement_constraints; GListPtr ordering_constraints; GListPtr colocation_constraints; GListPtr ticket_constraints; GListPtr actions; xmlNode *failed; xmlNode *op_defaults; xmlNode *rsc_defaults; /* stats */ int num_synapse; int max_valid_nodes; int order_id; int action_id; /* final output */ xmlNode *graph; GHashTable *template_rsc_sets; const char *localhost; GHashTable *tags; } pe_working_set_t; struct node_shared_s { const char *id; const char *uname; /* Make all these flags into a bitfield one day */ gboolean online; gboolean standby; gboolean standby_onfail; gboolean pending; gboolean unclean; gboolean unseen; gboolean shutdown; gboolean expected_up; gboolean is_dc; gboolean rsc_discovery_enabled; gboolean remote_requires_reset; gboolean remote_was_fenced; int num_resources; GListPtr running_rsc; /* resource_t* */ GListPtr allocated_rsc; /* resource_t* */ resource_t *remote_rsc; GHashTable *attrs; /* char* => char* */ enum node_type type; GHashTable *utilization; /*! cache of calculated rsc digests for this node. */ GHashTable *digest_cache; gboolean maintenance; }; struct node_s { int weight; gboolean fixed; int rsc_discover_mode; int count; struct node_shared_s *details; }; # include # define pe_rsc_orphan 0x00000001ULL # define pe_rsc_managed 0x00000002ULL # define pe_rsc_block 0x00000004ULL /* Further operations are prohibited due to failure policy */ # define pe_rsc_orphan_container_filler 0x00000008ULL # define pe_rsc_notify 0x00000010ULL # define pe_rsc_unique 0x00000020ULL # define pe_rsc_fence_device 0x00000040ULL # define pe_rsc_provisional 0x00000100ULL # define pe_rsc_allocating 0x00000200ULL # define pe_rsc_merging 0x00000400ULL # define pe_rsc_munging 0x00000800ULL # define pe_rsc_try_reload 0x00001000ULL # define pe_rsc_reload 0x00002000ULL # define pe_rsc_failed 0x00010000ULL # define pe_rsc_shutdown 0x00020000ULL # define pe_rsc_runnable 0x00040000ULL # define pe_rsc_start_pending 0x00080000ULL # define pe_rsc_starting 0x00100000ULL # define pe_rsc_stopping 0x00200000ULL # define pe_rsc_migrating 0x00400000ULL # define pe_rsc_allow_migrate 0x00800000ULL # define pe_rsc_failure_ignored 0x01000000ULL # define pe_rsc_unexpectedly_running 0x02000000ULL # define pe_rsc_maintenance 0x04000000ULL # define pe_rsc_needs_quorum 0x10000000ULL # define pe_rsc_needs_fencing 0x20000000ULL # define pe_rsc_needs_unfencing 0x40000000ULL # define pe_rsc_have_unfencing 0x80000000ULL enum pe_graph_flags { pe_graph_none = 0x00000, pe_graph_updated_first = 0x00001, pe_graph_updated_then = 0x00002, pe_graph_disable = 0x00004, }; /* *INDENT-OFF* */ enum pe_action_flags { pe_action_pseudo = 0x00001, pe_action_runnable = 0x00002, pe_action_optional = 0x00004, pe_action_print_always = 0x00008, pe_action_have_node_attrs = 0x00010, pe_action_failure_is_fatal = 0x00020, /* no longer used, here for API compatibility */ pe_action_implied_by_stonith = 0x00040, pe_action_migrate_runnable = 0x00080, pe_action_dumped = 0x00100, pe_action_processed = 0x00200, pe_action_clear = 0x00400, pe_action_dangle = 0x00800, pe_action_requires_any = 0x01000, /* This action requires one or more of its dependencies to be runnable * We use this to clear the runnable flag before checking dependencies */ pe_action_reschedule = 0x02000, + pe_action_tracking = 0x04000, }; /* *INDENT-ON* */ struct resource_s { char *id; char *clone_name; xmlNode *xml; xmlNode *orig_xml; xmlNode *ops_xml; resource_t *parent; void *variant_opaque; enum pe_obj_types variant;
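/* A note on the new pe_action_tracking flag above: pengine/graph.c in this
 * patch uses it as a transient "on the current search path" marker while
 * graph_has_loop() does a depth-first walk of an action's inputs, so that a
 * branch reaching an already-tracked action can be pruned instead of being
 * walked forever. A minimal sketch of the pattern, using the existing
 * set_bit/clear_bit/is_set helpers (walk_inputs() is a hypothetical
 * stand-in for the real recursion):
 *
 *     gboolean has_loop = FALSE;
 *
 *     if (is_set(action->flags, pe_action_tracking)) {
 *         return FALSE;               // already being visited: prune branch
 *     }
 *     set_bit(action->flags, pe_action_tracking);
 *     has_loop = walk_inputs(action); // recurse over action->actions_before
 *     clear_bit(action->flags, pe_action_tracking);
 *     return has_loop;
 */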
resource_object_functions_t *fns; resource_alloc_functions_t *cmds; enum rsc_recovery_type recovery_type; enum pe_restart restart_type; int priority; int stickiness; int sort_index; int failure_timeout; int remote_reconnect_interval; int effective_priority; int migration_threshold; gboolean is_remote_node; gboolean exclusive_discover; unsigned long long flags; GListPtr rsc_cons_lhs; /* rsc_colocation_t* */ GListPtr rsc_cons; /* rsc_colocation_t* */ GListPtr rsc_location; /* rsc_to_node_t* */ GListPtr actions; /* action_t* */ GListPtr rsc_tickets; /* rsc_ticket* */ node_t *allocated_to; GListPtr running_on; /* node_t* */ GHashTable *known_on; /* node_t* */ GHashTable *allowed_nodes; /* node_t* */ enum rsc_role_e role; enum rsc_role_e next_role; GHashTable *meta; GHashTable *parameters; GHashTable *utilization; GListPtr children; /* resource_t* */ GListPtr dangling_migrations; /* node_t* */ node_t *partial_migration_target; node_t *partial_migration_source; resource_t *container; GListPtr fillers; char *pending_task; const char *isolation_wrapper; }; struct pe_action_s { int id; int priority; resource_t *rsc; node_t *node; xmlNode *op_entry; char *task; char *uuid; char *cancel_task; enum pe_action_flags flags; enum rsc_start_requirement needs; enum action_fail_response on_fail; enum rsc_role_e fail_role; action_t *pre_notify; action_t *pre_notified; action_t *post_notify; action_t *post_notified; int seen_count; GHashTable *meta; GHashTable *extra; GListPtr actions_before; /* action_wrapper_t* */ GListPtr actions_after; /* action_wrapper_t* */ }; struct ticket_s { char *id; gboolean granted; time_t last_granted; gboolean standby; GHashTable *state; }; typedef struct tag_s { char *id; GListPtr refs; } tag_t; enum pe_link_state { pe_link_not_dumped, pe_link_dumped, pe_link_dup, }; /* *INDENT-OFF* */ enum pe_ordering { pe_order_none = 0x0, /* deleted */ pe_order_optional = 0x1, /* pure ordering, nothing implied */ pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */ pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */ pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */ pe_order_implies_first_master = 0x40, /* Imply 'first' is required when 'then' is required and then's rsc holds Master role. */ /* first requires then to be both runnable and migrate runnable. */ pe_order_implies_first_migratable = 0x80, pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */ pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX', * ensure instances of 'then' on 'nodeX' are too. * Only really useful if 'then' is a clone and 'first' is not */ pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */ pe_order_serialize_only = 0x4000, /* serialize */ pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */ pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */ pe_order_asymmetrical = 0x100000, /* Indicates an asymmetrical one-way ordering constraint. */ pe_order_load = 0x200000, /* Only relevant if...
*/ pe_order_one_or_more = 0x400000, /* 'then' is only runnable if one or more of its dependencies are too */ pe_order_anti_colocation = 0x800000, pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */ pe_order_trace = 0x4000000, /* test marker */ }; /* *INDENT-ON* */ typedef struct action_wrapper_s action_wrapper_t; struct action_wrapper_s { enum pe_ordering type; enum pe_link_state state; action_t *action; }; const char *rsc_printable_id(resource_t *rsc); gboolean cluster_status(pe_working_set_t * data_set); void set_working_set_defaults(pe_working_set_t * data_set); void cleanup_calculations(pe_working_set_t * data_set); resource_t *pe_find_resource(GListPtr rsc_list, const char *id_rh); node_t *pe_find_node(GListPtr node_list, const char *uname); node_t *pe_find_node_id(GListPtr node_list, const char *id); node_t *pe_find_node_any(GListPtr node_list, const char *id, const char *uname); GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set); #endif diff --git a/pengine/graph.c b/pengine/graph.c index aa703cc3db..9cfede63de 100644 --- a/pengine/graph.c +++ b/pengine/graph.c @@ -1,1343 +1,1416 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include gboolean update_action(action_t * action); void update_colo_start_chain(action_t * action); gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); static enum pe_action_flags get_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = action->flags; if (action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if (action->rsc->variant >= pe_clone && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, it's only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but it's logical and appears to work well.
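 *
 * As a concrete illustration (clone name hypothetical): if rscA:0 is
 * unrunnable on node1 but rscA:1 is runnable on node2, an ordering
 * constraint such as "start rscA-clone then start rscB" should still see
 * rscA's start as runnable. Condensed, the fix-up just below amounts to:
 *
 *     flags       = rsc->cmds->action_flags(action, NULL);  // anywhere
 *     clone_flags = rsc->cmds->action_flags(action, node);  // on $node only
 *     if (is_not_set(clone_flags, pe_action_runnable)
 *         && is_set(flags, pe_action_runnable)) {
 *         set_bit(clone_flags, pe_action_runnable);         // runnable somewhere
 *     }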
*/ if (is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid); set_bit(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static char * convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify, gboolean free_original) { int interval = 0; char *uuid = NULL; char *rid = NULL; char *raw_task = NULL; int task = no_action; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing %s", old_uuid); if (old_uuid == NULL) { return NULL; } else if (strstr(old_uuid, "notify") != NULL) { goto done; /* no conversion */ } else if (rsc->variant < pe_group) { goto done; /* no conversion */ } CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval)); if (interval > 0) { goto done; /* no conversion */ } task = text2task(raw_task); switch (task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: task = no_action; break; default: crm_err("Unknown action: %s", raw_task); task = no_action; break; } if (task != no_action) { if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) { uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1)); } else { uuid = generate_op_key(rid, task2text(task + 1), 0); } pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(old_uuid); } if (free_original) { free(old_uuid); } free(raw_task); free(rid); return uuid; } static action_t * rsc_expand_action(action_t * action) { action_t *result = action; if (action->rsc && action->rsc->variant >= pe_group) { /* Expand 'start' -> 'started' */ char *uuid = NULL; gboolean notify = FALSE; if (action->rsc->parent == NULL) { /* Only outermost resources have notification actions */ notify = is_set(action->rsc->flags, pe_rsc_notify); } uuid = convert_non_atomic_uuid(action->uuid, action->rsc, notify, FALSE); if (uuid) { pe_rsc_trace(action->rsc, "Converting %s to %s %d", action->uuid, uuid, is_set(action->rsc->flags, pe_rsc_notify)); result = find_first_action(action->rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_err("Couldn't expand %s", action->uuid); result = action; } free(uuid); } } return result; } static enum pe_graph_flags graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; gboolean processed = FALSE; /* TODO: Do as many of these in parallel as possible */ if (type & pe_order_implies_then) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_then); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(then, pe_action_optional | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc); } } if ((type & pe_order_restart) && then->rsc) { enum pe_action_flags restart = (pe_action_optional | pe_action_runnable); processed = TRUE; changed |= then->rsc->cmds->update_actions(first, then, node, flags, restart, pe_order_restart); if (changed) { pe_rsc_trace(then->rsc, "restart: %s
then %s: changed", first->uuid, then->uuid); } else { crm_trace("restart: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first) { processed = TRUE; if (first->rsc) { changed |= first->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(first, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_first; } } if (changed) { pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_master) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_first_master); } if (changed) { pe_rsc_trace(then->rsc, "implies left when right rsc is Master role: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_one_or_more) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_one_or_more); } else if (is_set(flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_runnable_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_runnable_left); } else if (is_set(flags, pe_action_runnable) == FALSE) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_migratable) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first_migratable); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_pseudo_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_pseudo_left); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_optional) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_optional); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_asymmetrical) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_asymmetrical); } if (changed) { pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid); } else { 
crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid); } } if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", first->uuid, then->uuid); update_action_flags(then, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_first_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", then->uuid, first->uuid); update_action_flags(first, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_then || type & pe_order_implies_first || type & pe_order_restart) && first->rsc && safe_str_eq(first->task, RSC_STOP) && is_not_set(first->rsc->flags, pe_rsc_managed) && is_set(first->rsc->flags, pe_rsc_block) && is_not_set(first->flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } if (changed) { pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid); } } if (processed == FALSE) { crm_trace("Constraint 0x%.6x not applicable", type); } return changed; } static void mark_start_blocked(resource_t *rsc) { GListPtr gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(action->task, RSC_START)) { continue; } if (is_set(action->flags, pe_action_runnable)) { clear_bit(action->flags, pe_action_runnable); update_colo_start_chain(action); update_action(action); } } } void update_colo_start_chain(action_t *action) { GListPtr gIter = NULL; resource_t *rsc = NULL; if (is_not_set(action->flags, pe_action_runnable) && safe_str_eq(action->task, RSC_START)) { rsc = uber_parent(action->rsc); } if (rsc == NULL || rsc->rsc_cons_lhs == NULL) { return; } /* if rsc has children, all the children need to have start set to * unrunnable before we follow the colo chain for the parent. */ for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *)gIter->data; action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL); if (start == NULL || is_set(start->flags, pe_action_runnable)) { return; } } for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *colocate_with = (rsc_colocation_t *)gIter->data; if (colocate_with->score == INFINITY) { mark_start_blocked(colocate_with->rsc_lh); } } } gboolean update_action(action_t * then) { GListPtr lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; crm_trace("Processing %s (%s %s %s)", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? 
then->node->details->uname : ""); if (is_set(then->flags, pe_action_requires_any)) { clear_bit(then->flags, pe_action_runnable); /* We are relying on the pe_order_one_or_more clause of * graph_update_action(), called as part of the: * * 'if (first == other->action)' * * block below, to set this back if appropriate */ } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; action_t *first = other->action; node_t *then_node = then->node; node_t *first_node = first->node; enum pe_action_flags then_flags = 0; enum pe_action_flags first_flags = 0; if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node) { crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid); } } if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node) { crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid); } } clear_bit(changed, pe_graph_updated_first); if (first->rsc != then->rsc && first->rsc != NULL && then->rsc != NULL && first->rsc != then->rsc->parent) { first = rsc_expand_action(first); } if (first != other->action) { crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid, other->action->uuid); } first_flags = get_action_flags(first, then_node); then_flags = get_action_flags(then, first_node); crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x", then->uuid, is_set(then_flags, pe_action_optional) ? "optional" : "required", is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then_flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : "", first->uuid, is_set(first_flags, pe_action_optional) ? "optional" : "required", is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first_flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : "", first_flags, other->type); if (first == other->action) { /* * 'first' was not expanded (ie. from 'start' to 'running'), which could mean it: * - has no associated resource, * - was a primitive, * - was pre-expanded (ie. 'running' instead of 'start') * * The third argument here to graph_update_action() is a node which is used under two conditions: * - Interleaving, in which case first->node and * then->node are equal (and NULL) * - If 'then' is a clone, to limit the scope of the * constraint to instances on the supplied node * */ int otype = other->type; node_t *node = then->node; if(is_set(otype, pe_order_implies_then_on_node)) { /* Normally we want the _whole_ 'then' clone to * restart if 'first' is restarted, so then->node is * needed. * * However for unfencing, we want to limit this to * instances on the same node as 'first' (the * unfencing operation), so first->node is supplied.
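 *
 * Concretely (names hypothetical): if 'first' is the unfencing op
 * "stonith-node1 (on)" and 'then' is "rscA-clone_start_0", supplying
 * first->node means only the rscA instance placed on node1 is forced
 * to restart, while instances on other nodes are left alone; in effect:
 *
 *     node = first->node;    // node1, rather than then->node
 *     changed |= graph_update_action(first, then, node, first_flags, otype);
 *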
* Swap the node, from then on we can treat it * like any other 'pe_order_implies_then' */ clear_bit(otype, pe_order_implies_then_on_node); set_bit(otype, pe_order_implies_then); node = first->node; } clear_bit(first_flags, pe_action_pseudo); changed |= graph_update_action(first, then, node, first_flags, otype); /* 'first' was for a complex resource (clone, group, etc), * create a new dependency if necessary */ } else if (order_actions(first, then, other->type)) { /* This was the first time 'first' and 'then' were associated, * start again to get the new actions_before list */ changed |= (pe_graph_updated_then | pe_graph_disable); } if (changed & pe_graph_disable) { crm_trace("Disabled constraint %s -> %s", other->action->uuid, then->uuid); clear_bit(changed, pe_graph_disable); other->type = pe_order_none; } if (changed & pe_graph_updated_first) { GListPtr lpc2 = NULL; crm_trace("Updated %s (first %s %s %s), processing dependants ", first->uuid, is_set(first->flags, pe_action_optional) ? "optional" : "required", is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first->flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : ""); for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other = (action_wrapper_t *) lpc2->data; update_action(other->action); } update_action(first); } } if (is_set(then->flags, pe_action_requires_any)) { if (last_flags != then->flags) { changed |= pe_graph_updated_then; } else { clear_bit(changed, pe_graph_updated_then); } } if (changed & pe_graph_updated_then) { crm_trace("Updated %s (then %s %s %s), processing dependants ", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ?
then->node->details-> uname : ""); if (is_set(last_flags, pe_action_runnable) && is_not_set(then->flags, pe_action_runnable)) { update_colo_start_chain(then); } update_action(then); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; update_action(other->action); } } return FALSE; } gboolean shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set) { /* add the stop to the before lists so it counts as a pre-req * for the shutdown */ GListPtr lpc = NULL; for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) { action_t *action = (action_t *) lpc->data; if (action->rsc == NULL || action->node == NULL) { continue; } else if (action->node->details != node->details) { continue; } else if (is_set(action->rsc->flags, pe_rsc_maintenance)) { pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid); continue; } else if (node->details->maintenance) { pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode", action->uuid, node->details->uname); continue; } else if (safe_str_neq(action->task, RSC_STOP)) { continue; } else if (is_not_set(action->rsc->flags, pe_rsc_managed) && is_not_set(action->rsc->flags, pe_rsc_block)) { /* * If another action depends on this one, we may still end up blocking */ pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid); continue; } pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid, node->details->uname); pe_clear_action_bit(action, pe_action_optional); custom_action_order(action->rsc, NULL, action, NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op, pe_order_optional | pe_order_runnable_left, data_set); } return TRUE; } gboolean stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set) { CRM_CHECK(stonith_op != NULL, return FALSE); /* * Make sure the stonith OP occurs before we start any shared resources */ if (stonith_op != NULL) { GListPtr lpc = NULL; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc_stonith_ordering(rsc, stonith_op, data_set); } } /* add the stonith OP as a stop pre-req and then mark the stop * as a pseudo op - since it's now redundant */ return TRUE; } static node_t * get_router_node(action_t *action) { node_t *began_on = NULL; node_t *ended_on = NULL; node_t *router_node = NULL; if (safe_str_eq(action->task, CRM_OP_FENCE) || is_remote_node(action->node) == FALSE) { return NULL; } CRM_ASSERT(action->node->details->remote_rsc != NULL); if (action->node->details->remote_rsc->running_on) { began_on = action->node->details->remote_rsc->running_on->data; } ended_on = action->node->details->remote_rsc->allocated_to; /* if there is only one location to choose from, * this is easy. Check for those conditions first */ if (!began_on || !ended_on) { /* remote rsc is either shutting down or starting up */ return began_on ? began_on : ended_on; } else if (began_on->details == ended_on->details) { /* remote rsc didn't move nodes. */ return began_on; } /* If we get here, we know the remote resource * began on one node and is moving to another node. * * This means some actions will get routed through the cluster * node the connection rsc began on, and others are routed through * the cluster node the connection rsc ends up on. * * 1.
stop, demote, migrate actions of resources living in the remote * node _MUST_ occur _BEFORE_ the connection can move (these actions * are all required before the remote rsc stop action can occur.) In * this case, we know these actions have to be routed through the initial * cluster node the connection resource lived on before the move takes place. * * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount * delete ....) must occur after the resource starts on the node it is * moving to. */ /* 1. before connection rsc moves. */ if (safe_str_eq(action->task, "stop") || safe_str_eq(action->task, "demote") || safe_str_eq(action->task, "migrate_from") || safe_str_eq(action->task, "migrate_to")) { router_node = began_on; /* 2. after connection rsc moves. */ } else { router_node = ended_on; } return router_node; } static xmlNode * action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) { gboolean needs_node_info = TRUE; xmlNode *action_xml = NULL; xmlNode *args_xml = NULL; char *action_id_s = NULL; if (action == NULL) { return NULL; } if (safe_str_eq(action->task, CRM_OP_FENCE)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* needs_node_info = FALSE; */ } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* } else if(safe_str_eq(action->task, RSC_PROBED)) { */ /* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */ } else if (is_set(action->flags, pe_action_pseudo)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT); needs_node_info = FALSE; } else { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); } action_id_s = crm_itoa(action->id); crm_xml_add(action_xml, XML_ATTR_ID, action_id_s); free(action_id_s); crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task); if (action->rsc != NULL && action->rsc->clone_name != NULL) { char *clone_key = NULL; const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); int interval = crm_parse_int(interval_s, "0"); if (safe_str_eq(action->task, RSC_NOTIFY)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_operation"); CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid)); CRM_CHECK(n_task != NULL, crm_err("No notify operation value found for %s", action->uuid)); clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task); } else if(action->cancel_task) { clone_key = generate_op_key(action->rsc->clone_name, action->cancel_task, interval); } else { clone_key = generate_op_key(action->rsc->clone_name, action->task, interval); } CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid)); crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key); crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid); free(clone_key); } else { crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid); } if (needs_node_info && action->node != NULL) { node_t *router_node = get_router_node(action); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id); if (router_node) { 
crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname); } } if (as_input) { return action_xml; } if (action->rsc) { if (is_set(action->flags, pe_action_pseudo) == FALSE) { int lpc = 0; xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml)); const char *attr_list[] = { XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER, XML_ATTR_TYPE }; if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) { /* Do not use the 'instance free' name here as that * might interfere with the instance we plan to keep. * Ie. if there are more than two named /anonymous/ * instances on a given node, we need to make sure the * command goes to the right one. * * Keep this block, even when everyone is using * 'instance free' anonymous clone names - it means * we'll do the right thing if anyone toggles the * unique flag to 'off' */ crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) { const char *xml_id = ID(action->rsc->xml); crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id, action->rsc->clone_name); /* ID is what we'd like clients to use * ID_LONG is what they might know it as instead * * ID_LONG is only strictly needed /here/ during the * transition period until all nodes in the cluster * are running the new software /and/ have rebooted * once (meaning that they've only ever spoken to a DC * supporting this feature). * * If anyone toggles the unique flag to 'on', the * 'instance free' name will correspond to an orphan * and fall into the clause above instead */ crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); } else { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } } else { CRM_ASSERT(action->rsc->clone_name == NULL); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); } for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { crm_xml_add(rsc_xml, attr_list[lpc], g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); } } } args_xml = create_xml_node(NULL, XML_TAG_ATTRS); crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); g_hash_table_foreach(action->extra, hash2field, args_xml); if (action->rsc != NULL && action->node) { GHashTable *p = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); get_rsc_attributes(p, action->rsc, action->node, data_set); g_hash_table_foreach(p, hash2smartfield, args_xml); g_hash_table_destroy(p); } else if(action->rsc && action->rsc->variant <= pe_native) { g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml); } g_hash_table_foreach(action->meta, hash2metafield, args_xml); if (action->rsc != NULL) { int isolated = 0; resource_t *parent = action->rsc; while (parent != NULL) { isolated |= parent->isolation_wrapper ?
1 : 0; parent->cmds->append_meta(parent, args_xml); parent = parent->parent; } if (isolated && action->node) { char *nodeattr = crm_meta_name(XML_RSC_ATTR_ISOLATION_HOST); crm_xml_add(args_xml, nodeattr, action->node->details->uname); free(nodeattr); } } else if (safe_str_eq(action->task, CRM_OP_FENCE) && action->node) { g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml); } sorted_xml(args_xml, action_xml, FALSE); crm_log_xml_trace(action_xml, "dumped action"); free_xml(args_xml); return action_xml; } static gboolean should_dump_action(action_t * action) { CRM_CHECK(action != NULL, return FALSE); if (is_set(action->flags, pe_action_dumped)) { crm_trace("action %d (%s) was already dumped", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) { GListPtr lpc = NULL; /* This is a horrible but convenient hack * * It minimizes the number of actions with unsatisfied inputs * (ie. not included in the graph) * * This, in turn, means we can be more concise when printing * aborted/incomplete graphs. * * It also makes it obvious which node is preventing * probe_complete from running (presumably because it is only * partially up) * * For these reasons we tolerate such perversions */ for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (is_not_set(wrapper->action->flags, pe_action_runnable)) { /* Only interested in runnable operations */ } else if (safe_str_neq(wrapper->action->task, RSC_START)) { /* Only interested in start operations */ } else if (is_set(wrapper->action->flags, pe_action_dumped)) { crm_trace("action %d (%s) dependency of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } else if (should_dump_action(wrapper->action)) { crm_trace("action %d (%s) dependency of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } } } if (is_set(action->flags, pe_action_runnable) == FALSE) { crm_trace("action %d (%s) was not runnable", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_optional) && is_set(action->flags, pe_action_print_always) == FALSE) { crm_trace("action %d (%s) was optional", action->id, action->uuid); return FALSE; } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) { const char *interval = NULL; interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); /* make sure probes and recurring monitors go through */ if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) { crm_trace("action %d (%s) was for an unmanaged resource (%s)", action->id, action->uuid, action->rsc->id); return FALSE; } } if (is_set(action->flags, pe_action_pseudo) || safe_str_eq(action->task, CRM_OP_FENCE) || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { /* skip the next checks */ return TRUE; } if (action->node == NULL) { pe_err("action %d (%s) was not allocated", action->id, action->uuid); log_action(LOG_DEBUG, "Unallocated action", action, FALSE); return FALSE; } else if (action->node->details->online == FALSE) { pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for offline node", action, FALSE); return FALSE; #if 0 /* but this would also affect resources that can be safely * migrated before a fencing op */ } else if (action->node->details->unclean == FALSE) { pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid);
log_action(LOG_DEBUG, "Action for unclean node", action, FALSE); return FALSE; #endif } return TRUE; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const action_wrapper_t *action_wrapper2 = (const action_wrapper_t *)a; const action_wrapper_t *action_wrapper1 = (const action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } return 0; } static gboolean -should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) +check_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) { int type = wrapper->type; + if (wrapper->state == pe_link_dumped) { + return TRUE; + + } else if (wrapper->state == pe_link_dup) { + return FALSE; + } + type &= ~pe_order_implies_first_printed; type &= ~pe_order_implies_then_printed; type &= ~pe_order_optional; if (wrapper->action->node && action->rsc && action->rsc->fillers && is_not_set(type, pe_order_preserve) && wrapper->action->node->details->remote_rsc && uber_parent(action->rsc) != uber_parent(wrapper->action->rsc) ) { /* This prevents user-defined ordering constraints between * resources in remote nodes and the resources that * define/represent a remote node. * * There is no known valid reason to allow this sort of thing * but if one arises, we'd need to change the * action->rsc->fillers clause to be more specific, possibly * to check that it contained wrapper->action->rsc */ crm_warn("Invalid ordering constraint between %s and %s", wrapper->action->rsc->id, action->rsc->id); wrapper->type = pe_order_none; return FALSE; } - wrapper->state = pe_link_not_dumped; if (last_action == wrapper->action->id) { crm_trace("Input (%d) %s duplicated for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); wrapper->state = pe_link_dup; return FALSE; } else if (wrapper->type == pe_order_none) { crm_trace("Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) { crm_trace("Input (%d) %s optional (ordering) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && is_set(type, pe_order_one_or_more)) { crm_trace("Input (%d) %s optional (one-or-more) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && (wrapper->type & pe_order_stonith_stop)) { crm_trace("Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if ((wrapper->type & pe_order_implies_first_migratable) && (is_set(wrapper->action->flags, pe_action_runnable) == FALSE)) { return FALSE; } else if ((wrapper->type & pe_order_apply_first_non_migratable) && (is_set(wrapper->action->flags, pe_action_migrate_runnable))) { return FALSE; } else if ((wrapper->type == pe_order_optional) && strstr(wrapper->action->uuid, "_stop_0") && is_set(wrapper->action->flags, pe_action_migrate_runnable)) { /* for optional only ordering, ordering is not preserved for * a stop action that is actually involved with a migration. 
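 *
 * (Related: the pair-wise "load_stopped -> migrate_to" loop breaking that
 * used to live in the pe_order_load branch below is replaced in this patch
 * by a general cycle check: should_dump_input() now calls graph_has_loop(),
 * a depth-first walk over each input's actions_before list that marks
 * in-progress actions with the new pe_action_tracking flag. Condensed,
 * the new walk amounts to:
 *
 *     if (wrapper->action == init_action)
 *         return TRUE;                    // walked back to the start: loop
 *     set_bit(wrapper->action->flags, pe_action_tracking);
 *     for (lpc = wrapper->action->actions_before; lpc; lpc = lpc->next)
 *         if (graph_has_loop(init_action, wrapper->action, lpc->data))
 *             has_loop = TRUE;
 *     clear_bit(wrapper->action->flags, pe_action_tracking);
 *
 * with inputs that fail check_dump_input() skipped before recursing.)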
*/ return FALSE; + } else if (wrapper->type == pe_order_load) { crm_trace("check load filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : ""); if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) { - /* Remove the orders like the following if not needed or introducing transition loop: + /* Remove the orders like the following if not relevant: * "load_stopped_node2" -> "rscA_migrate_to node1" * which were created also from: pengine/native.c: MigrateRsc() * order_actions(other, then, other_w->type); */ /* For migrate_to ops, we care about where it has been * allocated to, not where the action will be executed */ if (wrapper->action->node == NULL || action->rsc->allocated_to == NULL || wrapper->action->node->details != action->rsc->allocated_to->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - migrate"); wrapper->type = pe_order_none; return FALSE; - - } else { - GListPtr lpc = NULL; - - for (lpc = wrapper->action->actions_before; lpc != NULL; lpc = lpc->next) { - action_wrapper_t *wrapper_before = (action_wrapper_t *) lpc->data; - - /* If there's any order like: - * "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1" - * rscA is being migrated from node1 to node2, - * while rscB is being migrated from node2 to node1. - * There will be potential transition loop. - * Break the order "load_stopped_node2" -> "rscA_migrate_to node1". - */ - - if (wrapper_before->type != pe_order_load - || is_set(wrapper_before->action->flags, pe_action_optional) - || is_not_set(wrapper_before->action->flags, pe_action_migrate_runnable) - || wrapper_before->action->node == NULL - || wrapper->action->node == NULL - || wrapper_before->action->node->details != wrapper->action->node->details) { - continue; - } - - if (wrapper_before->action->rsc - && wrapper_before->action->rsc->allocated_to - && action->node - && wrapper_before->action->rsc->allocated_to->details == action->node->details) { - - crm_trace("load filter - migrate loop"); - wrapper->type = pe_order_none; - return FALSE; - } - } } } else if (wrapper->action->node == NULL || action->node == NULL || wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - node"); wrapper->type = pe_order_none; return FALSE; } else if (is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, ignore if so */ crm_trace("load filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->type == pe_order_anti_colocation) { crm_trace("check anti-colocation filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? 
action->node->details->uname : ""); if (wrapper->action->node && action->node && wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("anti-colocation filter - node"); wrapper->type = pe_order_none; return FALSE; } else if (is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, ignore if so */ crm_trace("anti-colocation filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->action->rsc && wrapper->action->rsc != action->rsc && is_set(wrapper->action->rsc->flags, pe_rsc_failed) && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed) && strstr(wrapper->action->uuid, "_stop_0") && action->rsc && action->rsc->variant >= pe_clone) { crm_warn("Ignoring requirement that %s complete before %s:" " unmanaged failed resources cannot prevent clone shutdown", wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_dumped) || should_dump_action(wrapper->action)) { crm_trace("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #if 0 } else if (is_set(wrapper->action->flags, pe_action_runnable) && is_set(wrapper->action->flags, pe_action_pseudo) && wrapper->action->rsc->variant != pe_native) { crm_crit("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #endif } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) { crm_trace("Input (%d) %s optional for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type); return FALSE; } dump: + return TRUE; +} + +static gboolean +graph_has_loop(action_t * init_action, action_t * action, action_wrapper_t * wrapper) +{ + GListPtr lpc = NULL; + gboolean has_loop = FALSE; + + if (is_set(wrapper->action->flags, pe_action_tracking)) { + crm_trace("Breaking tracking loop: %s.%s -> %s.%s (0x%.6x)", + wrapper->action->uuid, + wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, + action->node ? action->node->details->uname : "", + wrapper->type); + return FALSE; + } + + if (check_dump_input(-1, action, wrapper) == FALSE) { + return FALSE; + } + + /* If there's any order like: + * "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1" + * rscA is being migrated from node1 to node2, + * while rscB is being migrated from node2 to node1. + * There will be potential graph loop. + * Break the order "load_stopped_node2" -> "rscA_migrate_to node1". + */ + + crm_trace("Checking graph loop: %s.%s -> %s.%s (0x%.6x)", + wrapper->action->uuid, + wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, + action->node ? action->node->details->uname : "", + wrapper->type); + + if (wrapper->action == init_action) { + crm_debug("Found graph loop: %s.%s ->...-> %s.%s", + action->uuid, + action->node ? action->node->details->uname : "", + init_action->uuid, + init_action->node ? 
init_action->node->details->uname : ""); + + return TRUE; + } + + set_bit(wrapper->action->flags, pe_action_tracking); + + for (lpc = wrapper->action->actions_before; lpc != NULL; lpc = lpc->next) { + action_wrapper_t *wrapper_before = (action_wrapper_t *) lpc->data; + + if (graph_has_loop(init_action, wrapper->action, wrapper_before)) { + has_loop = TRUE; + goto done; + } + } + +done: + clear_bit(wrapper->action->flags, pe_action_tracking); + + return has_loop; +} + +static gboolean +should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) +{ + wrapper->state = pe_link_not_dumped; + + if (check_dump_input(last_action, action, wrapper) == FALSE) { + return FALSE; + } + + if (wrapper->type == pe_order_load + && action->rsc + && safe_str_eq(action->task, RSC_MIGRATE)) { + crm_trace("Checking graph loop - load migrate: %s.%s -> %s.%s", + wrapper->action->uuid, + wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, + action->node ? action->node->details->uname : ""); + + if (graph_has_loop(action, action, wrapper)) { + /* Remove the orders like the following if they are introducing any graph loops: + * "load_stopped_node2" -> "rscA_migrate_to node1" + * which were created also from: pengine/native.c: MigrateRsc() + * order_actions(other, then, other_w->type); + */ + crm_debug("Breaking graph loop - load migrate: %s.%s -> %s.%s", + wrapper->action->uuid, + wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, + action->node ? action->node->details->uname : ""); + + wrapper->type = pe_order_none; + return FALSE; + } + } + crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid); return TRUE; } void graph_element_from_action(action_t * action, pe_working_set_t * data_set) { GListPtr lpc = NULL; int last_action = -1; int synapse_priority = 0; xmlNode *syn = NULL; xmlNode *set = NULL; xmlNode *in = NULL; xmlNode *input = NULL; xmlNode *xml_action = NULL; if (should_dump_action(action) == FALSE) { return; } set_bit(action->flags, pe_action_dumped); syn = create_xml_node(data_set->graph, "synapse"); set = create_xml_node(syn, "action_set"); in = create_xml_node(syn, "inputs"); crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse); data_set->num_synapse++; if (action->rsc != NULL) { synapse_priority = action->rsc->priority; } if (action->priority > synapse_priority) { synapse_priority = action->priority; } if (synapse_priority > 0) { crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority); } xml_action = action2xml(action, FALSE, data_set); add_node_nocopy(set, crm_element_name(xml_action), xml_action); action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (should_dump_input(last_action, action, wrapper) == FALSE) { continue; } wrapper->state = pe_link_dumped; CRM_CHECK(last_action < wrapper->action->id,; ); last_action = wrapper->action->id; input = create_xml_node(in, "trigger"); xml_action = action2xml(wrapper->action, TRUE, data_set); add_node_nocopy(input, crm_element_name(xml_action), xml_action); } } diff --git 
a/pengine/regression.sh b/pengine/regression.sh index b1014a3bd4..58c5fbb80c 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,814 +1,815 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # core=`dirname $0` . $core/regression.core.sh || exit 1 create_mode="true" info Generating test outputs for these tests... # do_test file description info Done. echo "" info Performing the following tests from $io_dir create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" do_test group-fail "Ensure stop order is preserved for partially active groups" do_test group-unmanaged "No need to restart r115 because r114 is unmanaged" do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails" do_test group-dependants "Account for the location preferences of things colocated with a group" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" do_test nvpair-id-ref "Support id-ref in nvpair 
with optional name" do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" --rc 4 do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" do_test bug-5140-require-all-false "Allow basegrp:0 to stop" do_test clone-require-all-1 "clone B starts node 3 and 4" do_test clone-require-all-2 "clone B remains stopped everywhere" do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere" do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining." do_test clone-require-all-5 "clone B starts on node 1 3 and 4" do_test clone-require-all-6 "clone B remains active after shutting down instances of A" do_test clone-require-all-7 "clone A and B both start at the same time. all instances of A start before B." do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B" do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B" do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another." do_test one-or-more-unrunnnable-instances "Avoid dependencies on instances that won't ever be started" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (mandatory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependent clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" do_test clone-order-16instances "Verify ordering of 16 cloned resources" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings."
do_test ordered-set-natural "Allow natural set ordering" do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" do_test enforce-colo1 "Always enforce B with A INFINITY." do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)" echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " do_test per-node-attrs "Per node resource parameters" echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor" do_test stop-failure-no-quorum "Stop failure without quorum" do_test stop-failure-no-fencing "Stop failure without fencing available" do_test stop-failure-with-fencing "Stop failure with fencing available" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node
Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith actions" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" do_test migrate-both-vms "Migrate two VMs that have no colocation" do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B." do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B" do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both" do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable" do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B.
move both, a not migratable" do_test 6-migrate-group "Advanced migrate logic, migrate a group" do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false" do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping" do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping" do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A" do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping" do_test a-promote-then-b-migrate "A promote then B start. migrate B" do_test a-demote-then-b-migrate "A demote then B stop. migrate B" #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder" do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Don't shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" do_test bug-cl-5168 "Don't shuffle clones" do_test bug-cl-5170 "Prevent clone from starting with on-fail=block" do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block" do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" do_test
clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" echo "" do_test unfence-startup "Clean unfencing" do_test unfence-definition "Unfencing when the agent changes" do_test unfence-parameters "Unfencing when the agent parameters change" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depend on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (don't stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Don't retry failed demote actions" do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placement to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive" do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score" do_test master-demote-block "Block promotion if demote fails with on-fail=block" do_test master-dependant-ban "Don't stop instances from being active because a dependent is banned from that host" do_test master-stop "Stop instances due to location constraint with role=Started" do_test master-partially-demoted-group "Allow partially demoted group to finish demoting" do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced" do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted" do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering" do_test master-notify "Master promotion with notifies" echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any depending resource is unmanaged " do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged " do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged " do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged " do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged" echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependency restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL #994 - with a dependent resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith
startup" do_test stonith-4 "Stonith node state" --rc 4 do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Dont promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed." 
do_test failcount "Ensure failcounts are correctly expired" do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure with on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled." do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections" do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block" do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc." do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)." do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs." do_test probe-timeout "cl#5099 - Default probe timeout" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progressive) #1" do_test systemhealthp2 "System Health (Progressive) #2" do_test systemhealthp3 "System Health (Progressive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" +do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" echo "" do_test
colocated-utilization-primitive-1 "Colocated Utilization - Primitive" do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node" do_test colocated-utilization-group "Colocated Utilization - Group" do_test colocated-utilization-clone "Colocated Utilization - Clone" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" do_test node-maintenance-1 "cl#5128 - Node maintenance" do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)" do_test rsc-maintenance "Per-resource maintenance" echo "" do_test not-installed-agent "The resource agent is missing" do_test not-installed-tools "Something the resource agent needs is missing" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test stopped-monitor-09 "Stopped Monitor - unmanage started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (target-role="Stopped")" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role="Started")" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo "" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive
(loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)" do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15
"Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master (loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 
"Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" do_test cluster-specific-params "Cluster-specific instance attributes based on rules" do_test site-specific-params "Site-specific instance attributes based on rules" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test template-rsc-sets-4 "Template - Resource Sets 4" do_test template-clone-primitive "Cloned primitive from template" do_test template-clone-group "Cloned group from template" do_test location-sets-templates "Resource sets and templates - Location" do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)" do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)" do_test tags-location "Tags - Location" do_test tags-ticket "Tags - Ticket" echo "" do_test container-1 "Container - initial" do_test container-2 "Container - monitor failed" do_test container-3 "Container - stop failed" do_test container-4 "Container - reached migration-threshold" do_test container-group-1 "Container in group - initial" do_test container-group-2 "Container in group - monitor failed" do_test container-group-3 "Container in group - stop failed" do_test container-group-4 "Container in group - reached migration-threshold" do_test container-is-remote-node "Place resource within container when container is remote-node" do_test bug-rh-1097457 "Kill user defined container/contents ordering" echo "" do_test whitebox-fail1 "Fail whitebox container rsc." do_test whitebox-fail2 "Fail whitebox container rsc lrmd connection." do_test whitebox-fail3 "Failed containers should not run nested on remote nodes." do_test whitebox-start "Start whitebox container with resources assigned to it" do_test whitebox-stop "Stop whitebox container with resources assigned to it" do_test whitebox-move "Move whitebox container with resources assigned to it" do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource" do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established" do_test whitebox-orphaned "Properly shutdown orphaned whitebox container" do_test whitebox-orphan-ms "Properly tear down orphan ms resources on remote-nodes" do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start." do_test whitebox-migrate1 "Migrate both container and connection resource" do_test whitebox-imply-stop-on-fence "imply stop action on container node rsc when host node is fenced" echo "" do_test remote-startup-probes "Baremetal remote-node startup probes" do_test remote-startup "Startup a newly discovered remote-nodes with no status." 
do_test remote-fence-unclean "Fence unclean baremetal remote-node" do_test remote-fence-unclean2 "Fence baremetal remote-node after cluster node fails and connection cannot be recovered" do_test remote-move "Move remote-node connection resource" do_test remote-disable "Disable a baremetal remote-node" do_test remote-orphaned "Properly shut down orphaned connection resource" do_test remote-orphaned2 "Verify we can handle orphaned remote connections with active resources on the remote" do_test remote-recover "Recover connection resource after cluster-node fails." do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section" do_test remote-partial-migrate "Make sure partial migrations are handled before ops on the remote node." do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection." do_test remote-recover-fail "Make sure start failure causes fencing if resources are active on remote." do_test remote-start-fail "Make sure a start failure does not result in fencing if no active resources are on remote." do_test remote-unclean2 "Make sure monitor failure always results in fencing, even if no resources are active on remote." echo "" do_test resource-discovery "Exercises resource-discovery location constraint option." do_test rsc-discovery-per-node "Disable resource discovery per node" echo "" do_test isolation-start-all "Start docker isolated resources." do_test isolation-restart-all "Restart docker isolated resources." do_test isolation-clone "Cloned isolated primitive." echo "" test_results diff --git a/pengine/test10/load-stopped-loop-2.dot b/pengine/test10/load-stopped-loop-2.dot new file mode 100644 index 0000000000..2252dcf448 --- /dev/null +++ b/pengine/test10/load-stopped-loop-2.dot @@ -0,0 +1,118 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange"] +"load_stopped_xfc0 xfc0" -> "xu-test3_start_0 xfc0" [ style = bold] +"load_stopped_xfc0 xfc0" -> "xu-test7_migrate_to_0 xfc1" [ style = bold] +"load_stopped_xfc0 xfc0" -> "xu-test7_start_0 xfc0" [ style = bold] +"load_stopped_xfc0 xfc0" [ style=bold color="green" fontcolor="orange"] +"load_stopped_xfc1 xfc1" -> "xu-test2_start_0 xfc1" [ style = bold] +"load_stopped_xfc1 xfc1" -> "xu-test6_migrate_to_0 xfc3" [ style = bold] +"load_stopped_xfc1 xfc1" -> "xu-test6_start_0 xfc1" [ style = bold] +"load_stopped_xfc1 xfc1" [ style=bold color="green" fontcolor="orange"] +"load_stopped_xfc2 xfc2" -> "xu-test4_start_0 xfc2" [ style = bold] +"load_stopped_xfc2 xfc2" -> "xu-test9_migrate_to_0 xfc0" [ style = bold] +"load_stopped_xfc2 xfc2" -> "xu-test9_start_0 xfc2" [ style = bold] +"load_stopped_xfc2 xfc2" [ style=bold color="green" fontcolor="orange"] +"load_stopped_xfc3 xfc3" -> "xu-test12_start_0 xfc3" [ style = bold] +"load_stopped_xfc3 xfc3" -> "xu-test13_start_0 xfc3" [ style = bold] +"load_stopped_xfc3 xfc3" -> "xu-test5_start_0 xfc3" [ style = bold] +"load_stopped_xfc3 xfc3" [ style=bold color="green" fontcolor="orange"] +"xu-test12_migrate_from_0 xfc3" -> "xu-test12_start_0 xfc3" [ style = bold] +"xu-test12_migrate_from_0 xfc3" -> "xu-test12_stop_0 xfc2" [ style = bold] +"xu-test12_migrate_from_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test12_migrate_to_0 xfc2" -> "xu-test12_migrate_from_0 xfc3" [ style = bold] +"xu-test12_migrate_to_0 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test12_monitor_10000 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test12_start_0 xfc3" -> "xu-test12_monitor_10000 xfc3"
[ style = bold] +"xu-test12_start_0 xfc3" [ style=bold color="green" fontcolor="orange"] +"xu-test12_stop_0 xfc2" -> "all_stopped" [ style = bold] +"xu-test12_stop_0 xfc2" -> "load_stopped_xfc2 xfc2" [ style = bold] +"xu-test12_stop_0 xfc2" -> "xu-test12_start_0 xfc3" [ style = bold] +"xu-test12_stop_0 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test13_monitor_10000 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test13_start_0 xfc3" -> "xu-test13_monitor_10000 xfc3" [ style = bold] +"xu-test13_start_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test2_migrate_from_0 xfc1" -> "xu-test2_start_0 xfc1" [ style = bold] +"xu-test2_migrate_from_0 xfc1" -> "xu-test2_stop_0 xfc3" [ style = bold] +"xu-test2_migrate_from_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test2_migrate_to_0 xfc3" -> "xu-test2_migrate_from_0 xfc1" [ style = bold] +"xu-test2_migrate_to_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test2_monitor_10000 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test2_start_0 xfc1" -> "xu-test2_monitor_10000 xfc1" [ style = bold] +"xu-test2_start_0 xfc1" [ style=bold color="green" fontcolor="orange"] +"xu-test2_stop_0 xfc3" -> "all_stopped" [ style = bold] +"xu-test2_stop_0 xfc3" -> "load_stopped_xfc3 xfc3" [ style = bold] +"xu-test2_stop_0 xfc3" -> "xu-test2_start_0 xfc1" [ style = bold] +"xu-test2_stop_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test3_migrate_from_0 xfc0" -> "xu-test3_start_0 xfc0" [ style = bold] +"xu-test3_migrate_from_0 xfc0" -> "xu-test3_stop_0 xfc1" [ style = bold] +"xu-test3_migrate_from_0 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test3_migrate_to_0 xfc1" -> "xu-test3_migrate_from_0 xfc0" [ style = bold] +"xu-test3_migrate_to_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test3_monitor_10000 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test3_start_0 xfc0" -> "xu-test3_monitor_10000 xfc0" [ style = bold] +"xu-test3_start_0 xfc0" [ style=bold color="green" fontcolor="orange"] +"xu-test3_stop_0 xfc1" -> "all_stopped" [ style = bold] +"xu-test3_stop_0 xfc1" -> "load_stopped_xfc1 xfc1" [ style = bold] +"xu-test3_stop_0 xfc1" -> "xu-test3_start_0 xfc0" [ style = bold] +"xu-test3_stop_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test4_migrate_from_0 xfc2" -> "xu-test4_start_0 xfc2" [ style = bold] +"xu-test4_migrate_from_0 xfc2" -> "xu-test4_stop_0 xfc0" [ style = bold] +"xu-test4_migrate_from_0 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test4_migrate_to_0 xfc0" -> "xu-test4_migrate_from_0 xfc2" [ style = bold] +"xu-test4_migrate_to_0 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test4_monitor_10000 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test4_start_0 xfc2" -> "xu-test4_monitor_10000 xfc2" [ style = bold] +"xu-test4_start_0 xfc2" [ style=bold color="green" fontcolor="orange"] +"xu-test4_stop_0 xfc0" -> "all_stopped" [ style = bold] +"xu-test4_stop_0 xfc0" -> "load_stopped_xfc0 xfc0" [ style = bold] +"xu-test4_stop_0 xfc0" -> "xu-test4_start_0 xfc2" [ style = bold] +"xu-test4_stop_0 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test5_migrate_from_0 xfc3" -> "xu-test5_start_0 xfc3" [ style = bold] +"xu-test5_migrate_from_0 xfc3" -> "xu-test5_stop_0 xfc2" [ style = bold] +"xu-test5_migrate_from_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test5_migrate_to_0 xfc2" -> "xu-test5_migrate_from_0 xfc3" [ style = bold] +"xu-test5_migrate_to_0 xfc2" [ style=bold color="green" 
fontcolor="black"] +"xu-test5_monitor_10000 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test5_start_0 xfc3" -> "xu-test5_monitor_10000 xfc3" [ style = bold] +"xu-test5_start_0 xfc3" [ style=bold color="green" fontcolor="orange"] +"xu-test5_stop_0 xfc2" -> "all_stopped" [ style = bold] +"xu-test5_stop_0 xfc2" -> "load_stopped_xfc2 xfc2" [ style = bold] +"xu-test5_stop_0 xfc2" -> "xu-test5_start_0 xfc3" [ style = bold] +"xu-test5_stop_0 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test6_migrate_from_0 xfc1" -> "xu-test6_start_0 xfc1" [ style = bold] +"xu-test6_migrate_from_0 xfc1" -> "xu-test6_stop_0 xfc3" [ style = bold] +"xu-test6_migrate_from_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test6_migrate_to_0 xfc3" -> "xu-test6_migrate_from_0 xfc1" [ style = bold] +"xu-test6_migrate_to_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test6_monitor_10000 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test6_start_0 xfc1" -> "xu-test6_monitor_10000 xfc1" [ style = bold] +"xu-test6_start_0 xfc1" [ style=bold color="green" fontcolor="orange"] +"xu-test6_stop_0 xfc3" -> "all_stopped" [ style = bold] +"xu-test6_stop_0 xfc3" -> "load_stopped_xfc3 xfc3" [ style = bold] +"xu-test6_stop_0 xfc3" -> "xu-test6_start_0 xfc1" [ style = bold] +"xu-test6_stop_0 xfc3" [ style=bold color="green" fontcolor="black"] +"xu-test7_migrate_from_0 xfc0" -> "xu-test7_start_0 xfc0" [ style = bold] +"xu-test7_migrate_from_0 xfc0" -> "xu-test7_stop_0 xfc1" [ style = bold] +"xu-test7_migrate_from_0 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test7_migrate_to_0 xfc1" -> "xu-test7_migrate_from_0 xfc0" [ style = bold] +"xu-test7_migrate_to_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test7_monitor_10000 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test7_start_0 xfc0" -> "xu-test7_monitor_10000 xfc0" [ style = bold] +"xu-test7_start_0 xfc0" [ style=bold color="green" fontcolor="orange"] +"xu-test7_stop_0 xfc1" -> "all_stopped" [ style = bold] +"xu-test7_stop_0 xfc1" -> "load_stopped_xfc1 xfc1" [ style = bold] +"xu-test7_stop_0 xfc1" -> "xu-test7_start_0 xfc0" [ style = bold] +"xu-test7_stop_0 xfc1" [ style=bold color="green" fontcolor="black"] +"xu-test9_migrate_from_0 xfc2" -> "xu-test9_start_0 xfc2" [ style = bold] +"xu-test9_migrate_from_0 xfc2" -> "xu-test9_stop_0 xfc0" [ style = bold] +"xu-test9_migrate_from_0 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test9_migrate_to_0 xfc0" -> "xu-test9_migrate_from_0 xfc2" [ style = bold] +"xu-test9_migrate_to_0 xfc0" [ style=bold color="green" fontcolor="black"] +"xu-test9_monitor_10000 xfc2" [ style=bold color="green" fontcolor="black"] +"xu-test9_start_0 xfc2" -> "xu-test9_monitor_10000 xfc2" [ style = bold] +"xu-test9_start_0 xfc2" [ style=bold color="green" fontcolor="orange"] +"xu-test9_stop_0 xfc0" -> "all_stopped" [ style = bold] +"xu-test9_stop_0 xfc0" -> "load_stopped_xfc0 xfc0" [ style = bold] +"xu-test9_stop_0 xfc0" -> "xu-test9_start_0 xfc2" [ style = bold] +"xu-test9_stop_0 xfc0" [ style=bold color="green" fontcolor="black"] +} diff --git a/pengine/test10/load-stopped-loop-2.exp b/pengine/test10/load-stopped-loop-2.exp new file mode 100644 index 0000000000..3b8913303a --- /dev/null +++ b/pengine/test10/load-stopped-loop-2.exp @@ -0,0 +1,661 @@ [661 added lines of expected transition-graph XML: the file's markup was lost in extraction and is not reproduced here] diff --git a/pengine/test10/load-stopped-loop-2.scores b/pengine/test10/load-stopped-loop-2.scores new file mode 100644 index 0000000000..10f120f8cc --- /dev/null +++ b/pengine/test10/load-stopped-loop-2.scores @@ -0,0 +1,141 @@ +Allocation scores: +clone_color: cl_glusterd allocation score on xfc0: 0 +clone_color: cl_glusterd allocation score on xfc1: 0 +clone_color: cl_glusterd allocation score on xfc2: 0 +clone_color: cl_glusterd allocation score on xfc3: INFINITY +clone_color: cl_p_bl_glusterfs allocation score on xfc0: 0 +clone_color: cl_p_bl_glusterfs allocation score on xfc1: 0 +clone_color: cl_p_bl_glusterfs allocation score on xfc2: 0 +clone_color: cl_p_bl_glusterfs allocation score on xfc3: INFINITY +clone_color: p_bl_glusterfs:0 allocation score on xfc0: 0 +clone_color: p_bl_glusterfs:0 allocation score on xfc1: 0 +clone_color: p_bl_glusterfs:0 allocation score on xfc2: 0 +clone_color: p_bl_glusterfs:0 allocation score on xfc3: 1 +clone_color: p_bl_glusterfs:1 allocation score on xfc0: 1 +clone_color: p_bl_glusterfs:1 allocation score on xfc1: 0 +clone_color: p_bl_glusterfs:1 allocation score on xfc2: 0 +clone_color: p_bl_glusterfs:1 allocation score on xfc3: 0 +clone_color: p_bl_glusterfs:2 allocation score on xfc0: 0 +clone_color: p_bl_glusterfs:2 allocation score on xfc1: 1 +clone_color: p_bl_glusterfs:2 allocation score on xfc2: 0 +clone_color: p_bl_glusterfs:2 allocation score on xfc3: 0 +clone_color: p_bl_glusterfs:3 allocation score on xfc0: 0 +clone_color: p_bl_glusterfs:3 allocation score on xfc1: 0 +clone_color: p_bl_glusterfs:3 allocation score on xfc2: 1 +clone_color: p_bl_glusterfs:3 allocation score on xfc3: 0 +clone_color: p_glusterd:0 allocation score on xfc0: 0 +clone_color: p_glusterd:0 allocation score on xfc1: 0 +clone_color: p_glusterd:0 allocation score on xfc2: 0 +clone_color: p_glusterd:0 allocation score on xfc3: 1 +clone_color: p_glusterd:1 allocation score on xfc0: 1 +clone_color: p_glusterd:1 allocation score on xfc1: 0 +clone_color: p_glusterd:1 allocation score on xfc2: 0 +clone_color: p_glusterd:1 allocation score on xfc3: 0 +clone_color: p_glusterd:2 allocation score on xfc0: 0 +clone_color: p_glusterd:2 allocation score on xfc1: 1 +clone_color: p_glusterd:2 allocation score on xfc2: 0 +clone_color: p_glusterd:2 allocation score on xfc3: 0 +clone_color: p_glusterd:3 allocation score on xfc0: 0 +clone_color: p_glusterd:3 allocation score on xfc1: 0 +clone_color:
p_glusterd:3 allocation score on xfc2: 1
+clone_color: p_glusterd:3 allocation score on xfc3: 0
+native_color: p_bl_glusterfs:0 allocation score on xfc0: -INFINITY
+native_color: p_bl_glusterfs:0 allocation score on xfc1: -INFINITY
+native_color: p_bl_glusterfs:0 allocation score on xfc2: -INFINITY
+native_color: p_bl_glusterfs:0 allocation score on xfc3: 1
+native_color: p_bl_glusterfs:1 allocation score on xfc0: 1
+native_color: p_bl_glusterfs:1 allocation score on xfc1: -INFINITY
+native_color: p_bl_glusterfs:1 allocation score on xfc2: -INFINITY
+native_color: p_bl_glusterfs:1 allocation score on xfc3: -INFINITY
+native_color: p_bl_glusterfs:2 allocation score on xfc0: -INFINITY
+native_color: p_bl_glusterfs:2 allocation score on xfc1: 1
+native_color: p_bl_glusterfs:2 allocation score on xfc2: -INFINITY
+native_color: p_bl_glusterfs:2 allocation score on xfc3: -INFINITY
+native_color: p_bl_glusterfs:3 allocation score on xfc0: -INFINITY
+native_color: p_bl_glusterfs:3 allocation score on xfc1: -INFINITY
+native_color: p_bl_glusterfs:3 allocation score on xfc2: 1
+native_color: p_bl_glusterfs:3 allocation score on xfc3: -INFINITY
+native_color: p_glusterd:0 allocation score on xfc0: 0
+native_color: p_glusterd:0 allocation score on xfc1: 0
+native_color: p_glusterd:0 allocation score on xfc2: 0
+native_color: p_glusterd:0 allocation score on xfc3: 1
+native_color: p_glusterd:1 allocation score on xfc0: 1
+native_color: p_glusterd:1 allocation score on xfc1: -INFINITY
+native_color: p_glusterd:1 allocation score on xfc2: 0
+native_color: p_glusterd:1 allocation score on xfc3: -INFINITY
+native_color: p_glusterd:2 allocation score on xfc0: 0
+native_color: p_glusterd:2 allocation score on xfc1: 1
+native_color: p_glusterd:2 allocation score on xfc2: 0
+native_color: p_glusterd:2 allocation score on xfc3: -INFINITY
+native_color: p_glusterd:3 allocation score on xfc0: -INFINITY
+native_color: p_glusterd:3 allocation score on xfc1: -INFINITY
+native_color: p_glusterd:3 allocation score on xfc2: 1
+native_color: p_glusterd:3 allocation score on xfc3: -INFINITY
+native_color: xu-test1 allocation score on xfc0: 0
+native_color: xu-test1 allocation score on xfc1: 0
+native_color: xu-test1 allocation score on xfc2: 0
+native_color: xu-test1 allocation score on xfc3: 0
+native_color: xu-test10 allocation score on xfc0: 0
+native_color: xu-test10 allocation score on xfc1: 0
+native_color: xu-test10 allocation score on xfc2: 0
+native_color: xu-test10 allocation score on xfc3: 0
+native_color: xu-test11 allocation score on xfc0: 0
+native_color: xu-test11 allocation score on xfc1: 0
+native_color: xu-test11 allocation score on xfc2: 0
+native_color: xu-test11 allocation score on xfc3: 0
+native_color: xu-test12 allocation score on xfc0: 0
+native_color: xu-test12 allocation score on xfc1: 0
+native_color: xu-test12 allocation score on xfc2: 0
+native_color: xu-test12 allocation score on xfc3: 0
+native_color: xu-test13 allocation score on xfc0: 0
+native_color: xu-test13 allocation score on xfc1: 0
+native_color: xu-test13 allocation score on xfc2: 0
+native_color: xu-test13 allocation score on xfc3: 0
+native_color: xu-test14 allocation score on xfc0: -INFINITY
+native_color: xu-test14 allocation score on xfc1: -INFINITY
+native_color: xu-test14 allocation score on xfc2: -INFINITY
+native_color: xu-test14 allocation score on xfc3: -INFINITY
+native_color: xu-test15 allocation score on xfc0: -INFINITY
+native_color: xu-test15 allocation score on xfc1: -INFINITY
+native_color: xu-test15 allocation score on xfc2: -INFINITY
+native_color: xu-test15 allocation score on xfc3: -INFINITY
+native_color: xu-test16 allocation score on xfc0: -INFINITY
+native_color: xu-test16 allocation score on xfc1: -INFINITY
+native_color: xu-test16 allocation score on xfc2: -INFINITY
+native_color: xu-test16 allocation score on xfc3: -INFINITY
+native_color: xu-test17 allocation score on xfc0: -INFINITY
+native_color: xu-test17 allocation score on xfc1: -INFINITY
+native_color: xu-test17 allocation score on xfc2: -INFINITY
+native_color: xu-test17 allocation score on xfc3: -INFINITY
+native_color: xu-test2 allocation score on xfc0: 0
+native_color: xu-test2 allocation score on xfc1: 0
+native_color: xu-test2 allocation score on xfc2: 0
+native_color: xu-test2 allocation score on xfc3: 0
+native_color: xu-test3 allocation score on xfc0: 0
+native_color: xu-test3 allocation score on xfc1: 0
+native_color: xu-test3 allocation score on xfc2: 0
+native_color: xu-test3 allocation score on xfc3: 0
+native_color: xu-test4 allocation score on xfc0: 0
+native_color: xu-test4 allocation score on xfc1: 0
+native_color: xu-test4 allocation score on xfc2: 0
+native_color: xu-test4 allocation score on xfc3: 0
+native_color: xu-test5 allocation score on xfc0: 0
+native_color: xu-test5 allocation score on xfc1: 0
+native_color: xu-test5 allocation score on xfc2: 0
+native_color: xu-test5 allocation score on xfc3: 0
+native_color: xu-test6 allocation score on xfc0: 0
+native_color: xu-test6 allocation score on xfc1: 0
+native_color: xu-test6 allocation score on xfc2: 0
+native_color: xu-test6 allocation score on xfc3: 0
+native_color: xu-test7 allocation score on xfc0: 0
+native_color: xu-test7 allocation score on xfc1: 0
+native_color: xu-test7 allocation score on xfc2: 0
+native_color: xu-test7 allocation score on xfc3: 0
+native_color: xu-test8 allocation score on xfc0: 0
+native_color: xu-test8 allocation score on xfc1: 0
+native_color: xu-test8 allocation score on xfc2: 0
+native_color: xu-test8 allocation score on xfc3: INFINITY
+native_color: xu-test9 allocation score on xfc0: 0
+native_color: xu-test9 allocation score on xfc1: 0
+native_color: xu-test9 allocation score on xfc2: 0
+native_color: xu-test9 allocation score on xfc3: 0
diff --git a/pengine/test10/load-stopped-loop-2.summary b/pengine/test10/load-stopped-loop-2.summary
new file mode 100644
index 0000000000..522b568616
--- /dev/null
+++ b/pengine/test10/load-stopped-loop-2.summary
@@ -0,0 +1,111 @@
+
+Current cluster status:
+Online: [ xfc0 xfc1 xfc2 xfc3 ]
+
+ Clone Set: cl_glusterd [p_glusterd]
+ Started: [ xfc0 xfc1 xfc2 xfc3 ]
+ Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs]
+ Started: [ xfc0 xfc1 xfc2 xfc3 ]
+ xu-test8 (ocf::heartbeat:Xen): Started xfc3
+ xu-test1 (ocf::heartbeat:Xen): Started xfc3
+ xu-test10 (ocf::heartbeat:Xen): Started xfc3
+ xu-test11 (ocf::heartbeat:Xen): Started xfc3
+ xu-test12 (ocf::heartbeat:Xen): Started xfc2
+ xu-test13 (ocf::heartbeat:Xen): Stopped
+ xu-test14 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test15 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test16 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test17 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test2 (ocf::heartbeat:Xen): Started xfc3
+ xu-test3 (ocf::heartbeat:Xen): Started xfc1
+ xu-test4 (ocf::heartbeat:Xen): Started xfc0
+ xu-test5 (ocf::heartbeat:Xen): Started xfc2
+ xu-test6 (ocf::heartbeat:Xen): Started xfc3
+ xu-test7 (ocf::heartbeat:Xen): Started xfc1
+ xu-test9 (ocf::heartbeat:Xen): Started xfc0
+
+Transition Summary:
+ * Migrate xu-test12 (Started xfc2 -> xfc3)
+ * Migrate xu-test2 (Started xfc3 -> xfc1)
+ * Migrate xu-test3 (Started xfc1 -> xfc0)
+ * Migrate xu-test4 (Started xfc0 -> xfc2)
+ * Migrate xu-test5 (Started xfc2 -> xfc3)
+ * Migrate xu-test6 (Started xfc3 -> xfc1)
+ * Migrate xu-test7 (Started xfc1 -> xfc0)
+ * Migrate xu-test9 (Started xfc0 -> xfc2)
+ * Start xu-test13 (xfc3)
+
+Executing cluster transition:
+ * Resource action: xu-test12 migrate_to on xfc2
+ * Resource action: xu-test2 migrate_to on xfc3
+ * Resource action: xu-test3 migrate_to on xfc1
+ * Resource action: xu-test4 migrate_to on xfc0
+ * Resource action: xu-test5 migrate_to on xfc2
+ * Resource action: xu-test12 migrate_from on xfc3
+ * Resource action: xu-test12 stop on xfc2
+ * Resource action: xu-test2 migrate_from on xfc1
+ * Resource action: xu-test2 stop on xfc3
+ * Resource action: xu-test3 migrate_from on xfc0
+ * Resource action: xu-test3 stop on xfc1
+ * Resource action: xu-test4 migrate_from on xfc2
+ * Resource action: xu-test4 stop on xfc0
+ * Resource action: xu-test5 migrate_from on xfc3
+ * Resource action: xu-test5 stop on xfc2
+ * Pseudo action: load_stopped_xfc2
+ * Pseudo action: xu-test4_start_0
+ * Resource action: xu-test9 migrate_to on xfc0
+ * Resource action: xu-test4 monitor=10000 on xfc2
+ * Resource action: xu-test9 migrate_from on xfc2
+ * Resource action: xu-test9 stop on xfc0
+ * Pseudo action: load_stopped_xfc0
+ * Pseudo action: xu-test3_start_0
+ * Resource action: xu-test7 migrate_to on xfc1
+ * Pseudo action: xu-test9_start_0
+ * Resource action: xu-test3 monitor=10000 on xfc0
+ * Resource action: xu-test7 migrate_from on xfc0
+ * Resource action: xu-test7 stop on xfc1
+ * Resource action: xu-test9 monitor=10000 on xfc2
+ * Pseudo action: load_stopped_xfc1
+ * Pseudo action: xu-test2_start_0
+ * Resource action: xu-test6 migrate_to on xfc3
+ * Pseudo action: xu-test7_start_0
+ * Resource action: xu-test2 monitor=10000 on xfc1
+ * Resource action: xu-test6 migrate_from on xfc1
+ * Resource action: xu-test6 stop on xfc3
+ * Resource action: xu-test7 monitor=10000 on xfc0
+ * Pseudo action: load_stopped_xfc3
+ * Pseudo action: all_stopped
+ * Pseudo action: xu-test12_start_0
+ * Pseudo action: xu-test5_start_0
+ * Pseudo action: xu-test6_start_0
+ * Resource action: xu-test13 start on xfc3
+ * Resource action: xu-test12 monitor=10000 on xfc3
+ * Resource action: xu-test5 monitor=10000 on xfc3
+ * Resource action: xu-test6 monitor=10000 on xfc1
+ * Resource action: xu-test13 monitor=10000 on xfc3
+
+Revised cluster status:
+Online: [ xfc0 xfc1 xfc2 xfc3 ]
+
+ Clone Set: cl_glusterd [p_glusterd]
+ Started: [ xfc0 xfc1 xfc2 xfc3 ]
+ Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs]
+ Started: [ xfc0 xfc1 xfc2 xfc3 ]
+ xu-test8 (ocf::heartbeat:Xen): Started xfc3
+ xu-test1 (ocf::heartbeat:Xen): Started xfc3
+ xu-test10 (ocf::heartbeat:Xen): Started xfc3
+ xu-test11 (ocf::heartbeat:Xen): Started xfc3
+ xu-test12 (ocf::heartbeat:Xen): Started xfc3
+ xu-test13 (ocf::heartbeat:Xen): Started xfc3
+ xu-test14 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test15 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test16 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test17 (ocf::heartbeat:Xen): (target-role:Stopped) Stopped
+ xu-test2 (ocf::heartbeat:Xen): Started xfc1
+ xu-test3 (ocf::heartbeat:Xen): Started xfc0
+ xu-test4 (ocf::heartbeat:Xen): Started xfc2
+ xu-test5 (ocf::heartbeat:Xen): Started xfc3
+ xu-test6 (ocf::heartbeat:Xen): Started xfc1
+ xu-test7 (ocf::heartbeat:Xen): Started xfc0
+ xu-test9 (ocf::heartbeat:Xen): Started xfc2
+
diff --git a/pengine/test10/load-stopped-loop-2.xml b/pengine/test10/load-stopped-loop-2.xml
new file mode 100644
index 0000000000..3b608a1ff0
--- /dev/null
+++ b/pengine/test10/load-stopped-loop-2.xml
@@ -0,0 +1,814 @@
[814 added lines of XML (the test's CIB input) are not recoverable: the markup was stripped during extraction, leaving only bare "+" markers]
diff --git a/pengine/test10/load-stopped-loop.dot b/pengine/test10/load-stopped-loop.dot
index 06d3f6f259..28079d499f 100644
--- a/pengine/test10/load-stopped-loop.dot
+++ b/pengine/test10/load-stopped-loop.dot
@@ -1,66 +1,67 @@
 digraph "g" {
 "all_stopped" [ style=bold color="green" fontcolor="orange"]
 "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
 "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style = bold]
 "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" -> "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style = bold]
 "license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"]
 "license.anbriz.vds-ok.com-vm_start_0 v03-a" -> "license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
 "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="orange"]
 "license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "all_stopped" [ style = bold]
 "license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
 "license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "load_stopped_v03-b v03-b" [ style = bold]
 "license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "load_stopped_mgmt01 mgmt01" [ style=bold color="green" fontcolor="orange"]
 "load_stopped_v03-a v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
 "load_stopped_v03-a v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
 "load_stopped_v03-a v03-a" [ style=bold color="green" fontcolor="orange"]
+"load_stopped_v03-b v03-b" -> "terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" [ style = bold]
 "load_stopped_v03-b v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
 "load_stopped_v03-b v03-b" [ style=bold color="green" fontcolor="orange"]
 "stonith-v03-a_monitor_60000 v03-b" [ style=bold color="green" fontcolor="black"]
 "stonith-v03-a_start_0 v03-b" -> "stonith-v03-a_monitor_60000 v03-b" [ style = bold]
 "stonith-v03-a_start_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "stonith-v03-a_stop_0 v03-b" -> "all_stopped" [ style = bold]
 "stonith-v03-a_stop_0 v03-b" -> "stonith-v03-a_start_0 v03-b" [ style = bold]
 "stonith-v03-a_stop_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "stonith-v03-b_monitor_60000 v03-a" [ style=bold color="green" fontcolor="black"]
 "stonith-v03-b_start_0 v03-a" -> "stonith-v03-b_monitor_60000 v03-a" [ style = bold]
 "stonith-v03-b_start_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "stonith-v03-b_stop_0 v03-a" -> "all_stopped" [ style = bold]
 "stonith-v03-b_stop_0 v03-a" -> "stonith-v03-b_start_0 v03-a" [ style = bold]
 "stonith-v03-b_stop_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"]
 "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style=bold color="green" fontcolor="orange"]
 "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "all_stopped" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "load_stopped_v03-a v03-a" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
 "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"]
 "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
 "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style = bold]
 "vds-ok-pool-0-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style = bold]
 "vds-ok-pool-0-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-0-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style = bold]
 "vds-ok-pool-0-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style = bold]
 "vds-ok-pool-1-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style = bold]
 "vds-ok-pool-1-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"]
 "vds-ok-pool-1-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style = bold]
 "vds-ok-pool-1-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"]
 }
diff --git a/pengine/test10/load-stopped-loop.exp b/pengine/test10/load-stopped-loop.exp
index a259834028..a124019cb9 100644
--- a/pengine/test10/load-stopped-loop.exp
+++ b/pengine/test10/load-stopped-loop.exp
@@ -1,416 +1,420 @@
[hunk content not recoverable: the transition-graph XML was stripped during extraction; only bare "-" and "+" markers (one removed line, five added lines) remain]
diff --git a/pengine/test10/load-stopped-loop.summary b/pengine/test10/load-stopped-loop.summary
index feb93fa2a5..ffcae1f8d8 100644
--- a/pengine/test10/load-stopped-loop.summary
+++ b/pengine/test10/load-stopped-loop.summary
@@ -1,334 +1,334 @@
 
 Current cluster status:
 Online: [ mgmt01 v03-a v03-b ]
 
 stonith-v02-a (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-b (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-c (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-d (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-mgmt01 (stonith:fence_xvm): Started v03-b
 stonith-mgmt02 (stonith:meatware): Started mgmt01
 stonith-v03-c (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v03-a (stonith:fence_ipmilan): Started v03-b
 stonith-v03-b (stonith:fence_ipmilan): Started v03-a
 stonith-v03-d (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 Clone Set: cl-clvmd [clvmd]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-dlm [dlm]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-iscsid [iscsid]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirtd [libvirtd]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-multipathd [multipathd]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-node-params [node-params]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan1-if [vlan1-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan101-if [vlan101-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan102-if [vlan102-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan103-if [vlan103-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan104-if [vlan104-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan3-if [vlan3-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan4-if [vlan4-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan5-if [vlan5-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan900-if [vlan900-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan909-if [vlan909-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-libvirt-images-fs [libvirt-images-fs]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirt-install-fs [libvirt-install-fs]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirt-images-pool [libvirt-images-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped
 vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 Clone Set: cl-vlan200-if [vlan200-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 gotin-bbb-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 maxb-c55-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 metae.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 Clone Set: cl-mcast-test-net [mcast-test-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 Clone Set: cl-libvirt-qpid [libvirt-qpid]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 Clone Set: cl-mcast-gleb-net [mcast-gleb-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 
 Transition Summary:
  * Reload vds-ok-pool-0-iscsi:0 (Started mgmt01)
  * Reload vds-ok-pool-0-iscsi:1 (Started v03-b)
  * Reload vds-ok-pool-0-iscsi:2 (Started v03-a)
  * Reload vds-ok-pool-1-iscsi:0 (Started mgmt01)
  * Reload vds-ok-pool-1-iscsi:1 (Started v03-b)
  * Reload vds-ok-pool-1-iscsi:2 (Started v03-a)
  * Restart stonith-v03-b (Started v03-a)
  * Restart stonith-v03-a (Started v03-b)
  * Migrate license.anbriz.vds-ok.com-vm (Started v03-b -> v03-a)
  * Migrate terminal0.anbriz.vds-ok.com-vm (Started v03-a -> v03-b)
  * Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (v03-a)
 
 Executing cluster transition:
  * Resource action: vds-ok-pool-0-iscsi:1 reload on mgmt01
  * Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01
  * Resource action: vds-ok-pool-0-iscsi:0 reload on v03-b
  * Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b
  * Resource action: vds-ok-pool-0-iscsi:2 reload on v03-a
  * Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a
  * Resource action: vds-ok-pool-1-iscsi:1 reload on mgmt01
  * Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01
  * Resource action: vds-ok-pool-1-iscsi:0 reload on v03-b
  * Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b
  * Resource action: vds-ok-pool-1-iscsi:2 reload on v03-a
  * Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a
  * Resource action: stonith-v03-b stop on v03-a
  * Resource action: stonith-v03-b start on v03-a
  * Resource action: stonith-v03-b monitor=60000 on v03-a
  * Resource action: stonith-v03-a stop on v03-b
  * Resource action: stonith-v03-a start on v03-b
  * Resource action: stonith-v03-a monitor=60000 on v03-b
  * Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b
- * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a
  * Pseudo action: load_stopped_mgmt01
  * Resource action: license.anbriz.vds-ok.com-vm migrate_from on v03-a
  * Resource action: license.anbriz.vds-ok.com-vm stop on v03-b
+ * Pseudo action: load_stopped_v03-b
+ * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a
  * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b
  * Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a
- * Pseudo action: load_stopped_v03-b
  * Pseudo action: load_stopped_v03-a
  * Pseudo action: all_stopped
  * Pseudo action: license.anbriz.vds-ok.com-vm_start_0
  * Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0
  * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a
  * Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a
  * Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b
  * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a
 
 Revised cluster status:
 Online: [ mgmt01 v03-a v03-b ]
 
 stonith-v02-a (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-b (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-c (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v02-d (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-mgmt01 (stonith:fence_xvm): Started v03-b
 stonith-mgmt02 (stonith:meatware): Started mgmt01
 stonith-v03-c (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 stonith-v03-a (stonith:fence_ipmilan): Started v03-b
 stonith-v03-b (stonith:fence_ipmilan): Started v03-a
 stonith-v03-d (stonith:fence_ipmilan): (target-role:Stopped) Stopped
 Clone Set: cl-clvmd [clvmd]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-dlm [dlm]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-iscsid [iscsid]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirtd [libvirtd]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-multipathd [multipathd]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-node-params [node-params]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan1-if [vlan1-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan101-if [vlan101-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan102-if [vlan102-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan103-if [vlan103-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan104-if [vlan104-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan3-if [vlan3-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan4-if [vlan4-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan5-if [vlan5-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan900-if [vlan900-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vlan909-if [vlan909-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-libvirt-images-fs [libvirt-images-fs]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirt-install-fs [libvirt-install-fs]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]
 Started: [ mgmt01 v03-a v03-b ]
 Clone Set: cl-libvirt-images-pool [libvirt-images-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 Clone Set: cl-vlan200-if [vlan200-if]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 gotin-bbb-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 maxb-c55-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 metae.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 Clone Set: cl-mcast-test-net [mcast-test-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a
 Clone Set: cl-libvirt-qpid [libvirt-qpid]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]
 gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): (target-role:Stopped) Stopped
 terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b
 Clone Set: cl-mcast-gleb-net [mcast-gleb-net]
 Started: [ v03-a v03-b ]
 Stopped: [ mgmt01 ]