diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 99e2055883..5db90aa16c 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -1,299 +1,302 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PE_INTERNAL__H # define PE_INTERNAL__H # include # include # define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args) # define pe_err(fmt...) { was_processing_error = TRUE; crm_config_error = TRUE; crm_err(fmt); } # define pe_warn(fmt...) { was_processing_warning = TRUE; crm_config_warning = TRUE; crm_warn(fmt); } # define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); } # define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); } # define pe_set_action_bit(action, bit) action->flags = crm_set_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit) # define pe_clear_action_bit(action, bit) action->flags = crm_clear_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit) typedef struct notify_data_s { GHashTable *keys; const char *action; action_t *pre; action_t *post; action_t *pre_done; action_t *post_done; GListPtr active; /* notify_entry_t* */ GListPtr inactive; /* notify_entry_t* */ GListPtr start; /* notify_entry_t* */ GListPtr stop; /* notify_entry_t* */ GListPtr demote; /* notify_entry_t* */ GListPtr promote; /* notify_entry_t* */ GListPtr master; /* notify_entry_t* */ GListPtr slave; /* notify_entry_t* */ GHashTable *allowed_nodes; } notify_data_t; bool pe_can_fence(pe_working_set_t *data_set, node_t *node); int merge_weights(int w1, int w2); void add_hash_param(GHashTable * hash, const char *name, const char *value); void append_hashtable(gpointer key, gpointer value, gpointer user_data); char *native_parameter(resource_t * rsc, node_t * node, gboolean create, const char *name, pe_working_set_t * data_set); node_t *native_location(resource_t * rsc, GListPtr * list, gboolean current); void pe_metadata(void); void verify_pe_options(GHashTable * options); void common_update_score(resource_t * rsc, const char *id, int score); void native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set); node_t *rsc_known_on(resource_t * rsc, GListPtr * list); gboolean native_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean group_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean clone_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean master_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set); resource_t *native_find_rsc(resource_t * 
rsc, const char *id, node_t * node, int flags); gboolean native_active(resource_t * rsc, gboolean all); gboolean group_active(resource_t * rsc, gboolean all); gboolean clone_active(resource_t * rsc, gboolean all); gboolean master_active(resource_t * rsc, gboolean all); gboolean container_active(resource_t * rsc, gboolean all); void native_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void group_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void master_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void container_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void native_free(resource_t * rsc); void group_free(resource_t * rsc); void clone_free(resource_t * rsc); void master_free(resource_t * rsc); void container_free(resource_t * rsc); enum rsc_role_e native_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e group_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e clone_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e master_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e container_resource_state(const resource_t * rsc, gboolean current); gboolean common_unpack(xmlNode * xml_obj, resource_t ** rsc, resource_t * parent, pe_working_set_t * data_set); void common_free(resource_t * rsc); extern pe_working_set_t *pe_dataset; extern node_t *node_copy(const node_t *this_node); extern time_t get_effective_time(pe_working_set_t * data_set); /* Failure handling utilities (from failcounts.c) */ extern int get_failcount(node_t * node, resource_t * rsc, time_t *last_failure, pe_working_set_t * data_set); extern int get_failcount_full(node_t * node, resource_t * rsc, time_t *last_failure, bool effective, xmlNode * xml_op, pe_working_set_t * data_set); extern int get_failcount_all(node_t * node, resource_t * rsc, time_t *last_failure, pe_working_set_t * data_set); /* Binary like operators for lists of nodes */ extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores); extern GListPtr node_list_dup(GListPtr list, gboolean reset, gboolean filter); extern GListPtr node_list_from_hash(GHashTable * hash, gboolean reset, gboolean filter); extern GHashTable *node_hash_from_list(GListPtr list); static inline gpointer pe_hash_table_lookup(GHashTable * hash, gconstpointer key) { if (hash) { return g_hash_table_lookup(hash, key); } return NULL; } extern action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set); extern gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order); GHashTable *node_hash_dup(GHashTable * hash); extern GListPtr node_list_and(GListPtr list1, GListPtr list2, gboolean filter); extern GListPtr node_list_xor(GListPtr list1, GListPtr list2, gboolean filter); extern GListPtr node_list_minus(GListPtr list1, GListPtr list2, gboolean filter); extern void pe_free_shallow(GListPtr alist); extern void pe_free_shallow_adv(GListPtr alist, gboolean with_data); /* Printing functions for debug */ extern void print_node(const char *pre_text, node_t * node, gboolean details); extern void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details); extern void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * 
nodes); extern void dump_node_capacity(int level, const char *comment, node_t * node); extern void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node); # define dump_node_scores(level, rsc, text, nodes) do { \ dump_node_scores_worker(level, __FILE__, __FUNCTION__, __LINE__, rsc, text, nodes); \ } while(0) /* Sorting functions */ extern gint sort_rsc_priority(gconstpointer a, gconstpointer b); extern gint sort_rsc_index(gconstpointer a, gconstpointer b); extern xmlNode *find_rsc_op_entry(resource_t * rsc, const char *key); extern action_t *custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean foo, pe_working_set_t * data_set); # define delete_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DELETE, 0) # define delete_action(rsc, node, optional) custom_action( \ rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \ optional, TRUE, data_set); # define stopped_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STOPPED, 0) # define stopped_action(rsc, node, optional) custom_action( \ rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \ optional, TRUE, data_set); # define stop_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STOP, 0) # define stop_action(rsc, node, optional) custom_action( \ rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \ optional, TRUE, data_set); # define reload_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_RELOAD, 0) # define start_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_START, 0) # define start_action(rsc, node, optional) custom_action( \ rsc, start_key(rsc), CRMD_ACTION_START, node, \ optional, TRUE, data_set) # define started_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STARTED, 0) # define started_action(rsc, node, optional) custom_action( \ rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \ optional, TRUE, data_set) # define promote_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_PROMOTE, 0) # define promote_action(rsc, node, optional) custom_action( \ rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \ optional, TRUE, data_set) # define promoted_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_PROMOTED, 0) # define promoted_action(rsc, node, optional) custom_action( \ rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \ optional, TRUE, data_set) # define demote_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DEMOTE, 0) # define demote_action(rsc, node, optional) custom_action( \ rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \ optional, TRUE, data_set) # define demoted_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DEMOTED, 0) # define demoted_action(rsc, node, optional) custom_action( \ rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \ optional, TRUE, data_set) extern action_t *find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node); extern enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic); extern GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node); extern GListPtr find_actions_exact(GListPtr input, const char *key, node_t * on_node); extern GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node); extern void pe_free_action(action_t * action); extern void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); extern gboolean get_target_role(resource_t * rsc, enum rsc_role_e *role); extern resource_t *find_clone_instance(resource_t * rsc, const char *sub_id, 
pe_working_set_t * data_set);

extern void destroy_ticket(gpointer data);
extern ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);

char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);

int get_target_rc(xmlNode * xml_op);

gint sort_node_uname(gconstpointer a, gconstpointer b);

bool is_set_recursive(resource_t * rsc, long long flag, bool any);

enum rsc_digest_cmp_val {
    /*! Digests are the same */
    RSC_DIGEST_MATCH = 0,
    /*! Params that require a restart changed */
    RSC_DIGEST_RESTART,
    /*! Some parameter changed. */
    RSC_DIGEST_ALL,
    /*! rsc op didn't have a digest associated with it, so
     * it is unknown if parameters changed or not. */
    RSC_DIGEST_UNKNOWN,
};

typedef struct op_digest_cache_s {
    enum rsc_digest_cmp_val rc;
    xmlNode *params_all;
    xmlNode *params_secure;
    xmlNode *params_restart;
    char *digest_all_calc;
    char *digest_secure_calc;
    char *digest_restart_calc;
} op_digest_cache_t;

op_digest_cache_t *rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
                                         pe_working_set_t * data_set);

action_t *pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set);
void trigger_unfencing(
    resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set);

-void pe_action_required_worker(pe_action_t *action, const char *reason, const char *function, long line);
-#define pe_action_required(action, reason) pe_action_required_worker(action, reason, __FUNCTION__, __LINE__)
+void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
+void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
+
+#define pe_action_required(action, reason, text) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, text, pe_action_optional, FALSE)
+#define pe_action_implies(action, reason, flag) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, NULL, flag, FALSE)

void set_bit_recursive(resource_t * rsc, unsigned long long flag);
void clear_bit_recursive(resource_t * rsc, unsigned long long flag);

gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);

void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options,
                      void * print_data, gboolean print_all);

void pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason);

node_t *pe_create_node(const char *id, const char *uname, const char *type,
                       const char *score, pe_working_set_t * data_set);
bool remote_id_conflict(const char *remote_name, pe_working_set_t *data);
void common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data);
resource_t *find_container_child(const char *stem, resource_t * rsc, node_t *node);
bool fix_remote_addr(resource_t * rsc);

#endif
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index a53be6ad14..bc343232d2 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2198 +1,2261 @@
/*
 * Copyright (C) 2004 Andrew Beekhof
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include pe_working_set_t *pe_dataset = NULL; extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled); /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return TRUE if node can be fenced, FALSE otherwise * * \note This function should only be called for cluster nodes and baremetal * remote nodes; guest nodes are fenced by stopping their container * resource, so fence execution requirements do not apply to them. */ bool pe_can_fence(pe_working_set_t * data_set, node_t *node) { if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return FALSE; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return FALSE; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return TRUE; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return TRUE; } else if(node == NULL) { return FALSE; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return TRUE; } crm_trace("Cannot fence %s", node->details->uname); return FALSE; } node_t * node_copy(const node_t *this_node) { node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable 
*result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { node_t *new_node = NULL; node_t *this_node = (node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { const node_t *node_a = a; const node_t *node_b = b; return strcmp(node_a->details->uname, node_b->details->uname); } void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * nodes) { GHashTable *hash = nodes; GHashTableIter iter; node_t *node = NULL; if (rsc) { hash = rsc->allowed_nodes; } if (rsc && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't show the allocation scores for orphans */ return; } if (level == 0) { char score[128]; int len = sizeof(score); /* For now we want this in sorted order to keep the regression tests happy */ GListPtr gIter = NULL; GListPtr list = g_hash_table_get_values(hash); list = g_list_sort(list, sort_node_uname); gIter = list; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } else if (hash) { char score[128]; int len = sizeof(score); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s allocation score on %s: %s", comment, rsc->id, node->details->uname, score); } else { do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment, node->details->uname, score); } } } if (rsc && rsc->children) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; dump_node_scores_worker(level, file, function, line, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; int len = 0; char *new_text = NULL; len = strlen(*dump_text) + strlen(" ") + strlen(key) + strlen("=") + strlen(value) + 1; new_text = calloc(1, len); sprintf(new_text, "%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(node->details->uname) + strlen(" capacity:") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { 
crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(rsc->id) + strlen(" utilization on ") + strlen(node->details->uname) + strlen(":") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } action_t * custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (g_list_length(possible_matches) > 1) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s", action->id, task, rsc ? rsc->id : "", on_node ? on_node->details->uname : ""); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d", optional ? "" : " mandatory", data_set->action_id, key, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", optional); } action = calloc(1, sizeof(action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); pe_rsc_trace(rsc, "Unset optional on %s", action->uuid); } /* Implied by calloc()... 
           action->actions_before = NULL;
           action->actions_after = NULL;
           action->pseudo = FALSE;
           action->dumped = FALSE;
           action->processed = FALSE;
           action->seen_count = 0;
         */

        action->extra = crm_str_table_new();
        action->meta = crm_str_table_new();
        action->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS);
        action->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);

        if (save_action) {
            data_set->actions = g_list_prepend(data_set->actions, action);
            if(rsc == NULL) {
                g_hash_table_insert(data_set->singletons, action->uuid, action);
            }
        }

        if (rsc != NULL) {
            action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);

            unpack_operation(action, action->op_entry, rsc->container, data_set);

            if (save_action) {
                rsc->actions = g_list_prepend(rsc->actions, action);
            }
        }

        if (save_action) {
            pe_rsc_trace(rsc, "Action %d created", action->id);
        }
    }

    if (optional == FALSE) {
        pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
        pe_clear_action_bit(action, pe_action_optional);
    }

    if (rsc != NULL) {
        enum action_tasks a_task = text2task(action->task);
        int warn_level = LOG_TRACE;

        if (save_action) {
            warn_level = LOG_WARNING;
        }

        if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
            && action->node != NULL && action->op_entry != NULL) {
            pe_set_action_bit(action, pe_action_have_node_attrs);
            unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS,
                                       action->node->details->attrs,
                                       action->extra, NULL, FALSE, data_set->now);
        }

        if (is_set(action->flags, pe_action_pseudo)) {
            /* leave untouched */

        } else if (action->node == NULL) {
            pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
-            pe_clear_action_bit(action, pe_action_runnable);
+            pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "node availability", pe_action_runnable, TRUE);

        } else if (is_not_set(rsc->flags, pe_rsc_managed)
                   && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) {
            crm_debug("Action %s (unmanaged)", action->uuid);
            pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
            pe_set_action_bit(action, pe_action_optional);
            /* action->runnable = FALSE; */

        } else if (action->node->details->online == FALSE
                   && (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) {
            pe_clear_action_bit(action, pe_action_runnable);
            do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
                       action->uuid, action->node->details->uname);
            if (is_set(action->rsc->flags, pe_rsc_managed)
                && save_action && a_task == stop_rsc
                && action->node->details->unclean == FALSE) {
                pe_fence_node(data_set, action->node, "resource actions are unrunnable");
            }

        } else if (action->node->details->pending) {
            pe_clear_action_bit(action, pe_action_runnable);
            do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
                       action->uuid, action->node->details->uname);

        } else if (action->needs == rsc_req_nothing) {
            pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
+            free(action->reason); action->reason = NULL;
            pe_set_action_bit(action, pe_action_runnable);
#if 0
            /*
             * No point checking this
             * - if we don't have quorum we can't stonith anyway
             */
        } else if (action->needs == rsc_req_stonith) {
            crm_trace("Action %s requires only stonith", action->uuid);
            action->runnable = TRUE;
#endif
        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                   && data_set->no_quorum_policy == no_quorum_stop) {
-            pe_clear_action_bit(action, pe_action_runnable);
+            pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE);
            crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);

        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                   && data_set->no_quorum_policy == no_quorum_freeze) {
            pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s",
                         rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
            if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
-                pe_clear_action_bit(action, pe_action_runnable);
+                pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE);
                pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
                             action->node->details->uname, action->uuid);
            }

        } else {
            pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
+            free(action->reason); action->reason = NULL;
            pe_set_action_bit(action, pe_action_runnable);
        }

        if (save_action) {
            switch (a_task) {
                case stop_rsc:
                    set_bit(rsc->flags, pe_rsc_stopping);
                    break;
                case start_rsc:
                    clear_bit(rsc->flags, pe_rsc_starting);
                    if (is_set(action->flags, pe_action_runnable)) {
                        set_bit(rsc->flags, pe_rsc_starting);
                    }
                    break;
                default:
                    break;
            }
        }
    }

    free(key);
    return action;
}

static const char *
unpack_operation_on_fail(action_t * action)
{
    const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);

    if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) {
        crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id);
        return NULL;
    } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
        /* demote on_fail defaults to master monitor value if present */
        xmlNode *operation = NULL;
        const char *name = NULL;
        const char *role = NULL;
        const char *on_fail = NULL;
        const char *interval = NULL;
        const char *enabled = NULL;

        CRM_CHECK(action->rsc != NULL, return NULL);

        for (operation = __xml_first_child(action->rsc->ops_xml);
             operation && !value; operation = __xml_next_element(operation)) {

            if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
                continue;
            }
            name = crm_element_value(operation, "name");
            role = crm_element_value(operation, "role");
            on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
            enabled = crm_element_value(operation, "enabled");
            interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            if (!on_fail) {
                continue;
            } else if (enabled && !crm_is_true(enabled)) {
                continue;
            } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
                continue;
            } else if (crm_get_interval(interval) <= 0) {
                continue;
            }

            value = on_fail;
        }
    }

    return value;
}

static xmlNode *
find_min_interval_mon(resource_t * rsc, gboolean include_disabled)
{
    int number = 0;
    int min_interval = -1;
    const char *name = NULL;
    const char *value = NULL;
    const char *interval = NULL;
    xmlNode *op = NULL;
    xmlNode *operation = NULL;

    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
         operation = __xml_next_element(operation)) {

        if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
            name = crm_element_value(operation, "name");
            interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            value = crm_element_value(operation, "enabled");
            if (!include_disabled && value && crm_is_true(value) == FALSE) {
                continue;
            }

            if (safe_str_neq(name, RSC_STATUS)) {
                continue;
            }

            number = crm_get_interval(interval);
            if (number < 0) {
                continue;
            }

            if (min_interval < 0 || number < min_interval) {
                min_interval = number;
                op = operation;
            }
        }
    }

    return op;
}

static int
unpack_start_delay(const char *value, GHashTable *meta)
{
    int start_delay = 0;

    if (value != NULL) {
        start_delay = crm_get_msec(value);
        if
(start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } static int unpack_interval_origin(const char *value, GHashTable *meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { int start_delay = 0; if (interval > 0 && value) { crm_time_t *origin = crm_time_new(value); if (origin && now) { crm_time_t *delay = NULL; int rc = crm_time_compare(origin, now); long long delay_s = 0; int interval_s = (interval / 1000); crm_trace("Origin: %s, interval: %d", value, interval_s); /* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */ while(rc > 0) { crm_time_add_seconds(origin, -interval_s); rc = crm_time_compare(origin, now); } /* Now find the first "multiple" that occurs after 'now' */ while (rc < 0) { crm_time_add_seconds(origin, interval_s); rc = crm_time_compare(origin, now); } delay = crm_time_calculate_duration(origin, now); crm_time_log(LOG_TRACE, "origin", origin, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "now", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration); delay_s = crm_time_get_seconds(delay); CRM_CHECK(delay_s >= 0, delay_s = 0); start_delay = delay_s * 1000; if (xml_obj) { crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj)); } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } crm_time_free(origin); crm_time_free(delay); } else if (!origin && xml_obj) { crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s", ID(xml_obj), value); } } return start_delay; } static int unpack_timeout(const char *value, action_t *action, xmlNode *xml_obj, unsigned long long interval, GHashTable *config_hash) { int timeout = 0; if (value == NULL && xml_obj == NULL && action && safe_str_eq(action->task, RSC_STATUS) && interval == 0) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); pe_rsc_trace(action->rsc, "\t%s uses the timeout value '%s' from the minimum interval monitor", action->uuid, value); } } if (value == NULL && config_hash) { value = pe_pref(config_hash, "default-action-timeout"); } timeout = crm_get_msec(value); if (timeout < 0) { timeout = 0; } return timeout; } static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { int start_delay = unpack_interval_origin(value, NULL, xml_obj, interval, now); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value, NULL, NULL, 0, NULL); crm_xml_add_int(attr, 
XML_NVPAIR_ATTR_VALUE, timeout); } } } } void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set) { unsigned long long interval = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; CRM_CHECK(action->rsc != NULL, return); unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); if (xml_obj) { xmlAttrPtr xIter = NULL; for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->meta, NULL, FALSE, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->versioned_parameters, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->versioned_meta, data_set->now); g_hash_table_remove(action->meta, "id"); field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval = crm_get_interval(value); if (interval > 0) { value_ms = crm_itoa(interval); g_hash_table_replace(action->meta, strdup(field), value_ms); } else { g_hash_table_remove(action->meta, field); } } /* @COMPAT data sets < 1.1.10 ("requires" on start action not resource) */ value = g_hash_table_lookup(action->meta, "requires"); if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (safe_str_eq(value, "nothing")) { action->needs = rsc_req_nothing; } else if (safe_str_eq(value, "quorum")) { action->needs = rsc_req_quorum; } else if (safe_str_eq(value, "unfencing")) { action->needs = rsc_req_stonith; set_bit(action->rsc->flags, pe_rsc_needs_unfencing); if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires unfencing but fencing is disabled", action->rsc->id); } } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && safe_str_eq(value, "fencing")) { action->needs = rsc_req_stonith; if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires fencing but fencing is disabled", action->rsc->id); } /* @COMPAT end compatibility code */ } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense"); action->on_fail = action_fail_stop; action->fail_role = 
RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* for baremetal remote nodes, ensure that any failure that results in * dropping an active connection to a remote node results in fencing of * the remote node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (is_rsc_baremetal_remote_node(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged baremetal remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence baremetal remote node (default)"; } else { value = "recover baremetal remote node connection (default)"; } if (action->rsc->remote_reconnect_interval) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); field = XML_OP_ATTR_START_DELAY; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); 
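            /* (Reviewer annotation, not part of the patch.) When no explicit
             * start-delay is configured, any interval-origin found here is
             * translated by unpack_interval_origin() into an equivalent
             * start-delay: the time from 'now' until the next interval
             * multiple after the configured origin.
             */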
unpack_interval_origin(value, action->meta, xml_obj, interval, data_set->now); } field = XML_ATTR_TIMEOUT; value = g_hash_table_lookup(action->meta, field); timeout = unpack_timeout(value, action, xml_obj, interval, data_set->config_hash); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); unpack_versioned_meta(action->versioned_meta, xml_obj, interval, data_set->now); } static xmlNode * find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled) { unsigned long long number = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } number = crm_get_interval(interval); match_key = generate_op_key(rsc->id, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = generate_op_key(rsc->clone_name, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = generate_op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = generate_op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? "" : ": ", (char *)key, (char *)value); } void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details) { long options = pe_print_log | pe_print_pending; if (rsc == NULL) { do_crm_log(log_level - 1, "%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? 
"" : ": "); return; } if (details) { options |= pe_print_details; } rsc->fns->print(rsc, pre_text, options, &log_level); } void pe_free_action(action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } if (action->versioned_parameters) { free_xml(action->versioned_parameters); } if (action->versioned_meta) { free_xml(action->versioned_meta); } free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, 
action->node->details->uname, on_node->details->uname); } } return result; } GListPtr find_actions_exact(GListPtr input, const char *key, node_t * on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("Matching %s against %s", key, action->uuid); if (safe_str_neq(key, action->uuid)) { crm_trace("Key mismatch: %s vs. %s", key, action->uuid); continue; } else if (on_node == NULL || action->node == NULL) { crm_trace("on_node=%p, action->node=%p", on_node, action->node); continue; } else if (safe_str_eq(on_node->details->id, action->node->details->id)) { result = g_list_prepend(result, action); } crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id); } return result; } static void resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag) { node_t *match = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = merge_weights(match->weight, score); } void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node_iter = (node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID); if (safe_str_eq(a_xml_id, b_xml_id)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unliklely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ int last_a = -1; int last_b = -1; crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %d vs %d", last_a, last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; int dummy = -1; const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic a"); } if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (ie. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (uber_parent(rsc)->variant == pe_master) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ CRM_ASSERT(lh_action != rh_action); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = lh_action->actions_after; for (; gIter != NULL; gIter = gIter->next) { action_wrapper_t *after = (action_wrapper_t *) gIter->data; if (after->action == rh_action && (after->type & order)) { return FALSE; } } wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = rh_action; wrapper->type = order; list = lh_action->actions_after; list = g_list_prepend(list, wrapper); lh_action->actions_after = list; wrapper = NULL; /* order |= pe_order_implies_then; */ /* order ^= pe_order_implies_then; */ wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = lh_action; wrapper->type = order; list = rh_action->actions_before; list = g_list_prepend(list, wrapper); rh_action->actions_before = list; return TRUE; } action_t * get_pseudo_op(const char *name, pe_working_set_t * data_set) { action_t *op = NULL; if(data_set->singletons) { op = g_hash_table_lookup(data_set->singletons, name); } if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_pseudo); set_bit(op->flags, pe_action_runnable); } return op; } void destroy_ticket(gpointer data) { ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } ticket_t * ticket_new(const char *ticket_id, pe_working_set_t * data_set) { ticket_t *ticket = NULL; if (ticket_id == NULL || strlen(ticket_id) == 0) { return NULL; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket); } ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(ticket_t)); if (ticket == NULL) { crm_err("Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creaing ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->granted = FALSE; ticket->last_granted = -1; ticket->standby = FALSE; ticket->state = crm_str_table_new(); g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); } return ticket; } static void filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) { int len = 0; char *name = NULL; char *match = NULL; if (param_set == NULL) { return; } if (param_set && param_string) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; name = NULL; len = strlen(prop_name) + 3; name = malloc(len); if(name) { sprintf(name, " %s ", prop_name); name[len - 1] = 0; match = strstr(param_string, name); } if (need_present && match == NULL) { crm_trace("%s not found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } else if (need_present == FALSE && match) { crm_trace("%s found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } free(name); } } } bool fix_remote_addr(resource_t * rsc) { const char *name; const char *value; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; const char *value_list[] = { "remote", "ocf", "pacemaker" }; if(rsc == NULL) { return FALSE; } name = "addr"; value = g_hash_table_lookup(rsc->parameters, name); if (safe_str_eq(value, "#uname") == FALSE) { return FALSE; } for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) { name = attr_list[lpc]; value = crm_element_value(rsc->xml, attr_list[lpc]); if (safe_str_eq(value, value_list[lpc]) == FALSE) 
{ return FALSE; } } return TRUE; } static void append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) { GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { crm_xml_add(params, key, value); } g_hash_table_destroy(hash); } static op_digest_cache_t * rsc_action_digest(resource_t * rsc, const char *task, const char *key, node_t * node, xmlNode * xml_op, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; data = g_hash_table_lookup(node->details->digest_cache, key); if (data == NULL) { GHashTable *local_rsc_params = crm_str_table_new(); action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); const char *op_version; const char *ra_version = NULL; const char *restart_list = NULL; const char *secure_list = " passwd password "; data = calloc(1, sizeof(op_digest_cache_t)); CRM_ASSERT(data != NULL); get_rsc_attributes(local_rsc_params, rsc, node, data_set); pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); if (fix_remote_addr(rsc)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside crm_xml_add(data->params_all, "addr", node->details->uname); crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); g_hash_table_foreach(action->extra, hash2field, data->params_all); g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); g_hash_table_foreach(action->meta, hash2metafield, data->params_all); if(xml_op) { secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); } else { op_version = CRM_FEATURE_SET; } append_versioned_params(local_versioned_params, ra_version, data->params_all); append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); append_versioned_params(action->versioned_parameters, ra_version, data->params_all); filter_action_parameters(data->params_all, op_version); g_hash_table_destroy(local_rsc_params); pe_free_action(action); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); if (is_set(data_set->flags, pe_flag_sanitized)) { data->params_secure = copy_xml(data->params_all); if(secure_list) { filter_parameters(data->params_secure, secure_list, FALSE); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } - if(crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { + if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { data->params_restart = copy_xml(data->params_all); if (restart_list) { filter_parameters(data->params_restart, restart_list, TRUE); } data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); } g_hash_table_insert(node->details->digest_cache, strdup(key), data); } return data; } op_digest_cache_t * rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; char *key = NULL; 
int interval = 0; const char *interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_all; const char *digest_restart; CRM_ASSERT(node != NULL); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); interval = crm_parse_int(interval_s, "0"); key = generate_op_key(rsc->id, task, interval); data = rsc_action_digest(rsc, task, key, node, xml_op, data_set); data->rc = RSC_DIGEST_MATCH; if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { data->rc = RSC_DIGEST_RESTART; } else if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { data->rc = RSC_DIGEST_ALL; } free(key); return data; } #define STONITH_DIGEST_TASK "stonith-on" static op_digest_cache_t * fencing_action_digest_cmp(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0); op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, data_set); const char *digest_all = g_hash_table_lookup(node->details->attrs, "digests-all"); const char *digest_secure = g_hash_table_lookup(node->details->attrs, "digests-secure"); /* No 'reloads' for fencing device changes * * We use the resource id + agent + digest so that we can detect * changes to the agent and/or the parameters used */ char *search_all = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_all_calc); char *search_secure = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc); data->rc = RSC_DIGEST_ALL; if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strstr(digest_all, search_all)) { data->rc = RSC_DIGEST_MATCH; } else if(digest_secure && data->digest_secure_calc) { if(strstr(digest_secure, search_secure)) { fprintf(stdout, "Only 'private' parameters to %s for unfencing %s changed\n", rsc->id, node->details->uname); data->rc = RSC_DIGEST_MATCH; } } if (data->rc == RSC_DIGEST_ALL && is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) { fprintf(stdout, "Parameters to %s for unfencing %s changed, try '%s:%s:%s'\n", rsc->id, node->details->uname, rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc); } free(key); free(search_all); free(search_secure); return data; } const char *rsc_printable_id(resource_t *rsc) { if (is_not_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void clear_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; clear_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; clear_bit_recursive(child_rsc, flag); } } void set_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; set_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_bit_recursive(child_rsc, flag); } } static GListPtr find_unfencing_devices(GListPtr candidates, GListPtr matches) { for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) { resource_t *candidate =
gIter->data; const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES); const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES); if(candidate->children) { matches = find_unfencing_devices(candidate->children, matches); } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) { continue; } else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) { matches = g_list_prepend(matches, candidate); } } return matches; } #define STONITH_DIGEST_TASK "stonith-on" action_t * pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set) { char *op_key = NULL; action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, op_key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if(is_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the check_action_definition() based stuff works fine. * * Use "stonith-on" to avoid creating cache entries for * operations check_action_definition() would look for. */ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = malloc(max); char *digests_secure = malloc(max); GListPtr matches = find_unfencing_devices(data_set->resources, NULL); for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) { resource_t *match = gIter->data; op_digest_cache_t *data = fencing_action_digest_cmp(match, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (is_set(data_set->flags, pe_flag_sanitized)) { /* Extra detail for those running from the commandline */ fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_secure_calc); } add_hash_param(stonith_op->meta, strdup("digests-all"), digests_all); add_hash_param(stonith_op->meta, strdup("digests-secure"), digests_secure); } } else { free(op_key); } if(optional == FALSE && pe_can_fence(data_set, node)) { - pe_action_required(stonith_op, reason); + pe_action_required(stonith_op, NULL, reason); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void trigger_unfencing( resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set) { if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if (rsc != NULL && is_not_set(rsc->flags, 
pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { action_t *unfence = pe_fence_op(node, "on", FALSE, reason, data_set); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { tag_t *tag = NULL; GListPtr gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag = calloc(1, sizeof(tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (crm_str_eq(existing_ref, obj_ref, TRUE)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } -void pe_action_required_worker(pe_action_t *action, const char *reason, const char *function, long line) +void pe_action_set_flag_reason(const char *function, long line, + pe_action_t *action, pe_action_t *reason, const char *text, + enum pe_action_flags flags, bool overwrite) +{ + bool unset = FALSE; + bool update = FALSE; + const char *change = NULL; + + if(is_set(flags, pe_action_runnable)) { + unset = TRUE; + change = "unrunnable"; + } else if(is_set(flags, pe_action_optional)) { + unset = TRUE; + change = "required"; + } else if(is_set(flags, pe_action_failure_is_fatal)) { + change = "fatally failed"; + } else if(is_set(flags, pe_action_migrate_runnable)) { + unset = TRUE; + overwrite = TRUE; + change = "unrunnable"; + } else if(is_set(flags, pe_action_dangle)) { + change = "dangling"; + } else if(is_set(flags, pe_action_requires_any)) { + change = "required"; + } else { + crm_err("Unknown flag change 0x%.16llx by %s: %s", (unsigned long long) flags, action->uuid, reason? reason->uuid : "unknown"); + } + + if(unset) { + if(is_set(action->flags, flags)) { + action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags); + update = TRUE; + } + + } else { + if(is_not_set(action->flags, flags)) { + action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags); + update = TRUE; + } + } + + if((change && update) || text) { + char *reason_text = NULL; + if(reason == NULL) { + pe_action_set_reason(action, text, overwrite); + + } else if(reason->rsc == NULL) { + reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:""); + } else { + reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA"); + } + + if(reason_text && action->rsc != reason->rsc) { + pe_action_set_reason(action, reason_text, overwrite); + } + free(reason_text); + } + } + +void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) { - if(is_set(action->flags, pe_action_optional)) { - action->flags = crm_clear_bit(function, line, action->uuid, action->flags, pe_action_optional); - if(action->reason == NULL) { +
if(action->reason == NULL || overwrite) { + free(action->reason); + if(reason) { action->reason = strdup(reason); + } else { + action->reason = NULL; } } } diff --git a/pengine/native.c b/pengine/native.c index 16c9d5aed9..63666fdfad 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,3325 +1,3330 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include /* #define DELETE_THEN_REFRESH 1 // The crmd will remove the resource from the CIB itself, making this redundant */ #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include gboolean update_action(action_t * then); void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set); gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); /* *INDENT-OFF* */ enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean 
(*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, }; /* *INDENT-ON* */ static action_t * get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current); static gboolean native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest wieght) with the fewest resources 3. remove color.chosen_node from all other colors */ GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; gboolean result = FALSE; process_utilization(rsc, &prefer, data_set); length = g_hash_table_size(rsc->allowed_nodes); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? TRUE : FALSE; } if(rsc->allowed_nodes) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0)); } if (prefer) { node_t *best = g_list_nth_data(nodes, 0); chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen && chosen->weight >= 0 && chosen->weight >= best->weight /* Possible alternative: (chosen->weight >= INFINITY || best->weight < INFINITY) */ && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Using preferred node %s for %s instead of choosing from %d candidates", chosen->details->uname, rsc->id, length); } else if (chosen && chosen->weight < 0) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else if (chosen && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); } } if (chosen == NULL && rsc->allowed_nodes) { chosen = g_list_nth_data(nodes, 0); pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ? chosen->details->uname : "", rsc->id, length); if (chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if (running && can_run_resources(running) == FALSE) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); running = NULL; } for (lpc = 1; lpc < length && running; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if (tmp->weight == chosen->weight) { multiple++; if (tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if (multiple > 1) { int log_level = LOG_INFO; static char score[33]; score2char_stack(chosen->weight, score, sizeof(score)); if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. 
Chose %s.", multiple, score, rsc->id, chosen->details->uname); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable * list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { int weight = node->weight; if (can_run_resources(node) == FALSE) { weight = -INFINITY; } if (weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if (safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if (safe_str_neq(attr, "#" XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node ? best_node : "", best_score); } return best_score; } static void node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list1); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { CRM_LOG_ASSERT(node != NULL); if(node == NULL) { continue; }; score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); new_score = merge_weights(factor * score, node->weight); if (factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO - Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %f*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f*%d (node < 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight > 0) { node->weight = INFINITY_HACK; crm_trace("%s: Filtering %d + %f*%d (score > 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight == 0) { crm_trace("%s: Filtering %d + %f*%d (score == 0)", node->details->uname, node->weight, factor, score); } else { crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score); node->weight = new_score; } } } GHashTable * node_hash_dup(GHashTable * hash) { /* Hack! 
*/ GListPtr list = g_hash_table_get_values(hash); GHashTable *result = node_hash_from_list(list); g_list_free(list); return result; } GHashTable * native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } GHashTable * rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { GHashTable *work = NULL; int multiplier = 1; if (factor < 0) { multiplier = -1; } if (is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); if (is_set(flags, pe_weights_init)) { if (rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last); work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags); } else { work = node_hash_dup(rsc->allowed_nodes); } clear_bit(flags, pe_weights_init); } else if (rsc->variant == pe_group && rsc->children) { GListPtr iter = rsc->children; pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id); work = node_hash_dup(nodes); for(iter = rsc->children; iter->next != NULL; iter = iter->next) { work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags); } } else { pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id); work = node_hash_dup(nodes); node_hash_update(work, rsc->allowed_nodes, attr, factor, is_set(flags, pe_weights_positive)); } if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id); g_hash_table_destroy(work); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if (can_run_any(work)) { GListPtr gIter = NULL; if (is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; crm_trace("Checking %d additional colocation constraints", g_list_length(gIter)); } else if(rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } gIter = ((resource_t*)last->data)->rsc_cons_lhs; crm_trace("Checking %d additional optional group colocation constraints from %s", g_list_length(gIter), ((resource_t*)last->data)->id); } else { gIter = rsc->rsc_cons_lhs; crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id); } for (; gIter != NULL; gIter = gIter->next) { resource_t *other = NULL; rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else { other = constraint->rsc_lh; } pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id); work = rsc_merge_weights(other, rhs, work, constraint->node_attribute, multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback); dump_node_scores(LOG_TRACE, NULL, rhs, work); } } if (is_set(flags, pe_weights_positive)) { node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->weight == INFINITY_HACK) { node->weight = 1; } } } if (nodes) { g_hash_table_destroy(nodes); } clear_bit(rsc->flags, pe_rsc_merging); return work; } node_t * native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr gIter = NULL; int alloc_details 
= scores_log_level + 1; if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; GHashTable *archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)", rsc->id, constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if (constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_hash_dup(rsc->allowed_nodes); } rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); if (archive && can_run_any(rsc->allowed_nodes) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive) { g_hash_table_destroy(archive); } } dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_rollback); } print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id); /* make sure it doesn't come up again */ resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } else if(rsc->next_role > rsc->role && is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); rsc->next_role = rsc->role; } dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; rsc->next_role = rsc->role; if (rsc->running_on == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if (is_set(rsc->flags, pe_rsc_failed)) { assign_to = rsc->running_on->data; reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to ? 
assign_to->details->uname : "'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if (is_set(data_set->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if (is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set)) { pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if (rsc->allocated_to == NULL) { if (is_not_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); if (rsc->is_remote_node) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); CRM_ASSERT(remote_node != NULL); if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) { crm_trace("Setting remote node %s to ONLINE", remote_node->details->id); remote_node->details->online = TRUE; /* We shouldn't consider an unseen remote-node unclean if we are going * to try and connect to it. Otherwise we get an unnecessary fence */ if (remote_node->details->unseen == TRUE) { remote_node->details->unclean = FALSE; } } else { crm_trace("Setting remote node %s to SHUTDOWN. next role = %s, allocated=%s", remote_node->details->id, role2text(rsc->next_role), rsc->allocated_to ? "true" : "false"); remote_node->details->shutdown = TRUE; } } return rsc->allocated_to; } static gboolean is_op_dup(resource_t * rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; CRM_ASSERT(rsc); for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); if (safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (value == NULL) { value = "0"; } if (safe_str_neq(value, interval)) { continue; } if (id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err ("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; /* Only process for the operations without role="Stopped" */ value = crm_element_value(operation, "role"); if (value && text2role(value) == RSC_ROLE_STOPPED) { return; } CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node ? 
node->details->uname : "n/a"); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s mandatory: not active", key); } else { GListPtr gIter = NULL; for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_reschedule)) { is_optional = FALSE; break; } } g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if (is_optional) { char *local_key = strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* it's running : cancel it */ mon = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(mon->task); free(mon->cancel_task); mon->task = strdup(RSC_CANCEL); mon->cancel_task = strdup(name); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if (local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, value ? 
value : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } else if (is_set(mon->flags, pe_action_optional) == FALSE) { pe_rsc_info(rsc, " Start recurring %s (%llus) for %s on %s", mon->task, interval_ms / 1000, rsc->id, crm_str(node_uname)); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp(rsc, start, node, operation, data_set); } } } } void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* TODO: Support of non-unique clone */ if (is_set(rsc->flags, pe_rsc_unique) == FALSE) { return; } /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring actions %s for %s in role %s on nodes where it'll not be running", ID(operation), rsc->id, role2text(rsc->next_role)); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", 
ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { action_t *cancel_op = NULL; char *local_key = strdup(key); g_list_free(possible_matches); cancel_op = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); free(cancel_op->cancel_task); cancel_op->task = strdup(RSC_CANCEL); cancel_op->cancel_task = strdup(name); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s", key, role, role2text(rsc->next_role), crm_str(node_uname)); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; char *stop_op_key = NULL; if (node_uname && safe_str_eq(stop_node_uname, node_uname)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); if (is_set(rsc->flags, pe_rsc_managed)) { char *probe_key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0); GListPtr probes = find_actions(rsc->actions, probe_key, stop_node); GListPtr pIter = NULL; for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; order_actions(probe, stopped_mon, pe_order_runnable_left); crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname); } g_list_free(probes); free(probe_key); } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_op_key = stop_key(rsc); stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t *) local_gIter->data; if (is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if (is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", 
crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, strdup(stop_op_key), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } free(stop_op_key); if (is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } free(key); } void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } static void handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set) { action_t *migrate_to = NULL; action_t *migrate_from = NULL; action_t *start = NULL; action_t *stop = NULL; gboolean partial = rsc->partial_migration_target ? TRUE : FALSE; pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s", rsc->id, current->details->id, chosen->details->id, partial ? 
"TRUE" : "FALSE"); start = start_action(rsc, chosen, TRUE); stop = stop_action(rsc, current, TRUE); if (partial == FALSE) { migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set); } migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set); if ((migrate_to && migrate_from) || (migrate_from && partial)) { set_bit(start->flags, pe_action_migrate_runnable); set_bit(stop->flags, pe_action_migrate_runnable); update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */ /* order probes before migrations */ if (partial) { set_bit(migrate_from->flags, pe_action_migrate_runnable); migrate_from->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set); } else { set_bit(migrate_from->flags, pe_action_migrate_runnable); set_bit(migrate_to->flags, pe_action_migrate_runnable); migrate_to->needs = start->needs; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); } custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set); } if (migrate_to) { add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); /* pcmk remote connections don't require pending to be recorded in cib. * We can optimize cib writes by only setting PENDING for non pcmk remote * connection resources */ if (rsc->is_remote_node == FALSE) { /* migrate_to takes place on the source node, but can * have an effect on the target node depending on how * the agent is written. Because of this, we have to maintain * a record that the migrate_to occurred incase the source node * loses membership while the migrate_to action is still in-flight. */ add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true"); } } if (migrate_from) { add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname); add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname); } } void native_create_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *start = NULL; node_t *chosen = NULL; node_t *current = NULL; gboolean need_stop = FALSE; gboolean is_moving = FALSE; gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? 
TRUE : FALSE; GListPtr gIter = NULL; int num_active_nodes = 0; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; CRM_ASSERT(rsc); chosen = rsc->allocated_to; if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } else if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc, role2text(rsc->role), role2text(rsc->next_role)); if (rsc->running_on) { current = rsc->running_on->data; } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; if (rsc->partial_migration_source && (n->details == rsc->partial_migration_source->details)) { current = rsc->partial_migration_source; } num_active_nodes++; } for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop = stop_action(rsc, current, FALSE); set_bit(stop->flags, pe_action_dangle); pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, current->details->uname); if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, FALSE, data_set); } } if (num_active_nodes > 1) { if (num_active_nodes == 2 && chosen && rsc->partial_migration_target && rsc->partial_migration_source && (current->details == rsc->partial_migration_source->details) && (chosen->details == rsc->partial_migration_target->details)) { /* Here the chosen node is still the migration target from a partial * migration. Attempt to continue the migration instead of recovering * by stopping the resource everywhere and starting it on a single node. */ pe_rsc_trace(rsc, "Will attempt to continue with a partial migration to target %s from %s", rsc->partial_migration_target->details->id, rsc->partial_migration_source->details->id); } else { const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->partial_migration_target && rsc->partial_migration_source) { crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too", rsc->id, rsc->partial_migration_target->details->uname, rsc->partial_migration_source->details->uname); } else { pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s", rsc->id, class, type, num_active_nodes, recovery2text(rsc->recovery_type)); crm_warn("See %s for more information.", "http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active"); } if (rsc->recovery_type == recovery_stop_start) { need_stop = TRUE; } /* If by chance a partial migration is in process, * but the migration target is not chosen still, clear all * partial migration data. 
*/ rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = FALSE; } } if (is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, chosen, TRUE); set_bit(start->flags, pe_action_print_always); } if (current && chosen && current->details != chosen->details) { pe_rsc_trace(rsc, "Moving %s", rsc->id); is_moving = TRUE; need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Recovering %s", rsc->id); need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Block %s", rsc->id); need_stop = TRUE; } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) { /* Recovery of a promoted resource */ start = start_action(rsc, chosen, TRUE); if (is_set(start->flags, pe_action_optional) == FALSE) { pe_rsc_trace(rsc, "Forced start %s", rsc->id); need_stop = TRUE; } } pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); /* Create any additional actions required when bringing resource down and * back up to same level. */ role = rsc->role; while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) { break; } role = next_role; } while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { next_role = rsc_state_matrix[role][rsc->role]; pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop ? " required" : ""); if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA"); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if(is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "No monitor additional ops for blocked resource"); } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Monitor ops for active resource"); start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { pe_rsc_trace(rsc, "Monitor ops for in-active resource"); Recurring_Stopped(rsc, NULL, NULL, data_set); } /* if we are stuck in a partial migration, where the target * of the partial migration no longer matches the chosen target. * A full stop/start is required */ if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) { pe_rsc_trace(rsc, "Not allowing partial migration to continue. 
%s", rsc->id); allow_migrate = FALSE; } else if (is_moving == FALSE || is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || (current->details->unclean == TRUE) || rsc->next_role < RSC_ROLE_STARTED) { allow_migrate = FALSE; } if (allow_migrate) { handle_migration_actions(rsc, current, chosen, data_set); } } static void rsc_avoids_remote_nodes(resource_t *rsc) { GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc) { node->weight = -INFINITY; } } } void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ resource_t *top = uber_parent(rsc); int type = pe_order_optional | pe_order_implies_then | pe_order_restart; gboolean is_stonith = is_set(rsc->flags, pe_rsc_fence_device); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set); if (top->variant == pe_master || rsc->role > RSC_ROLE_SLAVE) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } if (is_stonith == FALSE && is_set(data_set->flags, pe_flag_enable_unfencing) && is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Check if the node needs to be unfenced first */ node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. 
An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pe_order_optional|pe_order_same_node, data_set); custom_action_order(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pe_order_implies_then_on_node|pe_order_same_node, data_set); } } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(all_stopped->task), all_stopped, pe_order_implies_then | pe_order_runnable_left, data_set); } if (g_hash_table_size(rsc->utilization) > 0 && safe_str_neq(data_set->placement_strategy, "default")) { GHashTableIter iter; node_t *next = NULL; GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&next)) { char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_load, data_set); free(load_stopped_task); } } if (rsc->container) { resource_t *remote_rsc = NULL; /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else if (rsc->is_remote_node == FALSE) { remote_rsc = rsc_contains_remote_node(data_set, rsc->container); } if (remote_rsc) { /* The container represents a Pacemaker Remote node, so force the * resource on the Pacemaker Remote node instead of colocating the * resource with the container resource. 
*/ GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. */ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, data_set); if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } rsc_colocation_new("resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, data_set); } } if (rsc->is_remote_node || is_stonith) { /* don't allow remote nodes to run stonith devices * or remote connection resources.*/ rsc_avoids_remote_nodes(rsc); } /* If this is a guest node's implicit remote connection, do not allow the * guest resource to live on a Pacemaker Remote node, to avoid nesting * remotes. However, allow bundles to run on remote nodes. */ if (rsc->is_remote_node && rsc->container && is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } } void native_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } enum filter_colocation_res filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint, gboolean preview) { if (constraint->score == 0) { return influence_nothing; } /* rh side must be allocated before we can process constraint */ if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) { return influence_nothing; } if ((constraint->role_lh >= RSC_ROLE_SLAVE) && rsc_lh->parent && rsc_lh->parent->variant == pe_master && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* LH and RH resources have already been allocated, place the correct * priority oh LH rsc for the given multistate resource role */ return influence_rsc_priority; } if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if ((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return influence_nothing; } details_rh = rsc_rh->allocated_to ? rsc_rh->allocated_to->details : NULL; details_lh = rsc_lh->allocated_to ? rsc_lh->allocated_to->details : NULL; if (constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. %s", rsc_lh->id, rsc_rh->id, details_lh ? details_lh->uname : "n/a", details_rh ? 
details_rh->uname : "n/a"); } else if (constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh ? details_rh->uname : "n/a"); } return influence_nothing; } if (constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s", role2text(constraint->role_lh), role2text(rsc_lh->next_role)); return influence_nothing; } if (constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if (constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { crm_trace("LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_lh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { crm_trace("RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } return influence_rsc_location; } static void influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *rh_value = NULL; const char *lh_value = NULL; const char *attribute = "#id"; int score_multiplier = 1; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { return; } lh_value = g_hash_table_lookup(rsc_lh->allocated_to->details->attrs, attribute); rh_value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); if (!safe_str_eq(lh_value, rh_value)) { if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) { rsc_lh->priority = -INFINITY; } return; } if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) { return; } if (constraint->role_lh == RSC_ROLE_SLAVE) { score_multiplier = -1; } rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority); } static void colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if (constraint->score < 0) { /* nothing to do: * anti-colocation with something that is not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { tmp = g_hash_table_lookup(node->details->attrs, attribute); if (do_check && safe_str_eq(tmp, value)) { if (constraint->score < INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights(constraint->score, node->weight); } } else if (do_check == FALSE || constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check ? 
"failed" : "unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { static char score[33]; score2char_stack(constraint->score, score, sizeof(score)); pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { enum filter_colocation_res filter_results; CRM_ASSERT(rsc_lh); CRM_ASSERT(rsc_rh); filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE); pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: /*Promotion score will be set to -INFINITY in master_promotion_order() */ if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pe_fence_node(data_set, node, "deadman ticket was lost"); } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (g_list_length(rsc_lh->running_on) > 0) { clear_bit(rsc_lh->flags, pe_rsc_managed); set_bit(rsc_lh->flags, pe_rsc_block); } break; } } else if 
(rsc_ticket->ticket->granted == FALSE) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if (rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(action_t * action, node_t * node) { return action->flags; } enum pe_graph_flags native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, then->uuid, then->flags); if (type & pe_order_asymmetrical) { resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0; if (!then_rsc) { /* ignore */ } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) { /* ignore... if 'then' is supposed to be stopped after 'first', but * then is already stopped, there is nothing to be done when non-symmetrical. */ } else if ((then_rsc_role >= RSC_ROLE_STARTED) && safe_str_eq(then->task, RSC_START) && then->node && then_rsc->running_on && g_list_length(then_rsc->running_on) == 1 && then->node->details == ((node_t *) then_rsc->running_on->data)->details) { /* ignore... if 'then' is supposed to be started after 'first', but * then is already started, there is nothing to be done when non-symmetrical. */ } else if (!(first->flags & pe_action_runnable)) { /* prevent 'then' action from happening if 'first' is not runnable and * 'then' has not yet occurred. */ - pe_clear_action_bit(then, pe_action_runnable); - pe_clear_action_bit(then, pe_action_optional); + pe_action_implies(then, first, pe_action_optional); + pe_action_implies(then, first, pe_action_runnable); + pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid); } else { /* ignore... then is allowed to start/stop if it wants to. */ } } if (type & pe_order_implies_first) { - if ((filter & pe_action_optional) && (flags & pe_action_optional) == 0) { + if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) { + // Needs is_set(first_flags, pe_action_optional) too? 
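The + lines in this hunk replace direct pe_clear_action_bit() calls with pe_action_implies(). Its definition is not visible here; judging only from the call sites, it clears the given flag on the first action because of the second, so the cause can be surfaced later (see the log_change() rework further down). A minimal sketch of that idea with hypothetical types, not the real pe_action_t or pe_action_implies():

#include <stdio.h>

/* Hypothetical, stripped-down action: a name, a flag word and the last
 * reason a flag was taken away. Not the Pacemaker pe_action_t. */
struct demo_action {
    const char *uuid;
    unsigned int flags;
    const char *reason;
};

enum { DEMO_OPTIONAL = 0x1, DEMO_RUNNABLE = 0x2 };

/* Clear 'flag' on 'action' because of 'reason_action', remembering why.
 * The real pe_action_implies() presumably does something similar so that
 * the transition summary can print "due to <reason>". */
static void demo_action_implies(struct demo_action *action,
                                const struct demo_action *reason_action,
                                unsigned int flag)
{
    if (action->flags & flag) {
        action->flags &= ~flag;
        action->reason = reason_action->uuid;
        printf("cleared 0x%x on %s because of %s\n",
               flag, action->uuid, reason_action->uuid);
    }
}

int main(void)
{
    struct demo_action first = { "rsc1_stop_0", 0, NULL };
    struct demo_action then = { "rsc1_start_0", DEMO_OPTIONAL | DEMO_RUNNABLE, NULL };

    /* 'first' is not runnable, so 'then' must be neither optional nor runnable. */
    demo_action_implies(&then, &first, DEMO_OPTIONAL);
    demo_action_implies(&then, &first, DEMO_RUNNABLE);
    return 0;
}

Recording which action caused the flag change is what lets the updated summaries below say things like "due to unrunnable myclone-clone stop".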
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); - - pe_clear_action_bit(first, pe_action_optional); + pe_action_implies(first, then, pe_action_optional); } if (is_set(flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE && is_set(then->flags, pe_action_optional) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); - pe_clear_action_bit(first, pe_action_migrate_runnable); + pe_action_implies(first, then, pe_action_migrate_runnable); } } if (type & pe_order_implies_first_master) { if ((filter & pe_action_optional) && ((then->flags & pe_action_optional) == FALSE) && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) { - pe_clear_action_bit(first, pe_action_optional); + pe_action_implies(first, then, pe_action_optional); if (is_set(first->flags, pe_action_migrate_runnable) && is_set(then->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid); - pe_clear_action_bit(first, pe_action_migrate_runnable); + pe_action_implies(first, then, pe_action_migrate_runnable); } pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); } } if ((type & pe_order_implies_first_migratable) && is_set(filter, pe_action_optional)) { if (((then->flags & pe_action_migrate_runnable) == FALSE) || ((then->flags & pe_action_runnable) == FALSE)) { pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid); - pe_clear_action_bit(first, pe_action_runnable); + pe_action_implies(first, then, pe_action_runnable); } if ((then->flags & pe_action_optional) == 0) { - pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid); - pe_clear_action_bit(first, pe_action_optional); + pe_action_implies(first, then, pe_action_optional); } } if ((type & pe_order_pseudo_left) && is_set(filter, pe_action_optional)) { if ((first->flags & pe_action_runnable) == FALSE) { - pe_clear_action_bit(then, pe_action_migrate_runnable); + pe_action_implies(then, first, pe_action_migrate_runnable); pe_clear_action_bit(then, pe_action_pseudo); pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid); } } if (is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(then->flags, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); - pe_clear_action_bit(then, pe_action_runnable); - pe_clear_action_bit(then, pe_action_migrate_runnable); + pe_action_implies(then, first, pe_action_runnable); + pe_action_implies(then, first, pe_action_migrate_runnable); } if (is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(then->flags, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { /* in this case, treat migrate_runnable as if first is optional */ if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); - pe_clear_action_bit(then, pe_action_optional); + pe_action_implies(then, first, pe_action_optional); } } if (is_set(type, pe_order_restart)) { const char *reason = NULL; CRM_ASSERT(first->rsc && first->rsc->variant == pe_native); CRM_ASSERT(then->rsc && then->rsc->variant == pe_native); if ((filter & 
pe_action_runnable) && (then->flags & pe_action_runnable) == 0 && (then->rsc->flags & pe_rsc_managed)) { reason = "shutdown"; } if ((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) { reason = "recover"; } if (reason && is_set(first->flags, pe_action_optional)) { if (is_set(first->flags, pe_action_runnable) || is_not_set(then->flags, pe_action_optional)) { pe_rsc_trace(first->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); - pe_clear_action_bit(first, pe_action_optional); + pe_action_implies(first, then, pe_action_optional); } } if (reason && is_not_set(first->flags, pe_action_optional) && is_not_set(first->flags, pe_action_runnable)) { pe_rsc_trace(then->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); - pe_clear_action_bit(then, pe_action_runnable); + pe_action_implies(then, first, pe_action_runnable); } if (reason && is_not_set(first->flags, pe_action_optional) && is_set(first->flags, pe_action_migrate_runnable) && is_not_set(then->flags, pe_action_migrate_runnable)) { - pe_clear_action_bit(first, pe_action_migrate_runnable); + pe_action_implies(first, then, pe_action_migrate_runnable); } } if (then_flags != then->flags) { changed |= pe_graph_updated_then; pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); if(then->rsc && then->rsc->parent) { /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */ update_action(then); } } if (first_flags != first->flags) { changed |= pe_graph_updated_first; pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; if (constraint == NULL) { pe_err("Constraint is NULL"); return; } else if (rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) { pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. 
%s)", constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role)); return; } else if (is_active(constraint) == FALSE) { pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id); return; } for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *other_node = NULL; other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights(other_node->weight, node->weight); } else { other_node = node_copy(node); + pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode); g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node); } if (other_node->rsc_discover_mode < constraint->discover_mode) { if (constraint->discover_mode == discover_exclusive) { rsc->exclusive_discover = TRUE; } /* exclusive > never > always... always is default */ other_node->rsc_discover_mode = constraint->discover_mode; } } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } -#define log_change(fmt, args...) do { \ - if(terminal) { \ - printf(" * "fmt"\n", ##args); \ - } else { \ - crm_notice(fmt, ##args); \ - } \ +#define log_change(a, fmt, args...) 
do { \ + if(a && a->reason && terminal) { \ + printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \ + } else if(a && a->reason) { \ + crm_notice(fmt" \tdue to %s", ##args, a->reason); \ + } else if(terminal) { \ + printf(" * "fmt"\n", ##args); \ + } else { \ + crm_notice(fmt, ##args); \ + } \ } while(0) #define STOP_SANITY_ASSERT(lineno) do { \ if(current && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if(stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if(is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \ CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \ } \ } while(0) void LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { node_t *next = NULL; node_t *current = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *demote = NULL; action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if(rsc->variant == pe_container) { container_LogActions(rsc, data_set, terminal); return; } if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { if (g_list_length(rsc->running_on) > 1 && rsc->partial_migration_source) { current = rsc->partial_migration_source; } else { current = rsc->running_on->data; } if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed) ? 
" unmanaged" : ""); return; } if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } key = start_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = stop_key(rsc); if(start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { possible_matches = find_actions(rsc->actions, key, NULL); } else { possible_matches = find_actions(rsc->actions, key, current); } free(key); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } key = promote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } key = demote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { action_t *migrate_to = NULL; key = generate_op_key(rsc->id, RSC_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { migrate_to = possible_matches->data; } CRM_CHECK(next != NULL,); if (next == NULL) { } else if (migrate_to && is_set(migrate_to->flags, pe_action_runnable) && current) { - log_change("Migrate %s\t(%s %s -> %s)", + log_change(start, "Migrate %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start == NULL || is_set(start->flags, pe_action_optional)) { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { - log_change("Stop %s\t(%s %s%s)", rsc->id, role2text(rsc->role), current?current->details->uname:"N/A", + log_change(start, "Stop %s\t(%s %s%s)", rsc->id, role2text(rsc->role), current?current->details->uname:"N/A", stop && is_not_set(stop->flags, pe_action_runnable) ? " - blocked" : ""); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { - log_change("%s %s\t(%s %s -> %s)", + log_change(stop, "%s %s\t(%s %s -> %s)", is_set(rsc->flags, pe_rsc_failed) ? 
"Recover" : "Move ", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if (is_set(rsc->flags, pe_rsc_failed)) { - log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + log_change(stop, "Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); STOP_SANITY_ASSERT(__LINE__); } else { - log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); + log_change(start, "Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */ } g_list_free(possible_matches); return; } if (rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if (current != NULL) { gboolean allowed = FALSE; if (demote != NULL && (demote->flags & pe_action_runnable)) { allowed = TRUE; } - log_change("Demote %s\t(%s -> %s %s%s)", + log_change(demote, "Demote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname, allowed ? "" : " - blocked"); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->next_role > RSC_ROLE_STOPPED && moving == FALSE) { if (is_set(rsc->flags, pe_rsc_failed)) { - log_change("Recover %s\t(%s %s)", + log_change(stop, "Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); STOP_SANITY_ASSERT(__LINE__); } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), + log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { - log_change("Restart %s\t(%s %s)", + log_change(start, "Restart %s\t(%s %s)", rsc->id, role2text(rsc->next_role), next->details->uname); STOP_SANITY_ASSERT(__LINE__); } } } } else if (rsc->next_role == RSC_ROLE_STOPPED) { GListPtr gIter = NULL; CRM_CHECK(current != NULL,); key = stop_key(rsc); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; action_t *stop_op = NULL; gboolean allowed = FALSE; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op && (stop_op->flags & pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); allowed = TRUE; } - log_change("Stop %s\t(%s%s)", rsc->id, node->details->uname, - allowed ? "" : " - blocked"); + log_change(start, "Stop %s\t(%s%s) %s", rsc->id, node->details->uname, + allowed ? "" : " - blocked", stop->reason?stop->reason:""); } free(key); } if (moving) { - log_change("Move %s\t(%s %s -> %s)", + log_change(stop, "Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->next_role), current->details->uname, next->details->uname); STOP_SANITY_ASSERT(__LINE__); } if (rsc->role == RSC_ROLE_STOPPED) { gboolean allowed = FALSE; if (start && (start->flags & pe_action_runnable)) { allowed = TRUE; } CRM_CHECK(next != NULL,); if (next != NULL) { - log_change("Start %s\t(%s%s)", rsc->id, next->details->uname, + log_change(start, "Start %s\t(%s%s)", rsc->id, next->details->uname, allowed ? 
"" : " - blocked"); } if (allowed == FALSE) { return; } } if (rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { gboolean allowed = FALSE; CRM_LOG_ASSERT(next); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->role > RSC_ROLE_STOPPED) { if (is_set(rsc->flags, pe_rsc_failed)) { - log_change("Recover %s\t(%s %s)", + log_change(stop, "Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next?next->details->uname:NULL); STOP_SANITY_ASSERT(__LINE__); } else if (is_set(rsc->flags, pe_rsc_reload)) { - log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), + log_change(start, "Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next?next->details->uname:NULL); STOP_SANITY_ASSERT(__LINE__); } else { - log_change("Restart %s\t(%s %s)", + log_change(start, "Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next?next->details->uname:NULL); STOP_SANITY_ASSERT(__LINE__); } } if (promote && (promote->flags & pe_action_runnable)) { allowed = TRUE; } - log_change("Promote %s\t(%s -> %s %s%s)", + log_change(promote, "Promote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next?next->details->uname:NULL, allowed ? "" : " - blocked"); } } gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop; if (rsc->partial_migration_target) { if (rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if (is_not_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set); const char *unfenced = g_hash_table_lookup(current->details->attrs, XML_NODE_IS_UNFENCED); order_actions(stop, unfence, pe_order_implies_first); if (unfenced == NULL || safe_str_eq("0", unfenced)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname); } } } return TRUE; } gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { action_t *start = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s on %s %d", rsc->id, next ? 
next->details->uname : "N/A", optional); start = start_action(rsc, next, TRUE); if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(next, "on", TRUE, NULL, data_set); const char *unfenced = g_hash_table_lookup(next->details->attrs, XML_NODE_IS_UNFENCED); order_actions(unfence, start, pe_order_implies_then); if (unfenced == NULL || safe_str_eq("0", unfenced)) { char *reason = crm_strdup_printf("Required by %s", rsc->id); trigger_unfencing(NULL, next, reason, NULL, data_set); free(reason); } } if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } return TRUE; } gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; CRM_ASSERT(rsc); CRM_CHECK(next != NULL, return FALSE); pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t *) gIter->data; if (is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if (runnable) { promote_action(rsc, next, optional); return TRUE; } pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t *) gIter->data; update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A"); demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A"); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? 
pe_order_implies_then : pe_order_optional, data_set); return TRUE; } #include <../lib/pengine/unpack.h> #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; static char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if (last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len - 1; while (complete == FALSE && lpc > 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; last_rsc_id = calloc(1, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len + 1] = 0; complete = TRUE; free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); return NULL; break; } } return last_rsc_id; } static node_t * probe_grouped_clone(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { node_t *running = NULL; resource_t *top = uber_parent(rsc); if (running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. * * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while (peer && running == NULL) { running = pe_hash_table_lookup(peer->known_on, node->details->id); if (running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active clone: %s", rsc->id); free(clone_id); return running; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } free(clone_id); } return running; } gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { enum pe_ordering flags = pe_order_optional; char *key = NULL; action_t *probe = NULL; node_t *running = NULL; node_t *allowed = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } else if (force == FALSE && is_container_remote_node(node)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s on container %s", rsc->id, node->details->id); return FALSE; } if (is_remote_node(node)) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) 
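increment_clone() above advances the numeric suffix of an anonymous clone instance ID in place, digit by digit (e.g. myclone:9 becomes myclone:10), so probe_grouped_clone() can walk sibling instances. A simplified standalone equivalent, assuming the ID always ends in ":<number>" and using strtol()/snprintf() instead of the manual carry loop:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated ID with the trailing ":<n>" suffix incremented,
 * or NULL if the ID has no such suffix. Simplified stand-in for the in-place
 * digit-carry loop in increment_clone(); the caller frees the result. */
static char *increment_clone_simple(const char *rsc_id)
{
    const char *colon = strrchr(rsc_id, ':');
    size_t prefix_len;
    char *out;
    long n;

    if (colon == NULL || colon[1] == '\0') {
        return NULL;
    }
    prefix_len = (size_t)(colon - rsc_id);
    n = strtol(colon + 1, NULL, 10);

    out = malloc(prefix_len + 24);      /* prefix + ':' + digits + NUL */
    if (out) {
        snprintf(out, prefix_len + 24, "%.*s:%ld", (int)prefix_len, rsc_id, n + 1);
    }
    return out;
}

int main(void)
{
    char *next = increment_clone_simple("myclone:9");

    if (next) {
        printf("%s\n", next);   /* prints "myclone:10" */
        free(next);
    }
    return 0;
}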
{ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes do not run stonith agents.", rsc->id, node->details->id); return FALSE; } else if (rsc_contains_remote_node(data_set, rsc)) { pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run resources that contain connection resources.", rsc->id, node->details->id); return FALSE; } else if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run connection resources", rsc->id, node->details->id); return FALSE; } } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return any_created; } else if ((rsc->container) && (!rsc->is_remote_node)) { pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id); return FALSE; } if (is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } running = g_hash_table_lookup(rsc->known_on, node->details->id); if (running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) { /* Anonymous clones */ if (rsc->parent == top) { running = g_hash_table_lookup(rsc->parent->known_on, node->details->id); } else { /* Grouped anonymous clones need extra special handling */ running = probe_grouped_clone(rsc, node, data_set); } } if (force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname); return FALSE; } allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (rsc->exclusive_discover || top->exclusive_discover) { if (allowed == NULL) { /* exclusive discover is enabled and this node is not in the allowed list. */ return FALSE; } else if (allowed->rsc_discover_mode != discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on */ return FALSE; } } if (allowed && allowed->rsc_discover_mode == discover_never) { /* this resource is marked as not needing to be discovered on this node */ return FALSE; } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); /* If enabled, require unfencing before probing any fence devices * but ensure it happens after any resources that require * unfencing have been probed. * * Doing it the other way (requiring unfencing after probing * resources that need it) would result in the node being * unfenced, and all its resources being stopped, whenever a new * resource is added. Which would be highly suboptimal. * * So essentially, at the point the fencing device(s) have been * probed, we know the state of all resources that require * unfencing and that unfencing occurred. */ if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); order_actions(unfence, probe, pe_order_optional); } /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. 
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role), is_set(probe->flags, pe_action_runnable), rsc->running_on); if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { top = rsc; } else if (pe_rsc_is_clone(top) == FALSE) { top = rsc; } else { crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id); } if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) { /* Prevent the start from occuring if rsc isn't active, but * don't cause it to stop if it was active already */ flags |= pe_order_runnable_left; } custom_action_order(rsc, NULL, probe, top, generate_op_key(top->id, RSC_START, 0), NULL, flags, data_set); /* Before any reloads, if they exist */ custom_action_order(rsc, NULL, probe, top, reload_key(rsc), NULL, pe_order_optional, data_set); if (node->details->shutdown == FALSE) { custom_action_order(rsc, NULL, probe, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional, data_set); } if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Normally rsc.start depends on probe complete which depends * on rsc.probe. But this can't be the case in this scenario as * it would create graph loops. * * So instead we explicitly order 'rsc.probe then rsc.start' */ } else { order_actions(probe, complete, pe_order_implies_then); } return TRUE; } static void native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { node_t *target; GListPtr gIter = NULL; action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if(action->needs == rsc_req_nothing) { /* Anything other than start or promote requires nothing */ } else if (action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_optional); } else if (safe_str_eq(action->task, RSC_START) && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { /* if known == NULL, then we don't know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * to with the resource * * it's analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explanation is that the * DC died and took its status with it */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_optional | pe_order_runnable_left); } } } static void native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; GListPtr action_list = NULL; action_t *start = NULL; resource_t *top = uber_parent(rsc); node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Check whether the resource has a pending start action */ start = find_first_action(rsc->actions, NULL, 
CRMD_ACTION_START, NULL); /* Get a list of stop actions potentially implied by the fencing */ key = stop_key(rsc); action_list = find_actions(rsc->actions, key, target); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The stop would never complete and is now implied by the fencing, * so convert it into a pseudo-action. */ update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_implied_by_stonith, __FUNCTION__, __LINE__); if(start == NULL || start->needs > rsc_req_quorum) { enum pe_ordering flags = pe_order_optional; action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); if (target->details->remote_rsc) { /* User constraints must not order a resource in a guest node * relative to the guest node container resource. This flag * marks constraints as generated by the cluster and thus * immune to that check. */ flags |= pe_order_preserve; } order_actions(stonith_op, action, flags); order_actions(stonith_op, parent_stop, flags); } if (is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ create_secondary_notification(action, rsc, stonith_op, data_set); } /* From Bug #1601, successful fencing must be an input to a failed resources stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). 
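The comment above (continued below) explains why the old "stop healthy resources before the stonith op" ordering stays disabled: with group(rA, rB) on the fenced node, adding "C requires A" on top of "A requires B" and "B requires C" closes a dependency cycle and the transition can never run. A small standalone illustration of that reasoning, modelling the three orderings as a graph and testing it for a cycle (hypothetical helper, not scheduler code):

#include <stdio.h>

/* Three "requires" edges from the discussion above:
 *   A (rA.stop)   requires B (pseudo rB.stop)
 *   B             requires C (stonith nodeX)
 *   C             requires A   <- the edge the disabled block would add
 * needs[x][y] == 1 means "x requires y". */
#define N 3

static int has_cycle(int needs[N][N], int node, int *state)
{
    int next;

    state[node] = 1;                        /* in progress */
    for (next = 0; next < N; next++) {
        if (needs[node][next]) {
            if (state[next] == 1) {
                return 1;                   /* back edge: cycle */
            }
            if (state[next] == 0 && has_cycle(needs, next, state)) {
                return 1;
            }
        }
    }
    state[node] = 2;                        /* done */
    return 0;
}

int main(void)
{
    /*                     A  B  C  */
    int needs[N][N] = { { 0, 1, 0 },        /* A requires B */
                        { 0, 0, 1 },        /* B requires C */
                        { 1, 0, 0 } };      /* C requires A (problem edge) */
    int state[N] = { 0, 0, 0 };

    printf("cycle: %s\n", has_cycle(needs, 0, state) ? "yes" : "no");

    needs[2][0] = 0;                        /* drop "C requires A": no cycle */
    state[0] = state[1] = state[2] = 0;
    printf("cycle: %s\n", has_cycle(needs, 0, state) ? "yes" : "no");
    return 0;
}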
TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ key = demote_key(rsc); action_list = find_actions(rsc->actions, key, target); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. */ update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); if (start == NULL || start->needs > rsc_req_quorum) { order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); } else { native_start_constraints(rsc, stonith_op, data_set); native_stop_constraints(rsc, stonith_op, data_set); } } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static action_t * get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current) { action_t *a = NULL; GListPtr action_list = NULL; char *key = generate_op_key(rsc->id, action, 0); action_list = find_actions(rsc->actions, key, current); if (action_list == NULL || action_list->data == NULL) { crm_trace("%s: no %s action", rsc->id, action); free(key); return NULL; } a = action_list->data; g_list_free(action_list); if (only_valid && is_set(a->flags, pe_action_pseudo)) { crm_trace("%s: pseudo", key); a = NULL; } else if (only_valid && is_not_set(a->flags, pe_action_runnable)) { crm_trace("%s: runnable", key); a = NULL; } free(key); return a; } void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *other = NULL; action_t *reload = NULL; if (rsc->children) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; ReloadRsc(child_rsc, node, data_set); } return; } else if (rsc->variant > pe_native) { /* Complex resource with no children */ return; } else if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); return; } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); 
stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */ return; } else if (node == NULL) { pe_rsc_trace(rsc, "%s: not active", rsc->id); return; } pe_rsc_trace(rsc, "Processing %s", rsc->id); set_bit(rsc->flags, pe_rsc_reload); reload = custom_action( rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set); /* stop = stop_action(rsc, node, optional); */ other = get_first_named_action(rsc, RSC_STOP, TRUE, node); if (other != NULL) { order_actions(reload, other, pe_order_optional); } other = get_first_named_action(rsc, RSC_DEMOTE, TRUE, node); if (other != NULL) { order_actions(reload, other, pe_order_optional); } } void native_append_meta(resource_t * rsc, xmlNode * xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); resource_t *iso_parent, *last_parent, *parent; if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container) { crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id); } } last_parent = iso_parent = rsc; while (iso_parent != NULL) { char *name = NULL; char *iso = NULL; if (iso_parent->isolation_wrapper == NULL) { last_parent = iso_parent; iso_parent = iso_parent->parent; continue; } /* name of wrapper script this resource is routed through. */ name = crm_meta_name(XML_RSC_ATTR_ISOLATION_WRAPPER); crm_xml_add(xml, name, iso_parent->isolation_wrapper); free(name); /* instance name for isolated environment */ name = crm_meta_name(XML_RSC_ATTR_ISOLATION_INSTANCE); if (pe_rsc_is_clone(iso_parent)) { /* if isolation is set at the clone/master level, we have to * give this resource the unique isolation instance associated * with the clone child (last_parent)*/ /* Example: cloned group. group is container * clone myclone - iso_parent * group mygroup - last_parent (this is the iso environment) * rsc myrsc1 - rsc * rsc myrsc2 * The group is what is isolated in example1. We have to make * sure myrsc1 and myrsc2 launch in the same isolated environment. * * Example: cloned primitives. 
rsc primitive is container * clone myclone iso_parent * rsc myrsc1 - last_parent == rsc (this is the iso environment) * The individual cloned primitive instances are isolated */ value = g_hash_table_lookup(last_parent->meta, XML_RSC_ATTR_INCARNATION); CRM_ASSERT(value != NULL); iso = crm_concat(crm_element_value(last_parent->xml, XML_ATTR_ID), value, '_'); crm_xml_add(xml, name, iso); free(iso); } else { /* * Example: cloned group of containers * clone myclone * group mygroup * rsc myrsc1 - iso_parent (this is the iso environment) * rsc myrsc2 * * Example: group of containers * group mygroup * rsc myrsc1 - iso_parent (this is the iso environment) * rsc myrsc2 * * Example: group is container * group mygroup - iso_parent ( this is iso environment) * rsc myrsc1 * rsc myrsc2 * * Example: single primitive * rsc myrsc1 - iso_parent (this is the iso environment) */ value = g_hash_table_lookup(iso_parent->meta, XML_RSC_ATTR_INCARNATION); if (value) { crm_xml_add(xml, name, iso_parent->id); iso = crm_concat(crm_element_value(iso_parent->xml, XML_ATTR_ID), value, '_'); crm_xml_add(xml, name, iso); free(iso); } else { crm_xml_add(xml, name, iso_parent->id); } } free(name); break; } } diff --git a/pengine/test10/10-a-then-bm-b-move-a-clone.summary b/pengine/test10/10-a-then-bm-b-move-a-clone.summary index 07b4d2b90d..b39963f5da 100644 --- a/pengine/test10/10-a-then-bm-b-move-a-clone.summary +++ b/pengine/test10/10-a-then-bm-b-move-a-clone.summary @@ -1,32 +1,32 @@ Current cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node1 f20node2 ] vm (ocf::heartbeat:Dummy): Started f20node1 Transition Summary: - * Stop myclone:1 (f20node1) + * Stop myclone:1 (f20node1) due to node availability * Migrate vm (Started f20node1 -> f20node2) Executing cluster transition: * Resource action: vm migrate_to on f20node1 * Resource action: vm migrate_from on f20node2 * Resource action: vm stop on f20node1 * Pseudo action: myclone-clone_stop_0 * Pseudo action: vm_start_0 * Resource action: myclone stop on f20node1 * Pseudo action: myclone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node2 ] Stopped: [ f20node1 ] vm (ocf::heartbeat:Dummy): Started f20node2 diff --git a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary index 4b8e1d0cae..498cd2b055 100644 --- a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary +++ b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.summary @@ -1,35 +1,35 @@ Current cluster status: Node f20node1 (1): standby Online: [ f20node2 ] Clone Set: myclone-clone [myclone] Started: [ f20node1 ] Stopped: [ f20node2 ] vm (ocf::heartbeat:Dummy): Started f20node1 Transition Summary: * Move myclone:0 (Started f20node1 -> f20node2) - * Move vm (Started f20node1 -> f20node2) + * Move vm (Started f20node1 -> f20node2) due to unrunnable myclone-clone stop Executing cluster transition: * Resource action: myclone monitor on f20node2 * Resource action: vm stop on f20node1 * Pseudo action: myclone-clone_stop_0 * Resource action: myclone stop on f20node1 * Pseudo action: myclone-clone_stopped_0 * Pseudo action: myclone-clone_start_0 * Pseudo action: all_stopped * Resource action: myclone start on f20node2 * Pseudo action: myclone-clone_running_0 * Resource action: vm start on f20node2 Revised cluster status: Node f20node1 (1): standby Online: [ f20node2 
] Clone Set: myclone-clone [myclone] Started: [ f20node2 ] Stopped: [ f20node1 ] vm (ocf::heartbeat:Dummy): Started f20node2 diff --git a/pengine/test10/1484.summary b/pengine/test10/1484.summary index 52cfebe898..4b5d8a6b47 100644 --- a/pengine/test10/1484.summary +++ b/pengine/test10/1484.summary @@ -1,20 +1,20 @@ Current cluster status: Online: [ hb1 hb2 ] OFFLINE: [ hb3 ] the-future-of-vaj (ocf::heartbeat:Dummy): FAILED hb2 Transition Summary: - * Stop the-future-of-vaj (hb2) + * Stop the-future-of-vaj (hb2) due to node availability Executing cluster transition: * Resource action: the-future-of-vaj stop on hb2 * Pseudo action: all_stopped Revised cluster status: Online: [ hb1 hb2 ] OFFLINE: [ hb3 ] the-future-of-vaj (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/1494.summary b/pengine/test10/1494.summary index 29c3cd6719..be13fdd8f6 100644 --- a/pengine/test10/1494.summary +++ b/pengine/test10/1494.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ hb1 hb2 ] OFFLINE: [ hb3 ] Clone Set: ima_cloneid [ima_rscid] (unique) ima_rscid:0 (ocf::heartbeat:Dummy): Started hb1 ima_rscid:1 (ocf::heartbeat:Dummy): Started hb2 Transition Summary: - * Stop ima_rscid:0 (hb1) + * Stop ima_rscid:0 (hb1) due to node availability Executing cluster transition: * Pseudo action: ima_cloneid_stop_0 * Resource action: ima_rscid:0 stop on hb1 * Pseudo action: ima_cloneid_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ hb1 hb2 ] OFFLINE: [ hb3 ] Clone Set: ima_cloneid [ima_rscid] (unique) ima_rscid:0 (ocf::heartbeat:Dummy): Stopped ima_rscid:1 (ocf::heartbeat:Dummy): Started hb2 diff --git a/pengine/test10/5-am-then-bm-a-not-migratable.summary b/pengine/test10/5-am-then-bm-a-not-migratable.summary index bc739101cb..7e95dbb124 100644 --- a/pengine/test10/5-am-then-bm-a-not-migratable.summary +++ b/pengine/test10/5-am-then-bm-a-not-migratable.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: * Move A (Started 18node1 -> 18node2) - * Move B (Started 18node2 -> 18node1) + * Move B (Started 18node2 -> 18node1) due to unrunnable A stop Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Resource action: A start on 18node2 * Resource action: B start on 18node1 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/594.summary b/pengine/test10/594.summary index 44ed113912..de1d179137 100644 --- a/pengine/test10/594.summary +++ b/pengine/test10/594.summary @@ -1,56 +1,56 @@ Current cluster status: Node hadev3 (879e65f8-4b38-4c56-9552-4752ad436669): UNCLEAN (offline) Online: [ hadev1 hadev2 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Started hadev1 Transition Summary: * Shutdown hadev2 * Fence (reboot) hadev3 'peer is no longer part of the cluster' * Move DcIPaddr (Started hadev2 -> hadev1) * Move rsc_hadev2 (Started 
hadev2 -> hadev1) - * Stop child_DoFencing:0 (hadev2) - * Stop child_DoFencing:2 (hadev1) + * Stop child_DoFencing:0 (hadev2) due to node availability + * Stop child_DoFencing:2 (hadev1) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on hadev1 * Resource action: rsc_hadev3 monitor on hadev2 * Resource action: rsc_hadev2 monitor on hadev1 * Resource action: child_DoFencing:0 monitor on hadev1 * Resource action: child_DoFencing:2 monitor on hadev2 * Pseudo action: DoFencing_stop_0 * Fencing hadev3 (reboot) * Resource action: DcIPaddr stop on hadev2 * Resource action: rsc_hadev2 stop on hadev2 * Resource action: child_DoFencing:0 stop on hadev2 * Resource action: child_DoFencing:2 stop on hadev1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on hadev2 * Pseudo action: stonith_complete * Pseudo action: all_stopped * Resource action: DcIPaddr start on hadev1 * Resource action: rsc_hadev2 start on hadev1 * Resource action: DcIPaddr monitor=5000 on hadev1 * Resource action: rsc_hadev2 monitor=5000 on hadev1 Revised cluster status: Online: [ hadev1 hadev2 ] OFFLINE: [ hadev3 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev1 rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started hadev1 child_DoFencing:2 (stonith:ssh): Stopped diff --git a/pengine/test10/662.summary b/pengine/test10/662.summary index 1726f35ef1..4a9d911bc5 100644 --- a/pengine/test10/662.summary +++ b/pengine/test10/662.summary @@ -1,67 +1,67 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n09 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n02 child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n04 child_DoFencing:3 (stonith:ssh): Started c001n09 Transition Summary: * Shutdown c001n02 * Move rsc_c001n02 (Started c001n02 -> c001n03) - * Stop child_DoFencing:0 (c001n02) + * Stop child_DoFencing:0 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n04 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n02 * Resource action: rsc_c001n09 monitor on c001n04 * Resource action: rsc_c001n09 monitor on c001n03 * Resource action: rsc_c001n09 monitor on c001n02 * Resource action: rsc_c001n02 monitor on c001n09 * Resource action: rsc_c001n02 monitor on c001n04 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n03 monitor on c001n09 * Resource action: rsc_c001n03 monitor on c001n04 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n04 monitor on c001n09 * Resource action: rsc_c001n04 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n09 * Resource action: child_DoFencing:0 monitor on c001n04 * Resource action: child_DoFencing:1 monitor on c001n04 * Resource action: child_DoFencing:1 monitor on c001n02 * Resource action: child_DoFencing:2 monitor on c001n09 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: 
child_DoFencing:3 monitor on c001n04 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Pseudo action: DoFencing_stop_0 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: child_DoFencing:0 stop on c001n02 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Resource action: rsc_c001n02 start on c001n03 * Resource action: rsc_c001n02 monitor=5000 on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n09 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n04 child_DoFencing:3 (stonith:ssh): Started c001n09 diff --git a/pengine/test10/7-migrate-group-one-unmigratable.summary b/pengine/test10/7-migrate-group-one-unmigratable.summary index ab9bcd9437..cf1b370c2f 100644 --- a/pengine/test10/7-migrate-group-one-unmigratable.summary +++ b/pengine/test10/7-migrate-group-one-unmigratable.summary @@ -1,40 +1,40 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node1 B (ocf::heartbeat:Dummy): Started 18node1 C (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: * Migrate A (Started 18node1 -> 18node2) * Move B (Started 18node1 -> 18node2) - * Move C (Started 18node1 -> 18node2) + * Move C (Started 18node1 -> 18node2) due to unrunnable B stop Executing cluster transition: * Pseudo action: thegroup_stop_0 * Resource action: C stop on 18node1 * Resource action: B stop on 18node1 * Resource action: A migrate_to on 18node1 * Resource action: A migrate_from on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped * Pseudo action: thegroup_stopped_0 * Pseudo action: thegroup_start_0 * Pseudo action: A_start_0 * Resource action: B start on 18node2 * Resource action: C start on 18node2 * Pseudo action: thegroup_running_0 * Resource action: A monitor=60000 on 18node2 * Resource action: B monitor=60000 on 18node2 * Resource action: C monitor=60000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Resource Group: thegroup A (ocf::heartbeat:Dummy): Started 18node2 B (ocf::heartbeat:Dummy): Started 18node2 C (ocf::heartbeat:Dummy): Started 18node2 diff --git a/pengine/test10/764.summary b/pengine/test10/764.summary index 2d43eae782..0d5c612f5b 100644 --- a/pengine/test10/764.summary +++ b/pengine/test10/764.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ posic041 posic043 ] OFFLINE: [ posic042 posic044 ] DcIPaddr (ocf::heartbeat:IPaddr): Started posic043 rsc_posic041 (ocf::heartbeat:IPaddr): Started posic041 rsc_posic042 (ocf::heartbeat:IPaddr): Started posic041 rsc_posic043 (ocf::heartbeat:IPaddr): Started posic043 rsc_posic044 (ocf::heartbeat:IPaddr): Starting posic041 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started posic043 child_DoFencing:1 (stonith:ssh): Started posic041 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: - * Stop DcIPaddr (Started posic043) - * Stop rsc_posic041 (Started posic041) - * Stop rsc_posic042 (Started posic041) - * Stop 
rsc_posic043 (Started posic043) - * Stop rsc_posic044 (Started posic041) + * Stop DcIPaddr (Started posic043) due to no quorum + * Stop rsc_posic041 (Started posic041) due to no quorum + * Stop rsc_posic042 (Started posic041) due to no quorum + * Stop rsc_posic043 (Started posic043) due to no quorum + * Stop rsc_posic044 (Started posic041) due to no quorum Executing cluster transition: * Resource action: DcIPaddr monitor on posic041 * Resource action: rsc_posic041 monitor on posic043 * Resource action: rsc_posic042 monitor on posic043 * Resource action: rsc_posic043 monitor on posic041 * Resource action: rsc_posic044 monitor on posic043 * Resource action: child_DoFencing:0 monitor=5000 on posic043 * Resource action: child_DoFencing:1 monitor=5000 on posic041 * Resource action: child_DoFencing:1 monitor on posic043 * Resource action: child_DoFencing:2 monitor on posic041 * Resource action: child_DoFencing:3 monitor on posic041 * Resource action: DcIPaddr stop on posic043 * Resource action: rsc_posic041 stop on posic041 * Resource action: rsc_posic042 stop on posic041 * Resource action: rsc_posic043 stop on posic043 * Resource action: rsc_posic044 stop on posic041 * Pseudo action: all_stopped Revised cluster status: Online: [ posic041 posic043 ] OFFLINE: [ posic042 posic044 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped rsc_posic041 (ocf::heartbeat:IPaddr): Stopped rsc_posic042 (ocf::heartbeat:IPaddr): Stopped rsc_posic043 (ocf::heartbeat:IPaddr): Stopped rsc_posic044 (ocf::heartbeat:IPaddr): Started posic041 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started posic043 child_DoFencing:1 (stonith:ssh): Started posic041 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/797.summary b/pengine/test10/797.summary index 3184eae297..9e9400359e 100644 --- a/pengine/test10/797.summary +++ b/pengine/test10/797.summary @@ -1,73 +1,73 @@ Current cluster status: Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline) Online: [ c001n01 c001n02 c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started (Monitoring)[ c001n01 c001n03 ] child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: * Shutdown c001n02 - * Stop DcIPaddr (Started c001n03) - * Stop rsc_c001n08 (Started c001n02) - * Stop rsc_c001n02 (Started c001n02) - * Stop rsc_c001n03 (Started c001n03) - * Stop rsc_c001n01 (Started c001n01) + * Stop DcIPaddr (Started c001n03) due to no quorum + * Stop rsc_c001n08 (Started c001n02) due to no quorum + * Stop rsc_c001n02 (Started c001n02) due to no quorum + * Stop rsc_c001n03 (Started c001n03) due to no quorum + * Stop rsc_c001n01 (Started c001n01) due to no quorum * Restart child_DoFencing:0 (Started c001n01) - * Stop child_DoFencing:1 (c001n02) + * Stop child_DoFencing:1 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n02 * Resource action: DcIPaddr monitor on c001n01 * Resource action: DcIPaddr stop on c001n03 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 
monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n02 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n01 * Pseudo action: DoFencing_stop_0 * Resource action: DcIPaddr delete on c001n03 * Resource action: rsc_c001n08 stop on c001n02 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n03 stop on c001n03 * Resource action: rsc_c001n01 stop on c001n01 * Resource action: child_DoFencing:0 stop on c001n03 * Resource action: child_DoFencing:0 stop on c001n01 * Resource action: child_DoFencing:1 stop on c001n02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Resource action: child_DoFencing:0 start on c001n01 * Resource action: child_DoFencing:0 monitor=5000 on c001n01 * Pseudo action: DoFencing_running_0 Revised cluster status: Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline) Online: [ c001n01 c001n02 c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/829.summary b/pengine/test10/829.summary index 9e66733076..feca908f08 100644 --- a/pengine/test10/829.summary +++ b/pengine/test10/829.summary @@ -1,64 +1,64 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n01 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 (UNCLEAN) rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 Transition Summary: * Fence (reboot) c001n02 'peer is no longer part of the cluster' * Move rsc_c001n02 (Started c001n02 -> c001n01) - * Stop child_DoFencing:0 (c001n02) + * Stop child_DoFencing:0 (c001n02) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n03 monitor on c001n08 * Resource action: rsc_c001n03 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: child_DoFencing:0 monitor on c001n01 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor 
on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Fencing c001n02 (reboot) * Pseudo action: rsc_c001n02_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: stonith_complete * Resource action: rsc_c001n02 start on c001n01 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: all_stopped * Resource action: rsc_c001n02 monitor=5000 on c001n01 Revised cluster status: Online: [ c001n01 c001n03 c001n08 ] OFFLINE: [ c001n02 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n01 child_DoFencing:3 (stonith:ssh): Started c001n08 diff --git a/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary b/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary index 37d16be476..44fc1a2261 100644 --- a/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary +++ b/pengine/test10/9-am-then-bm-b-migrating-a-stopping.summary @@ -1,23 +1,23 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Started 18node1 ( disabled ) B (ocf::heartbeat:Dummy): Started 18node2 Transition Summary: - * Stop A (18node1) - * Stop B (Started 18node2) + * Stop A (18node1) due to node availability + * Stop B (Started 18node2) due to unrunnable A start Executing cluster transition: * Resource action: B stop on 18node2 * Resource action: A stop on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] A (ocf::heartbeat:Dummy): Stopped ( disabled ) B (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/994-2.summary b/pengine/test10/994-2.summary index dc4b9d15cc..a1d477f5c4 100644 --- a/pengine/test10/994-2.summary +++ b/pengine/test10/994-2.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): FAILED paul depends (lsb:postfix): Started paul Transition Summary: * Recover postfix_9 (Started paul) - * Restart depends (Started paul) + * Restart depends (Started paul) due to required group_1 running Executing cluster transition: * Resource action: depends stop on paul * Pseudo action: group_1_stop_0 * Resource action: postfix_9 stop on paul * Pseudo action: all_stopped * Pseudo action: group_1_stopped_0 * Pseudo action: group_1_start_0 * Resource action: postfix_9 start on paul * Resource action: postfix_9 monitor=120000 on paul * Pseudo action: group_1_running_0 * Resource action: depends start on paul Revised cluster status: Online: [ paul ] Resource Group: group_1 datadisk_1 (heartbeat:datadisk): Started paul Filesystem_2 (ocf::heartbeat:Filesystem): Started paul IPaddr_5 (ocf::heartbeat:IPaddr): Started paul postfix_9 (lsb:postfix): Started paul depends (lsb:postfix): Started paul diff --git a/pengine/test10/anti-colocation-order.summary b/pengine/test10/anti-colocation-order.summary index 
2447501799..052043a5fb 100644 --- a/pengine/test10/anti-colocation-order.summary +++ b/pengine/test10/anti-colocation-order.summary @@ -1,44 +1,44 @@ Current cluster status: Node node1: standby Online: [ node2 ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Resource Group: group2 rsc3 (ocf::pacemaker:Dummy): Started node2 rsc4 (ocf::pacemaker:Dummy): Started node2 Transition Summary: * Move rsc1 (Started node1 -> node2) * Move rsc2 (Started node1 -> node2) - * Stop rsc3 (node2) - * Stop rsc4 (node2) + * Stop rsc3 (node2) due to node availability + * Stop rsc4 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node1 * Pseudo action: group2_stop_0 * Resource action: rsc4 stop on node2 * Resource action: rsc1 stop on node1 * Resource action: rsc3 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 * Pseudo action: group2_stopped_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Pseudo action: group1_running_0 Revised cluster status: Node node1: standby Online: [ node2 ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Resource Group: group2 rsc3 (ocf::pacemaker:Dummy): Stopped rsc4 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/asymmetrical-order-move.summary b/pengine/test10/asymmetrical-order-move.summary index a9c1d8ffd4..503813cb40 100644 --- a/pengine/test10/asymmetrical-order-move.summary +++ b/pengine/test10/asymmetrical-order-move.summary @@ -1,25 +1,25 @@ Using the original execution date of: 2016-04-28 11:50:29Z 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Stopped ( disabled ) dummy2 (ocf::pacemaker:Dummy): Started sle12sp2-1 Transition Summary: - * Stop dummy2 (Started sle12sp2-1) + * Stop dummy2 (Started sle12sp2-1) due to unrunnable dummy1 start Executing cluster transition: * Resource action: dummy2 stop on sle12sp2-1 * Pseudo action: all_stopped Using the original execution date of: 2016-04-28 11:50:29Z Revised cluster status: Online: [ sle12sp2-1 sle12sp2-2 ] st_sbd (stonith:external/sbd): Started sle12sp2-2 dummy1 (ocf::pacemaker:Dummy): Stopped ( disabled ) dummy2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/bug-1572-1.summary b/pengine/test10/bug-1572-1.summary index 6c37bb467d..6a2461412f 100644 --- a/pengine/test10/bug-1572-1.summary +++ b/pengine/test10/bug-1572-1.summary @@ -1,85 +1,85 @@ Current cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Slaves: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: * Shutdown arc-dknightlx - * Stop rsc_drbd_7788:0 (arc-dknightlx) + * Stop rsc_drbd_7788:0 (arc-dknightlx) due to node availability * Restart rsc_drbd_7788:1 (Master arc-tkincaidlx.wsicorp.com) - * Restart fs_mirror (Started arc-tkincaidlx.wsicorp.com) - * Restart pgsql_5555 (Started arc-tkincaidlx.wsicorp.com) - * Restart IPaddr_147_81_84_133 (Started 
arc-tkincaidlx.wsicorp.com) + * Restart fs_mirror (Started arc-tkincaidlx.wsicorp.com) due to required ms_drbd_7788 notified + * Restart pgsql_5555 (Started arc-tkincaidlx.wsicorp.com) due to required fs_mirror start + * Restart IPaddr_147_81_84_133 (Started arc-tkincaidlx.wsicorp.com) due to required pgsql_5555 start Executing cluster transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_7788_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_7788_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_7788_start_0 * Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_running_0 * Pseudo action: ms_drbd_7788_post_notify_running_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_7788_pre_notify_promote_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_7788_promote_0 * Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_promoted_0 * Pseudo action: ms_drbd_7788_post_notify_promoted_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_promoted_0 * Pseudo action: grp_pgsql_mirror_start_0 * Resource action: fs_mirror start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 monitor=30000 on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_running_0 Revised cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Stopped: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror 
fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com diff --git a/pengine/test10/bug-1572-2.summary b/pengine/test10/bug-1572-2.summary index a4235a73a7..96574cff2e 100644 --- a/pengine/test10/bug-1572-2.summary +++ b/pengine/test10/bug-1572-2.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Masters: [ arc-tkincaidlx.wsicorp.com ] Slaves: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: * Shutdown arc-dknightlx - * Stop rsc_drbd_7788:0 (arc-dknightlx) + * Stop rsc_drbd_7788:0 (arc-dknightlx) due to node availability * Demote rsc_drbd_7788:1 (Master -> Slave arc-tkincaidlx.wsicorp.com) - * Stop fs_mirror (arc-tkincaidlx.wsicorp.com) - * Stop pgsql_5555 (arc-tkincaidlx.wsicorp.com) - * Stop IPaddr_147_81_84_133 (arc-tkincaidlx.wsicorp.com) + * Stop fs_mirror (arc-tkincaidlx.wsicorp.com) due to node availability + * Stop pgsql_5555 (arc-tkincaidlx.wsicorp.com) due to node availability + * Stop IPaddr_147_81_84_133 (arc-tkincaidlx.wsicorp.com) due to node availability Executing cluster transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788] Slaves: [ arc-tkincaidlx.wsicorp.com ] Stopped: [ arc-dknightlx ] Resource Group: grp_pgsql_mirror fs_mirror (ocf::heartbeat:Filesystem): Stopped pgsql_5555 (ocf::heartbeat:pgsql): Stopped IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Stopped diff --git a/pengine/test10/bug-1573.summary b/pengine/test10/bug-1573.summary index 
8fb2820574..02d93a67f7 100644 --- a/pengine/test10/bug-1573.summary +++ b/pengine/test10/bug-1573.summary @@ -1,34 +1,34 @@ Current cluster status: Online: [ xen-b ] OFFLINE: [ xen-c ] Resource Group: group_1 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Stopped apache_2 (ocf::heartbeat:apache): Stopped Resource Group: group_11 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started xen-b apache_6 (ocf::heartbeat:apache): Stopped Transition Summary: * Shutdown xen-b - * Stop IPaddr_192_168_1_102 (xen-b) + * Stop IPaddr_192_168_1_102 (xen-b) due to node availability Executing cluster transition: * Pseudo action: group_11_stop_0 * Resource action: IPaddr_192_168_1_102 stop on xen-b * Cluster action: do_shutdown on xen-b * Pseudo action: all_stopped * Pseudo action: group_11_stopped_0 Revised cluster status: Online: [ xen-b ] OFFLINE: [ xen-c ] Resource Group: group_1 IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Stopped apache_2 (ocf::heartbeat:apache): Stopped Resource Group: group_11 IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Stopped apache_6 (ocf::heartbeat:apache): Stopped diff --git a/pengine/test10/bug-1718.summary b/pengine/test10/bug-1718.summary index 622a5a8b3e..b539e4e856 100644 --- a/pengine/test10/bug-1718.summary +++ b/pengine/test10/bug-1718.summary @@ -1,42 +1,42 @@ 2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ] OFFLINE: [ defiant.ds9 warbird.ds9 ] Resource Group: Web_Group Apache_IP (ocf::heartbeat:IPaddr): Started heartbeat.ds9 resource_IP2 (ocf::heartbeat:IPaddr): Stopped ( disabled ) resource_dummyweb (ocf::heartbeat:Dummy): Stopped Resource Group: group_fUN resource_IP3 (ocf::heartbeat:IPaddr): Started ops.ds9 resource_dummy (ocf::heartbeat:Dummy): Started ops.ds9 Transition Summary: - * Stop resource_IP3 (Started ops.ds9) - * Stop resource_dummy (Started ops.ds9) + * Stop resource_IP3 (Started ops.ds9) due to unrunnable Web_Group running + * Stop resource_dummy (Started ops.ds9) due to required resource_IP3 start Executing cluster transition: * Pseudo action: group_fUN_stop_0 * Resource action: resource_dummy stop on ops.ds9 * Resource action: OpenVPN_IP delete on ops.ds9 * Resource action: OpenVPN_IP delete on heartbeat.ds9 * Resource action: Apache delete on biggame.ds9 * Resource action: Apache delete on ops.ds9 * Resource action: Apache delete on heartbeat.ds9 * Resource action: resource_IP3 stop on ops.ds9 * Pseudo action: all_stopped * Pseudo action: group_fUN_stopped_0 Revised cluster status: Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ] OFFLINE: [ defiant.ds9 warbird.ds9 ] Resource Group: Web_Group Apache_IP (ocf::heartbeat:IPaddr): Started heartbeat.ds9 resource_IP2 (ocf::heartbeat:IPaddr): Stopped ( disabled ) resource_dummyweb (ocf::heartbeat:Dummy): Stopped Resource Group: group_fUN resource_IP3 (ocf::heartbeat:IPaddr): Stopped resource_dummy (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/bug-5014-A-stop-B-started.summary b/pengine/test10/bug-5014-A-stop-B-started.summary index 20af4abbbe..a00dfd7b0d 100644 --- a/pengine/test10/bug-5014-A-stop-B-started.summary +++ b/pengine/test10/bug-5014-A-stop-B-started.summary @@ -1,21 +1,21 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Started fc16-builder ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Started fc16-builder Transition Summary: - * Stop ClusterIP (fc16-builder) + * Stop 
ClusterIP (fc16-builder) due to node availability Executing cluster transition: * Resource action: ClusterIP stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Started fc16-builder diff --git a/pengine/test10/bug-5014-A-stopped-B-stopped.summary b/pengine/test10/bug-5014-A-stopped-B-stopped.summary index 970d81c3f9..95e5b60e71 100644 --- a/pengine/test10/bug-5014-A-stopped-B-stopped.summary +++ b/pengine/test10/bug-5014-A-stopped-B-stopped.summary @@ -1,21 +1,21 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped Transition Summary: - * Start ClusterIP2 (fc16-builder - blocked) + * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable ClusterIP start Executing cluster transition: * Resource action: ClusterIP monitor on fc16-builder * Resource action: ClusterIP2 monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/bug-5014-CLONE-A-stop-B-started.summary b/pengine/test10/bug-5014-CLONE-A-stop-B-started.summary index 5e9bec062f..c3fb6d59b2 100644 --- a/pengine/test10/bug-5014-CLONE-A-stop-B-started.summary +++ b/pengine/test10/bug-5014-CLONE-A-stop-B-started.summary @@ -1,27 +1,27 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] Clone Set: clone1 [ClusterIP] Started: [ fc16-builder ] Clone Set: clone2 [ClusterIP2] Started: [ fc16-builder ] Transition Summary: - * Stop ClusterIP:0 (fc16-builder) + * Stop ClusterIP:0 (fc16-builder) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: ClusterIP:0 stop on fc16-builder * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] Clone Set: clone1 [ClusterIP] Stopped (disabled): [ fc16-builder ] Clone Set: clone2 [ClusterIP2] Started: [ fc16-builder ] diff --git a/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary b/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary index 991e618e1d..fe12fe63f2 100644 --- a/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary +++ b/pengine/test10/bug-5014-CthenAthenB-C-stopped.summary @@ -1,25 +1,25 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped ClusterIP3 (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Transition Summary: - * Start ClusterIP (fc16-builder - blocked) - * Start ClusterIP2 (fc16-builder - blocked) + * Start ClusterIP (fc16-builder - blocked) due to unrunnable ClusterIP3 start + * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable ClusterIP start Executing cluster transition: * Resource action: ClusterIP monitor on fc16-builder * Resource action: ClusterIP2 monitor on fc16-builder * Resource action: ClusterIP3 monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped ClusterIP3 (ocf::heartbeat:IPaddr2): Stopped ( disabled ) diff --git 
a/pengine/test10/bug-5014-GROUP-A-stopped-B-started.summary b/pengine/test10/bug-5014-GROUP-A-stopped-B-started.summary index 67df8d755d..5ea35cbb3b 100644 --- a/pengine/test10/bug-5014-GROUP-A-stopped-B-started.summary +++ b/pengine/test10/bug-5014-GROUP-A-stopped-B-started.summary @@ -1,27 +1,27 @@ 2 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Started fc16-builder ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Started fc16-builder Transition Summary: - * Stop ClusterIP (fc16-builder) + * Stop ClusterIP (fc16-builder) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: ClusterIP stop on fc16-builder * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Started fc16-builder diff --git a/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary b/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary index e5251a0adb..f9b64485af 100644 --- a/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary +++ b/pengine/test10/bug-5014-GROUP-A-stopped-B-stopped.summary @@ -1,23 +1,23 @@ 2 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped Transition Summary: - * Start ClusterIP2 (fc16-builder - blocked) + * Start ClusterIP2 (fc16-builder - blocked) due to unrunnable group1 running Executing cluster transition: Revised cluster status: Online: [ fc16-builder ] Resource Group: group1 ClusterIP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) Resource Group: group2 ClusterIP2 (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/bug-5014-ordered-set-symmetrical-false.summary b/pengine/test10/bug-5014-ordered-set-symmetrical-false.summary index 4322db36c2..89b3416102 100644 --- a/pengine/test10/bug-5014-ordered-set-symmetrical-false.summary +++ b/pengine/test10/bug-5014-ordered-set-symmetrical-false.summary @@ -1,25 +1,25 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop C (fc16-builder) + * Stop C (fc16-builder) due to node availability Executing cluster transition: * Resource action: C stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary b/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary index af74ba6527..7d94d606ff 100644 --- a/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary +++ b/pengine/test10/bug-5014-ordered-set-symmetrical-true.summary @@ -1,27 +1,27 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] 
OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop A (Started fc16-builder) - * Stop C (fc16-builder) + * Stop A (Started fc16-builder) due to required C start + * Stop C (fc16-builder) due to node availability Executing cluster transition: * Resource action: A stop on fc16-builder * Resource action: C stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/bug-5028-bottom.summary b/pengine/test10/bug-5028-bottom.summary index b43ba4efde..de24e2922c 100644 --- a/pengine/test10/bug-5028-bottom.summary +++ b/pengine/test10/bug-5028-bottom.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ bl460g6a bl460g6b ] Resource Group: dummy-g dummy01 (ocf::heartbeat:Dummy): FAILED bl460g6a ( blocked ) dummy02 (ocf::heartbeat:Dummy-stop-NG): Started bl460g6a Transition Summary: * Shutdown bl460g6a - * Stop dummy02 (bl460g6a) + * Stop dummy02 (bl460g6a) due to node availability Executing cluster transition: * Pseudo action: dummy-g_stop_0 * Resource action: dummy02 stop on bl460g6a * Pseudo action: all_stopped Revised cluster status: Online: [ bl460g6a bl460g6b ] Resource Group: dummy-g dummy01 (ocf::heartbeat:Dummy): FAILED bl460g6a ( blocked ) dummy02 (ocf::heartbeat:Dummy-stop-NG): Stopped diff --git a/pengine/test10/bug-5140-require-all-false.summary b/pengine/test10/bug-5140-require-all-false.summary index e06e969c3a..cf5193c685 100644 --- a/pengine/test10/bug-5140-require-all-false.summary +++ b/pengine/test10/bug-5140-require-all-false.summary @@ -1,81 +1,81 @@ 4 of 35 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node hex-1: standby Node hex-2: standby Node hex-3: OFFLINE (standby) fencing (stonith:external/sbd): Stopped Clone Set: baseclone [basegrp] Resource Group: basegrp:0 dlm (ocf::pacemaker:controld): Started hex-2 clvmd (ocf::lvm2:clvmd): Started hex-2 o2cb (ocf::ocfs2:o2cb): Started hex-2 vg1 (ocf::heartbeat:LVM): Stopped fs-ocfs-1 (ocf::heartbeat:Filesystem): Stopped Stopped: [ hex-1 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Stopped Clone Set: fs2 [fs-ocfs-2] Stopped: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Stopped (disabled): [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r1 [drbd-r1] Stopped (disabled): [ hex-1 hex-2 hex-3 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Stopped vg-md0 (ocf::heartbeat:LVM): Stopped fs-md0 (ocf::heartbeat:Filesystem): Stopped dummy1 (ocf::heartbeat:Delay): Stopped dummy3 (ocf::heartbeat:Delay): Stopped dummy4 (ocf::heartbeat:Delay): Stopped dummy5 (ocf::heartbeat:Delay): Stopped dummy6 (ocf::heartbeat:Delay): Stopped Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Stopped dummy2 (ocf::heartbeat:Delay): Stopped cluster-md0 (ocf::heartbeat:Raid1): Stopped Transition Summary: - * Stop dlm:0 (hex-2) - * Stop clvmd:0 (hex-2) - * Stop o2cb:0 (hex-2) + * Stop dlm:0 (hex-2) due to node availability + * Stop clvmd:0 (hex-2) due to node availability + * Stop o2cb:0 (hex-2) due to node availability Executing cluster transition: * Pseudo action: baseclone_stop_0 * Pseudo action: basegrp:0_stop_0 * Resource action: o2cb stop on hex-2 * Resource action: clvmd stop on hex-2 * Resource action: dlm stop 
on hex-2 * Pseudo action: all_stopped * Pseudo action: basegrp:0_stopped_0 * Pseudo action: baseclone_stopped_0 Revised cluster status: Node hex-1: standby Node hex-2: standby Node hex-3: OFFLINE (standby) fencing (stonith:external/sbd): Stopped Clone Set: baseclone [basegrp] Stopped: [ hex-1 hex-2 hex-3 ] fs-xfs-1 (ocf::heartbeat:Filesystem): Stopped Clone Set: fs2 [fs-ocfs-2] Stopped: [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r0 [drbd-r0] Stopped (disabled): [ hex-1 hex-2 hex-3 ] Master/Slave Set: ms-r1 [drbd-r1] Stopped (disabled): [ hex-1 hex-2 hex-3 ] Resource Group: md0-group md0 (ocf::heartbeat:Raid1): Stopped vg-md0 (ocf::heartbeat:LVM): Stopped fs-md0 (ocf::heartbeat:Filesystem): Stopped dummy1 (ocf::heartbeat:Delay): Stopped dummy3 (ocf::heartbeat:Delay): Stopped dummy4 (ocf::heartbeat:Delay): Stopped dummy5 (ocf::heartbeat:Delay): Stopped dummy6 (ocf::heartbeat:Delay): Stopped Resource Group: r0-group fs-r0 (ocf::heartbeat:Filesystem): Stopped dummy2 (ocf::heartbeat:Delay): Stopped cluster-md0 (ocf::heartbeat:Raid1): Stopped diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary index c2685d8621..cd01706253 100644 --- a/pengine/test10/bug-5186-partial-migrate.summary +++ b/pengine/test10/bug-5186-partial-migrate.summary @@ -1,91 +1,91 @@ Current cluster status: Node bl460g1n7 (3232261593): UNCLEAN (offline) Online: [ bl460g1n6 bl460g1n8 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n7 (UNCLEAN) prmVM2 (ocf::heartbeat:VirtualDomain): Migrating bl460g1n7 (UNCLEAN) Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN) prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN) Clone Set: clnDiskd1 [prmDiskd1] prmDiskd1 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnDiskd2 [prmDiskd2] prmDiskd2 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Clone Set: clnPing [prmPing] prmPing (ocf::pacemaker:ping): Started bl460g1n7 (UNCLEAN) Started: [ bl460g1n6 bl460g1n8 ] Transition Summary: * Fence (reboot) bl460g1n7 'prmDummy is thought to be active there' * Move prmDummy (Started bl460g1n7 -> bl460g1n6) * Move prmVM2 (Started bl460g1n7 -> bl460g1n8) * Move prmStonith8-1 (Started bl460g1n7 -> bl460g1n6) * Move prmStonith8-2 (Started bl460g1n7 -> bl460g1n6) - * Stop prmDiskd1:0 (bl460g1n7) - * Stop prmDiskd2:0 (bl460g1n7) - * Stop prmPing:0 (bl460g1n7) + * Stop prmDiskd1:0 (bl460g1n7) due to node availability + * Stop prmDiskd2:0 (bl460g1n7) due to node availability + * Stop prmPing:0 (bl460g1n7) due to node availability Executing cluster transition: * Resource action: prmVM2 stop on bl460g1n6 * Pseudo action: grpStonith8_stop_0 * Pseudo action: prmStonith8-2_stop_0 * Fencing bl460g1n7 (reboot) * Pseudo action: prmDummy_stop_0 * Pseudo action: prmVM2_stop_0 * Pseudo action: prmStonith8-1_stop_0 * Pseudo action: clnDiskd1_stop_0 * Pseudo action: clnDiskd2_stop_0 * Pseudo action: clnPing_stop_0 * Pseudo action: stonith_complete * Resource action: prmDummy start on bl460g1n6 * Resource action: prmVM2 start on bl460g1n8 * Pseudo action: grpStonith8_stopped_0 * Pseudo action: grpStonith8_start_0 
* Resource action: prmStonith8-1 start on bl460g1n6 * Resource action: prmStonith8-2 start on bl460g1n6 * Pseudo action: prmDiskd1_stop_0 * Pseudo action: clnDiskd1_stopped_0 * Pseudo action: prmDiskd2_stop_0 * Pseudo action: clnDiskd2_stopped_0 * Pseudo action: prmPing_stop_0 * Pseudo action: clnPing_stopped_0 * Pseudo action: all_stopped * Resource action: prmVM2 monitor=10000 on bl460g1n8 * Pseudo action: grpStonith8_running_0 * Resource action: prmStonith8-1 monitor=10000 on bl460g1n6 * Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6 Revised cluster status: Online: [ bl460g1n6 bl460g1n8 ] OFFLINE: [ bl460g1n7 ] prmDummy (ocf::pacemaker:Dummy): Started bl460g1n6 prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n8 Resource Group: grpStonith6 prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8 prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8 Resource Group: grpStonith7 prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6 Resource Group: grpStonith8 prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6 prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6 Clone Set: clnDiskd1 [prmDiskd1] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnDiskd2 [prmDiskd2] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] Clone Set: clnPing [prmPing] Started: [ bl460g1n6 bl460g1n8 ] Stopped: [ bl460g1n7 ] diff --git a/pengine/test10/bug-cl-5170.summary b/pengine/test10/bug-cl-5170.summary index c932a86481..70b589e989 100644 --- a/pengine/test10/bug-cl-5170.summary +++ b/pengine/test10/bug-cl-5170.summary @@ -1,34 +1,34 @@ Current cluster status: Node TCS-1: OFFLINE (standby) Online: [ TCS-2 ] Resource Group: svc ip_trf (ocf::heartbeat:IPaddr2): Started TCS-2 ip_mgmt (ocf::heartbeat:IPaddr2): Started TCS-2 Clone Set: cl_tomcat_nms [d_tomcat_nms] d_tomcat_nms (ocf::ntc:tomcat): FAILED TCS-2 ( blocked ) Stopped: [ TCS-1 ] Transition Summary: - * Stop ip_trf (TCS-2) - * Stop ip_mgmt (TCS-2) + * Stop ip_trf (TCS-2) due to node availability + * Stop ip_mgmt (TCS-2) due to node availability Executing cluster transition: * Pseudo action: svc_stop_0 * Resource action: ip_mgmt stop on TCS-2 * Resource action: ip_trf stop on TCS-2 * Pseudo action: all_stopped * Pseudo action: svc_stopped_0 Revised cluster status: Node TCS-1: OFFLINE (standby) Online: [ TCS-2 ] Resource Group: svc ip_trf (ocf::heartbeat:IPaddr2): Stopped ip_mgmt (ocf::heartbeat:IPaddr2): Stopped Clone Set: cl_tomcat_nms [d_tomcat_nms] d_tomcat_nms (ocf::ntc:tomcat): FAILED TCS-2 ( blocked ) Stopped: [ TCS-1 ] diff --git a/pengine/test10/bug-cl-5212.summary b/pengine/test10/bug-cl-5212.summary index b5d5146add..92d3af3254 100644 --- a/pengine/test10/bug-cl-5212.summary +++ b/pengine/test10/bug-cl-5212.summary @@ -1,67 +1,67 @@ Current cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Master/Slave Set: msPostgresql [pgsql] pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): 
Started srv01 (UNCLEAN) Started: [ srv03 ] Transition Summary: * Stop prmStonith1-1 (Started srv02 - blocked) * Stop prmStonith2-1 (Started srv01 - blocked) - * Stop prmStonith3-1 (srv01 - blocked) - * Stop pgsql:0 (srv02 - blocked) + * Stop prmStonith3-1 (srv01 - blocked) due to node availability + * Stop pgsql:0 (srv02 - blocked) due to node availability * Demote pgsql:1 (Master -> Stopped srv01 - blocked) - * Stop prmPingd:0 (srv02 - blocked) - * Stop prmPingd:1 (srv01 - blocked) + * Stop prmPingd:0 (srv02 - blocked) due to node availability + * Stop prmPingd:1 (srv01 - blocked) due to node availability Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stop_0 * Pseudo action: grpStonith2_start_0 * Pseudo action: grpStonith3_stop_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: clnPingd_stop_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: clnPingd_stopped_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: msPostgresql_post_notify_stopped_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 Revised cluster status: Node srv01 (3232238280): UNCLEAN (offline) Node srv02 (3232238290): UNCLEAN (offline) Online: [ srv03 ] Resource Group: grpStonith1 prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith2 prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Resource Group: grpStonith3 prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) Master/Slave Set: msPostgresql [pgsql] pgsql (ocf::pacemaker:Stateful): Slave srv02 ( UNCLEAN ) pgsql (ocf::pacemaker:Stateful): Master srv01 (UNCLEAN) Slaves: [ srv03 ] Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): Started srv02 (UNCLEAN) prmPingd (ocf::pacemaker:ping): Started srv01 (UNCLEAN) Started: [ srv03 ] diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary index fc97e8d991..0f328cc7fe 100644 --- a/pengine/test10/bug-cl-5247.summary +++ b/pengine/test10/bug-cl-5247.summary @@ -1,101 +1,101 @@ Using the original execution date of: 2015-08-12 02:53:40Z Current cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): FAILED bl460g8n4 Resource Group: grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED pgsr02 vip-rep (ocf::heartbeat:Dummy): FAILED pgsr02 Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] Transition Summary: * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean' - * Stop prmDB2 (bl460g8n4) + * Stop prmDB2 (bl460g8n4) due to node availability * Restart prmStonith1-2 (Started bl460g8n4) * Restart prmStonith2-2 (Started bl460g8n3) * Recover vip-master (Started pgsr02 -> pgsr01) * Recover vip-rep (Started pgsr02 -> pgsr01) * Demote pgsql:0 (Master -> Stopped pgsr02) * Stop pgsr02 (bl460g8n4) Executing cluster transition: * Pseudo action: grpStonith1_stop_0 * Resource action: prmStonith1-2 stop on bl460g8n4 * Pseudo action: grpStonith2_stop_0 * Resource action: prmStonith2-2 stop on bl460g8n3 * Pseudo action: msPostgresql_pre_notify_demote_0 * Resource action: 
pgsr01 monitor on bl460g8n4 * Resource action: pgsr02 monitor on bl460g8n3 * Pseudo action: grpStonith1_stopped_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stopped_0 * Pseudo action: grpStonith2_start_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 * Pseudo action: msPostgresql_demote_0 * Resource action: pgsr02 stop on bl460g8n4 * Resource action: prmDB2 stop on bl460g8n4 * Pseudo action: stonith-pgsr02-off on pgsr02 * Pseudo action: stonith_complete * Pseudo action: pgsql_post_notify_stop_0 * Pseudo action: pgsql_demote_0 * Pseudo action: msPostgresql_demoted_0 * Pseudo action: msPostgresql_post_notify_demoted_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: master-group_stop_0 * Pseudo action: vip-rep_stop_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: vip-master_stop_0 * Pseudo action: pgsql_stop_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: master-group_stopped_0 * Pseudo action: master-group_start_0 * Resource action: vip-master start on pgsr01 * Resource action: vip-rep start on pgsr01 * Pseudo action: msPostgresql_post_notify_stopped_0 * Pseudo action: master-group_running_0 * Resource action: vip-master monitor=10000 on pgsr01 * Resource action: vip-rep monitor=10000 on pgsr01 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 * Pseudo action: pgsql_notified_0 * Resource action: pgsql monitor=9000 on pgsr01 * Pseudo action: all_stopped * Resource action: prmStonith1-2 start on bl460g8n4 * Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4 * Resource action: prmStonith2-2 start on bl460g8n3 * Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3 * Pseudo action: grpStonith1_running_0 * Pseudo action: grpStonith2_running_0 Using the original execution date of: 2015-08-12 02:53:40Z Revised cluster status: Online: [ bl460g8n3 bl460g8n4 ] Containers: [ pgsr01:prmDB1 ] prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3 prmDB2 (ocf::heartbeat:VirtualDomain): FAILED Resource Group: grpStonith1 prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 Resource Group: grpStonith2 prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 Resource Group: master-group vip-master (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] vip-rep (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ] Master/Slave Set: msPostgresql [pgsql] Masters: [ pgsr01 ] Stopped: [ bl460g8n3 bl460g8n4 ] diff --git a/pengine/test10/bug-lf-2153.summary b/pengine/test10/bug-lf-2153.summary index 99954755f4..e95713ef7e 100644 --- a/pengine/test10/bug-lf-2153.summary +++ b/pengine/test10/bug-lf-2153.summary @@ -1,58 +1,58 @@ Current cluster status: Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby Online: [ alice ] Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] Masters: [ alice ] Slaves: [ bob ] Clone Set: cl_tgtd [res_tgtd] Started: [ alice bob ] Resource Group: rg_iscsivg01 res_portblock_iscsivg01_block (ocf::heartbeat:portblock): Started alice res_lvm_iscsivg01 (ocf::heartbeat:LVM): Started alice res_target_iscsivg01 (ocf::heartbeat:iSCSITarget): Started alice res_lu_iscsivg01_lun1 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_lu_iscsivg01_lun2 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_ip_alicebob01 
(ocf::heartbeat:IPaddr2): Started alice res_portblock_iscsivg01_unblock (ocf::heartbeat:portblock): Started alice Transition Summary: - * Stop res_drbd_iscsivg01:0 (bob) - * Stop res_tgtd:0 (bob) + * Stop res_drbd_iscsivg01:0 (bob) due to node availability + * Stop res_tgtd:0 (bob) due to node availability Executing cluster transition: * Pseudo action: ms_drbd_iscsivg01_pre_notify_stop_0 * Pseudo action: cl_tgtd_stop_0 * Resource action: res_drbd_iscsivg01:0 notify on bob * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_iscsivg01_stop_0 * Resource action: res_tgtd:0 stop on bob * Pseudo action: cl_tgtd_stopped_0 * Resource action: res_drbd_iscsivg01:0 stop on bob * Pseudo action: ms_drbd_iscsivg01_stopped_0 * Pseudo action: ms_drbd_iscsivg01_post_notify_stopped_0 * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby Online: [ alice ] Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] Masters: [ alice ] Stopped: [ bob ] Clone Set: cl_tgtd [res_tgtd] Started: [ alice ] Stopped: [ bob ] Resource Group: rg_iscsivg01 res_portblock_iscsivg01_block (ocf::heartbeat:portblock): Started alice res_lvm_iscsivg01 (ocf::heartbeat:LVM): Started alice res_target_iscsivg01 (ocf::heartbeat:iSCSITarget): Started alice res_lu_iscsivg01_lun1 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_lu_iscsivg01_lun2 (ocf::heartbeat:iSCSILogicalUnit): Started alice res_ip_alicebob01 (ocf::heartbeat:IPaddr2): Started alice res_portblock_iscsivg01_unblock (ocf::heartbeat:portblock): Started alice diff --git a/pengine/test10/bug-lf-2171.summary b/pengine/test10/bug-lf-2171.summary index b1413c3ce1..8c1d8a4e3f 100644 --- a/pengine/test10/bug-lf-2171.summary +++ b/pengine/test10/bug-lf-2171.summary @@ -1,37 +1,37 @@ 3 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ xenserver1 xenserver2 ] Clone Set: cl_res_Dummy1 [res_Dummy1] Started: [ xenserver1 xenserver2 ] Resource Group: gr_Dummy res_Dummy2 (ocf::heartbeat:Dummy): Started xenserver1 res_Dummy3 (ocf::heartbeat:Dummy): Started xenserver1 Transition Summary: - * Stop res_Dummy1:0 (xenserver1) - * Stop res_Dummy1:1 (xenserver2) - * Stop res_Dummy2 (Started xenserver1) - * Stop res_Dummy3 (Started xenserver1) + * Stop res_Dummy1:0 (xenserver1) due to node availability + * Stop res_Dummy1:1 (xenserver2) due to node availability + * Stop res_Dummy2 (Started xenserver1) due to unrunnable cl_res_Dummy1 running + * Stop res_Dummy3 (Started xenserver1) due to unrunnable cl_res_Dummy1 running Executing cluster transition: * Pseudo action: gr_Dummy_stop_0 * Resource action: res_Dummy2 stop on xenserver1 * Resource action: res_Dummy3 stop on xenserver1 * Pseudo action: gr_Dummy_stopped_0 * Pseudo action: cl_res_Dummy1_stop_0 * Resource action: res_Dummy1:1 stop on xenserver1 * Resource action: res_Dummy1:0 stop on xenserver2 * Pseudo action: cl_res_Dummy1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ xenserver1 xenserver2 ] Clone Set: cl_res_Dummy1 [res_Dummy1] Stopped (disabled): [ xenserver1 xenserver2 ] Resource Group: gr_Dummy res_Dummy2 (ocf::heartbeat:Dummy): Stopped res_Dummy3 (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/bug-lf-2361.summary b/pengine/test10/bug-lf-2361.summary index 
b81456cf9a..3089e04873 100644 --- a/pengine/test10/bug-lf-2361.summary +++ b/pengine/test10/bug-lf-2361.summary @@ -1,42 +1,42 @@ Current cluster status: Online: [ alice.demo bob.demo ] dummy1 (ocf::heartbeat:Dummy): Stopped Master/Slave Set: ms_stateful [stateful] Stopped: [ alice.demo bob.demo ] Clone Set: cl_dummy2 [dummy2] Stopped: [ alice.demo bob.demo ] Transition Summary: * Start stateful:0 (alice.demo) * Start stateful:1 (bob.demo) - * Start dummy2:0 (alice.demo - blocked) - * Start dummy2:1 (bob.demo - blocked) + * Start dummy2:0 (alice.demo - blocked) due to unrunnable dummy1 start + * Start dummy2:1 (bob.demo - blocked) due to unrunnable dummy1 start Executing cluster transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: service2:0 delete on alice.demo * Resource action: service2:0 delete on bob.demo * Resource action: service2:1 delete on bob.demo * Resource action: service1 delete on alice.demo * Resource action: service1 delete on bob.demo * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Resource action: stateful:0 start on alice.demo * Resource action: stateful:1 start on bob.demo * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: stateful:0 notify on alice.demo * Resource action: stateful:1 notify on bob.demo * Pseudo action: ms_stateful_confirmed-post_notify_running_0 Revised cluster status: Online: [ alice.demo bob.demo ] dummy1 (ocf::heartbeat:Dummy): Stopped Master/Slave Set: ms_stateful [stateful] Slaves: [ alice.demo bob.demo ] Clone Set: cl_dummy2 [dummy2] Stopped: [ alice.demo bob.demo ] diff --git a/pengine/test10/bug-lf-2422.summary b/pengine/test10/bug-lf-2422.summary index 54341c924d..5e22e677af 100644 --- a/pengine/test10/bug-lf-2422.summary +++ b/pengine/test10/bug-lf-2422.summary @@ -1,81 +1,81 @@ 8 of 21 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] sbd_stonith (stonith:external/sbd): Started qa-suse-2 Clone Set: c-o2stage [o2stage] Started: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] Clone Set: c-ocfs [ocfs] Started: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] Transition Summary: - * Stop o2cb:0 (qa-suse-1) - * Stop cmirror:0 (qa-suse-1) - * Stop o2cb:1 (qa-suse-4) - * Stop cmirror:1 (qa-suse-4) - * Stop o2cb:2 (qa-suse-3) - * Stop cmirror:2 (qa-suse-3) - * Stop o2cb:3 (qa-suse-2) - * Stop cmirror:3 (qa-suse-2) - * Stop ocfs:0 (qa-suse-1) - * Stop ocfs:1 (qa-suse-4) - * Stop ocfs:2 (qa-suse-3) - * Stop ocfs:3 (qa-suse-2) + * Stop o2cb:0 (qa-suse-1) due to node availability + * Stop cmirror:0 (qa-suse-1) due to node availability + * Stop o2cb:1 (qa-suse-4) due to node availability + * Stop cmirror:1 (qa-suse-4) due to node availability + * Stop o2cb:2 (qa-suse-3) due to node availability + * Stop cmirror:2 (qa-suse-3) due to node availability + * Stop o2cb:3 (qa-suse-2) due to node availability + * Stop cmirror:3 (qa-suse-2) due to node availability + * Stop ocfs:0 (qa-suse-1) due to node availability + * Stop ocfs:1 (qa-suse-4) due to node availability + * Stop ocfs:2 (qa-suse-3) due to node availability + * Stop ocfs:3 (qa-suse-2) due to node availability Executing cluster transition: * Resource action: sbd_stonith monitor=15000 on qa-suse-2 * Pseudo action: c-ocfs_stop_0 * Resource action: ocfs:3 stop on qa-suse-2 * Resource action: ocfs:2 stop on qa-suse-3 * Resource action: ocfs:0 stop on qa-suse-4 * Resource action: ocfs:1 stop 
on qa-suse-1 * Pseudo action: c-ocfs_stopped_0 * Pseudo action: c-o2stage_stop_0 * Pseudo action: o2stage:0_stop_0 * Resource action: cmirror:1 stop on qa-suse-1 * Pseudo action: o2stage:1_stop_0 * Resource action: cmirror:0 stop on qa-suse-4 * Pseudo action: o2stage:2_stop_0 * Resource action: cmirror:2 stop on qa-suse-3 * Pseudo action: o2stage:3_stop_0 * Resource action: cmirror:3 stop on qa-suse-2 * Resource action: o2cb:1 stop on qa-suse-1 * Resource action: o2cb:0 stop on qa-suse-4 * Resource action: o2cb:2 stop on qa-suse-3 * Resource action: o2cb:3 stop on qa-suse-2 * Pseudo action: all_stopped * Pseudo action: o2stage:0_stopped_0 * Pseudo action: o2stage:1_stopped_0 * Pseudo action: o2stage:2_stopped_0 * Pseudo action: o2stage:3_stopped_0 * Pseudo action: c-o2stage_stopped_0 Revised cluster status: Online: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] sbd_stonith (stonith:external/sbd): Started qa-suse-2 Clone Set: c-o2stage [o2stage] Resource Group: o2stage:0 dlm (ocf::pacemaker:controld): Started qa-suse-1 clvm (ocf::lvm2:clvmd): Started qa-suse-1 o2cb (ocf::ocfs2:o2cb): Stopped ( disabled ) cmirror (ocf::lvm2:cmirrord): Stopped Resource Group: o2stage:1 dlm (ocf::pacemaker:controld): Started qa-suse-4 clvm (ocf::lvm2:clvmd): Started qa-suse-4 o2cb (ocf::ocfs2:o2cb): Stopped ( disabled ) cmirror (ocf::lvm2:cmirrord): Stopped Resource Group: o2stage:2 dlm (ocf::pacemaker:controld): Started qa-suse-3 clvm (ocf::lvm2:clvmd): Started qa-suse-3 o2cb (ocf::ocfs2:o2cb): Stopped ( disabled ) cmirror (ocf::lvm2:cmirrord): Stopped Resource Group: o2stage:3 dlm (ocf::pacemaker:controld): Started qa-suse-2 clvm (ocf::lvm2:clvmd): Started qa-suse-2 o2cb (ocf::ocfs2:o2cb): Stopped ( disabled ) cmirror (ocf::lvm2:cmirrord): Stopped Clone Set: c-ocfs [ocfs] Stopped: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] diff --git a/pengine/test10/bug-lf-2453.summary b/pengine/test10/bug-lf-2453.summary index 3ff1a6bbdc..398868b230 100644 --- a/pengine/test10/bug-lf-2453.summary +++ b/pengine/test10/bug-lf-2453.summary @@ -1,39 +1,39 @@ 2 of 5 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ domu1 domu2 ] PrimitiveResource1 (ocf::heartbeat:IPaddr2): Started domu1 Clone Set: CloneResource1 [apache] Started: [ domu1 domu2 ] Clone Set: CloneResource2 [DummyResource] Started: [ domu1 domu2 ] Transition Summary: - * Stop PrimitiveResource1 (Started domu1) - * Stop apache:0 (domu1) - * Stop apache:1 (domu2) - * Stop DummyResource:0 (Started domu1) - * Stop DummyResource:1 (Started domu2) + * Stop PrimitiveResource1 (Started domu1) due to required CloneResource2 running + * Stop apache:0 (domu1) due to node availability + * Stop apache:1 (domu2) due to node availability + * Stop DummyResource:0 (Started domu1) due to unrunnable CloneResource1 running + * Stop DummyResource:1 (Started domu2) due to unrunnable CloneResource1 running Executing cluster transition: * Resource action: PrimitiveResource1 stop on domu1 * Pseudo action: CloneResource2_stop_0 * Resource action: DummyResource:1 stop on domu1 * Resource action: DummyResource:0 stop on domu2 * Pseudo action: CloneResource2_stopped_0 * Pseudo action: CloneResource1_stop_0 * Resource action: apache:1 stop on domu1 * Resource action: apache:0 stop on domu2 * Pseudo action: CloneResource1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ domu1 domu2 ] PrimitiveResource1 (ocf::heartbeat:IPaddr2): Stopped Clone Set: CloneResource1 [apache] Stopped (disabled): [ domu1 domu2 ] Clone Set: 
CloneResource2 [DummyResource] Stopped: [ domu1 domu2 ] diff --git a/pengine/test10/bug-lf-2508.summary b/pengine/test10/bug-lf-2508.summary index ddaf1c31ff..5d0d90c48a 100644 --- a/pengine/test10/bug-lf-2508.summary +++ b/pengine/test10/bug-lf-2508.summary @@ -1,112 +1,112 @@ Current cluster status: Node srv02 (71085d5e-1c63-49e0-8c8c-400d610b4182): UNCLEAN (offline) Online: [ srv01 srv03 srv04 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Stopped Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv02 (UNCLEAN) Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Resource Group: grpStonith1:1 prmStonith1-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith1-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Started: [ srv03 srv04 ] Stopped: [ srv01 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Resource Group: grpStonith3:0 prmStonith3-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith3-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Resource Group: grpStonith3:1 prmStonith3-1 (stonith:external/stonith-helper): Started srv01 prmStonith3-3 (stonith:external/ssh): Stopped Started: [ srv04 ] Stopped: [ srv03 ] Clone Set: clnStonith4 [grpStonith4] Resource Group: grpStonith4:1 prmStonith4-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) prmStonith4-3 (stonith:external/ssh): Started srv02 (UNCLEAN) Started: [ srv01 srv03 ] Stopped: [ srv04 ] Transition Summary: * Fence (reboot) srv02 'peer is no longer part of the cluster' * Start Dummy01 (srv01) * Move Dummy02 (Started srv02 -> srv04) - * Stop prmStonith1-1:1 (srv02) - * Stop prmStonith1-3:1 (srv02) - * Stop prmStonith3-1:0 (srv02) - * Stop prmStonith3-3:0 (srv02) + * Stop prmStonith1-1:1 (srv02) due to node availability + * Stop prmStonith1-3:1 (srv02) due to node availability + * Stop prmStonith3-1:0 (srv02) due to node availability + * Stop prmStonith3-3:0 (srv02) due to node availability * Start prmStonith3-3:1 (srv01) - * Stop prmStonith4-1:1 (srv02) - * Stop prmStonith4-3:1 (srv02) + * Stop prmStonith4-1:1 (srv02) due to node availability + * Stop prmStonith4-3:1 (srv02) due to node availability Executing cluster transition: * Pseudo action: Group01_start_0 * Resource action: prmStonith3-1:1 monitor=3600000 on srv01 * Fencing srv02 (reboot) * Pseudo action: Group02_stop_0 * Pseudo action: Dummy02_stop_0 * Pseudo action: clnStonith1_stop_0 * Pseudo action: clnStonith3_stop_0 * Pseudo action: clnStonith4_stop_0 * Pseudo action: stonith_complete * Resource action: Dummy01 start on srv01 * Pseudo action: Group02_stopped_0 * Pseudo action: Group02_start_0 * Resource action: Dummy02 start on srv04 * Pseudo action: grpStonith1:1_stop_0 * Pseudo action: prmStonith1-3:1_stop_0 * Pseudo action: grpStonith3:0_stop_0 * Pseudo action: prmStonith3-3:1_stop_0 * Pseudo action: grpStonith4:1_stop_0 * Pseudo action: prmStonith4-3:1_stop_0 * Pseudo action: Group01_running_0 * Resource action: Dummy01 monitor=10000 on srv01 * Pseudo action: Group02_running_0 * Resource action: Dummy02 monitor=10000 on srv04 * Pseudo action: prmStonith1-1:1_stop_0 * Pseudo action: prmStonith3-1:1_stop_0 * Pseudo action: prmStonith4-1:1_stop_0 * Pseudo action: all_stopped * Pseudo action: grpStonith1:1_stopped_0 * Pseudo action: clnStonith1_stopped_0 * Pseudo action: grpStonith3:0_stopped_0 * Pseudo action: clnStonith3_stopped_0 * Pseudo action: clnStonith3_start_0 
* Pseudo action: grpStonith4:1_stopped_0 * Pseudo action: clnStonith4_stopped_0 * Pseudo action: grpStonith3:1_start_0 * Resource action: prmStonith3-3:1 start on srv01 * Pseudo action: grpStonith3:1_running_0 * Resource action: prmStonith3-3:1 monitor=3600000 on srv01 * Pseudo action: clnStonith3_running_0 Revised cluster status: Online: [ srv01 srv03 srv04 ] OFFLINE: [ srv02 ] Resource Group: Group01 Dummy01 (ocf::heartbeat:Dummy): Started srv01 Resource Group: Group02 Dummy02 (ocf::heartbeat:Dummy): Started srv04 Resource Group: Group03 Dummy03 (ocf::heartbeat:Dummy): Started srv03 Clone Set: clnStonith1 [grpStonith1] Started: [ srv03 srv04 ] Stopped: [ srv01 srv02 ] Clone Set: clnStonith2 [grpStonith2] Started: [ srv01 srv03 srv04 ] Stopped: [ srv02 ] Clone Set: clnStonith3 [grpStonith3] Started: [ srv01 srv04 ] Stopped: [ srv02 srv03 ] Clone Set: clnStonith4 [grpStonith4] Started: [ srv01 srv03 ] Stopped: [ srv02 srv04 ] diff --git a/pengine/test10/bug-lf-2551.summary b/pengine/test10/bug-lf-2551.summary index 1b57ea7fd0..953727782e 100644 --- a/pengine/test10/bug-lf-2551.summary +++ b/pengine/test10/bug-lf-2551.summary @@ -1,226 +1,226 @@ Current cluster status: Node hex-9: UNCLEAN (offline) Online: [ hex-0 hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: base-clone [base-group] Resource Group: base-group:3 dlm (ocf::pacemaker:controld): Started hex-9 (UNCLEAN) o2cb (ocf::ocfs2:o2cb): Started hex-9 (UNCLEAN) clvm (ocf::lvm2:clvmd): Started hex-9 (UNCLEAN) cmirrord (ocf::lvm2:cmirrord): Started hex-9 (UNCLEAN) vg1 (ocf::heartbeat:LVM): Started hex-9 (UNCLEAN) ocfs2-1 (ocf::heartbeat:Filesystem): Started hex-9 (UNCLEAN) Started: [ hex-0 hex-7 hex-8 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) vm-06 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 (ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-9 (UNCLEAN) vm-33 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-34 (ocf::heartbeat:Xen): Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 
vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN) vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped Transition Summary: * Fence (reboot) hex-9 'peer is no longer part of the cluster' * Move fencing-sbd (Started hex-9 -> hex-0) * Move dummy1 (Started hex-9 -> hex-0) - * Stop dlm:3 (hex-9) - * Stop o2cb:3 (hex-9) - * Stop clvm:3 (hex-9) - * Stop cmirrord:3 (hex-9) - * Stop vg1:3 (hex-9) - * Stop ocfs2-1:3 (hex-9) + * Stop dlm:3 (hex-9) due to node availability + * Stop o2cb:3 (hex-9) due to node availability + * Stop clvm:3 (hex-9) due to node availability + * Stop cmirrord:3 (hex-9) due to node availability + * Stop vg1:3 (hex-9) due to node availability + * Stop ocfs2-1:3 (hex-9) due to node availability * Stop vm-03 (hex-9) * Stop vm-06 (hex-9) * Stop vm-09 (hex-9) * Stop vm-13 (hex-9) * Stop vm-17 (hex-9) * Stop vm-21 (hex-9) * Stop vm-25 (hex-9) * Stop vm-29 (hex-9) * Stop vm-33 (hex-9) * Stop vm-37 (hex-9) * Stop vm-41 (hex-9) * Stop vm-45 (hex-9) * Stop vm-49 (hex-9) * Stop vm-53 (hex-9) * Stop vm-57 (hex-9) * Stop vm-61 (hex-9) Executing cluster transition: * Pseudo action: fencing-sbd_stop_0 * Resource action: dummy1 monitor=300000 on hex-8 * Resource action: dummy1 monitor=300000 on hex-7 * Pseudo action: load_stopped_hex-8 * Pseudo action: load_stopped_hex-7 * Pseudo action: load_stopped_hex-0 * Fencing hex-9 (reboot) * Resource action: fencing-sbd start on hex-0 * Pseudo action: dummy1_stop_0 * Pseudo action: vm-03_stop_0 * Pseudo action: vm-06_stop_0 * Pseudo action: vm-09_stop_0 * Pseudo action: vm-13_stop_0 * Pseudo action: vm-17_stop_0 * Pseudo action: vm-21_stop_0 * Pseudo action: vm-25_stop_0 * Pseudo action: vm-29_stop_0 * Pseudo action: vm-33_stop_0 * Pseudo action: vm-37_stop_0 * Pseudo action: vm-41_stop_0 * Pseudo action: vm-45_stop_0 * Pseudo action: vm-49_stop_0 * Pseudo action: vm-53_stop_0 * Pseudo action: vm-57_stop_0 * Pseudo action: vm-61_stop_0 * Pseudo action: stonith_complete * Pseudo action: load_stopped_hex-9 * Resource action: dummy1 start on hex-0 * Pseudo action: base-clone_stop_0 * Resource action: dummy1 monitor=30000 on hex-0 * Pseudo action: base-group:3_stop_0 * Pseudo action: ocfs2-1:3_stop_0 * Pseudo action: vg1:3_stop_0 * Pseudo action: cmirrord:3_stop_0 * Pseudo action: clvm:3_stop_0 * Pseudo action: o2cb:3_stop_0 * Pseudo action: dlm:3_stop_0 * Pseudo action: all_stopped * Pseudo action: base-group:3_stopped_0 * Pseudo action: base-clone_stopped_0 Revised cluster status: Online: [ hex-0 hex-7 hex-8 ] OFFLINE: [ hex-9 ] vm-00 (ocf::heartbeat:Xen): Started hex-0 Clone Set: 
base-clone [base-group] Started: [ hex-0 hex-7 hex-8 ] Stopped: [ hex-9 ] vm-01 (ocf::heartbeat:Xen): Started hex-7 vm-02 (ocf::heartbeat:Xen): Started hex-8 vm-03 (ocf::heartbeat:Xen): Stopped vm-04 (ocf::heartbeat:Xen): Started hex-7 vm-05 (ocf::heartbeat:Xen): Started hex-8 fencing-sbd (stonith:external/sbd): Started hex-0 vm-06 (ocf::heartbeat:Xen): Stopped vm-07 (ocf::heartbeat:Xen): Started hex-7 vm-08 (ocf::heartbeat:Xen): Started hex-8 vm-09 (ocf::heartbeat:Xen): Stopped vm-10 (ocf::heartbeat:Xen): Started hex-0 vm-11 (ocf::heartbeat:Xen): Started hex-7 vm-12 (ocf::heartbeat:Xen): Started hex-8 vm-13 (ocf::heartbeat:Xen): Stopped vm-14 (ocf::heartbeat:Xen): Started hex-0 vm-15 (ocf::heartbeat:Xen): Started hex-7 vm-16 (ocf::heartbeat:Xen): Started hex-8 vm-17 (ocf::heartbeat:Xen): Stopped vm-18 (ocf::heartbeat:Xen): Started hex-0 vm-19 (ocf::heartbeat:Xen): Started hex-7 vm-20 (ocf::heartbeat:Xen): Started hex-8 vm-21 (ocf::heartbeat:Xen): Stopped vm-22 (ocf::heartbeat:Xen): Started hex-0 vm-23 (ocf::heartbeat:Xen): Started hex-7 vm-24 (ocf::heartbeat:Xen): Started hex-8 vm-25 (ocf::heartbeat:Xen): Stopped vm-26 (ocf::heartbeat:Xen): Started hex-0 vm-27 (ocf::heartbeat:Xen): Started hex-7 vm-28 (ocf::heartbeat:Xen): Started hex-8 vm-29 (ocf::heartbeat:Xen): Stopped vm-30 (ocf::heartbeat:Xen): Started hex-0 vm-31 (ocf::heartbeat:Xen): Started hex-7 vm-32 (ocf::heartbeat:Xen): Started hex-8 dummy1 (ocf::heartbeat:Dummy): Started hex-0 vm-33 (ocf::heartbeat:Xen): Stopped vm-34 (ocf::heartbeat:Xen): Started hex-0 vm-35 (ocf::heartbeat:Xen): Started hex-7 vm-36 (ocf::heartbeat:Xen): Started hex-8 vm-37 (ocf::heartbeat:Xen): Stopped vm-38 (ocf::heartbeat:Xen): Started hex-0 vm-39 (ocf::heartbeat:Xen): Started hex-7 vm-40 (ocf::heartbeat:Xen): Started hex-8 vm-41 (ocf::heartbeat:Xen): Stopped vm-42 (ocf::heartbeat:Xen): Started hex-0 vm-43 (ocf::heartbeat:Xen): Started hex-7 vm-44 (ocf::heartbeat:Xen): Started hex-8 vm-45 (ocf::heartbeat:Xen): Stopped vm-46 (ocf::heartbeat:Xen): Started hex-0 vm-47 (ocf::heartbeat:Xen): Started hex-7 vm-48 (ocf::heartbeat:Xen): Started hex-8 vm-49 (ocf::heartbeat:Xen): Stopped vm-50 (ocf::heartbeat:Xen): Started hex-0 vm-51 (ocf::heartbeat:Xen): Started hex-7 vm-52 (ocf::heartbeat:Xen): Started hex-8 vm-53 (ocf::heartbeat:Xen): Stopped vm-54 (ocf::heartbeat:Xen): Started hex-0 vm-55 (ocf::heartbeat:Xen): Started hex-7 vm-56 (ocf::heartbeat:Xen): Started hex-8 vm-57 (ocf::heartbeat:Xen): Stopped vm-58 (ocf::heartbeat:Xen): Started hex-0 vm-59 (ocf::heartbeat:Xen): Started hex-7 vm-60 (ocf::heartbeat:Xen): Started hex-8 vm-61 (ocf::heartbeat:Xen): Stopped vm-62 (ocf::heartbeat:Xen): Stopped vm-63 (ocf::heartbeat:Xen): Stopped vm-64 (ocf::heartbeat:Xen): Stopped diff --git a/pengine/test10/bug-lf-2574.summary b/pengine/test10/bug-lf-2574.summary index 3024a7373f..800453cc73 100644 --- a/pengine/test10/bug-lf-2574.summary +++ b/pengine/test10/bug-lf-2574.summary @@ -1,37 +1,37 @@ Current cluster status: Online: [ srv01 srv02 srv03 ] main_rsc (ocf::pacemaker:Dummy): Started srv01 main_rsc2 (ocf::pacemaker:Dummy): Started srv02 Clone Set: clnDummy1 [prmDummy1] Started: [ srv02 srv03 ] Stopped: [ srv01 ] Clone Set: clnPingd [prmPingd] Started: [ srv01 srv02 srv03 ] Transition Summary: * Move main_rsc (Started srv01 -> srv03) - * Stop prmPingd:0 (srv01) + * Stop prmPingd:0 (srv01) due to node availability Executing cluster transition: * Resource action: main_rsc stop on srv01 * Pseudo action: clnPingd_stop_0 * Resource action: main_rsc start on srv03 * Resource 
action: prmPingd:0 stop on srv01 * Pseudo action: clnPingd_stopped_0 * Pseudo action: all_stopped * Resource action: main_rsc monitor=10000 on srv03 Revised cluster status: Online: [ srv01 srv02 srv03 ] main_rsc (ocf::pacemaker:Dummy): Started srv03 main_rsc2 (ocf::pacemaker:Dummy): Started srv02 Clone Set: clnDummy1 [prmDummy1] Started: [ srv02 srv03 ] Stopped: [ srv01 ] Clone Set: clnPingd [prmPingd] Started: [ srv02 srv03 ] Stopped: [ srv01 ] diff --git a/pengine/test10/bug-lf-2619.summary b/pengine/test10/bug-lf-2619.summary index 2816ac3ccf..9a2213d5df 100644 --- a/pengine/test10/bug-lf-2619.summary +++ b/pengine/test10/bug-lf-2619.summary @@ -1,99 +1,99 @@ Current cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started act1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started act1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): Started act1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 Clone Set: clnPingd [prmPingd] prmPingd (ocf::pacemaker:ping): FAILED act1 Started: [ act2 act3 sby1 sby2 ] Transition Summary: * Move prmExPostgreSQLDB1 (Started act1 -> sby1) * Move prmFsPostgreSQLDB1-1 (Started act1 -> sby1) * Move prmFsPostgreSQLDB1-2 (Started act1 -> sby1) * Move prmFsPostgreSQLDB1-3 (Started act1 -> sby1) * Move prmIpPostgreSQLDB1 (Started act1 -> sby1) * Move prmApPostgreSQLDB1 (Started act1 -> sby1) - * Stop prmPingd:0 (act1) + * Stop prmPingd:0 (act1) due to node availability Executing cluster transition: * Pseudo action: grpPostgreSQLDB1_stop_0 * Resource action: prmApPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_sby2 * Pseudo action: load_stopped_sby1 * Pseudo action: load_stopped_act3 * Pseudo action: load_stopped_act2 * Resource action: prmIpPostgreSQLDB1 stop on act1 * Resource action: prmFsPostgreSQLDB1-3 stop on act1 * Resource action: prmFsPostgreSQLDB1-2 stop on act1 * Resource action: prmFsPostgreSQLDB1-1 stop on act1 * Resource action: prmExPostgreSQLDB1 stop on act1 * Pseudo action: load_stopped_act1 * Pseudo action: grpPostgreSQLDB1_stopped_0 * Pseudo action: grpPostgreSQLDB1_start_0 * Resource action: prmExPostgreSQLDB1 start on sby1 * Resource action: prmFsPostgreSQLDB1-1 start on sby1 * Resource action: prmFsPostgreSQLDB1-2 start on sby1 * Resource action: prmFsPostgreSQLDB1-3 start on sby1 * Resource action: prmIpPostgreSQLDB1 start on sby1 * Resource action: prmApPostgreSQLDB1 start on sby1 * Pseudo action: clnPingd_stop_0 * Pseudo action: grpPostgreSQLDB1_running_0 * Resource action: prmExPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on sby1 * Resource action: 
prmFsPostgreSQLDB1-2 monitor=5000 on sby1 * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on sby1 * Resource action: prmIpPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmApPostgreSQLDB1 monitor=5000 on sby1 * Resource action: prmPingd:0 stop on act1 * Pseudo action: clnPingd_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ act1 act2 act3 sby1 sby2 ] Resource Group: grpPostgreSQLDB1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-1 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-2 (ocf::pacemaker:Dummy): Started sby1 prmFsPostgreSQLDB1-3 (ocf::pacemaker:Dummy): Started sby1 prmIpPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 prmApPostgreSQLDB1 (ocf::pacemaker:Dummy): Started sby1 Resource Group: grpPostgreSQLDB2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-1 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-2 (ocf::pacemaker:Dummy): Started act2 prmFsPostgreSQLDB2-3 (ocf::pacemaker:Dummy): Started act2 prmIpPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 prmApPostgreSQLDB2 (ocf::pacemaker:Dummy): Started act2 Resource Group: grpPostgreSQLDB3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-1 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-2 (ocf::pacemaker:Dummy): Started act3 prmFsPostgreSQLDB3-3 (ocf::pacemaker:Dummy): Started act3 prmIpPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 prmApPostgreSQLDB3 (ocf::pacemaker:Dummy): Started act3 Clone Set: clnPingd [prmPingd] Started: [ act2 act3 sby1 sby2 ] Stopped: [ act1 ] diff --git a/pengine/test10/bug-n-385265.summary b/pengine/test10/bug-n-385265.summary index 8f95787ba7..e5bc4bccd5 100644 --- a/pengine/test10/bug-n-385265.summary +++ b/pengine/test10/bug-n-385265.summary @@ -1,24 +1,24 @@ Current cluster status: Online: [ ih01 ih02 ] Resource Group: group_common resource_ip_common (ocf::heartbeat:IPaddr2): Started ih02 resource_idvscommon (ocf::dfs:idvs): FAILED ih02 Transition Summary: - * Stop resource_idvscommon (ih02) + * Stop resource_idvscommon (ih02) due to node availability Executing cluster transition: * Pseudo action: group_common_stop_0 * Resource action: resource_idvscommon stop on ih02 * Pseudo action: all_stopped * Pseudo action: group_common_stopped_0 Revised cluster status: Online: [ ih01 ih02 ] Resource Group: group_common resource_ip_common (ocf::heartbeat:IPaddr2): Started ih02 resource_idvscommon (ocf::dfs:idvs): Stopped diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary index 29dd0187ea..ee21df8615 100644 --- a/pengine/test10/bug-rh-1097457.summary +++ b/pengine/test10/bug-rh-1097457.summary @@ -1,105 +1,105 @@ 2 of 26 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3 VM3 (ocf::heartbeat:VirtualDomain): Started lama3 FSlun3 (ocf::heartbeat:Filesystem): FAILED lamaVM2 FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3 FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3 Resource Group: lamaVM1-G1 FAKE1 
(ocf::heartbeat:Dummy): Started lamaVM1 FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G2 FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G3 FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM2-G4 FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2 FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2 Clone Set: FAKE6-clone [FAKE6] Started: [ lamaVM1 lamaVM2 lamaVM3 ] Transition Summary: * Fence (reboot) lamaVM2 (resource: VM2) 'guest is unclean' * Recover VM2 (Started lama3) * Recover FSlun3 (Started lamaVM2 -> lama2) - * Restart FAKE4 (Started lamaVM2) - * Restart FAKE4-IP (Started lamaVM2) - * Restart FAKE6:2 (Started lamaVM2) - * Restart lamaVM2 (Started lama3) + * Restart FAKE4 (Started lamaVM2) due to required VM2 start + * Restart FAKE4-IP (Started lamaVM2) due to required VM2 start + * Restart FAKE6:2 (Started lamaVM2) due to required VM2 start + * Restart lamaVM2 (Started lama3) due to required VM2 start Executing cluster transition: * Resource action: lamaVM2 stop on lama3 * Resource action: VM2 stop on lama3 * Pseudo action: stonith-lamaVM2-reboot on lamaVM2 * Pseudo action: stonith_complete * Resource action: VM2 start on lama3 * Resource action: VM2 monitor=10000 on lama3 * Pseudo action: lamaVM2-G4_stop_0 * Pseudo action: FAKE4-IP_stop_0 * Pseudo action: FAKE6-clone_stop_0 * Resource action: lamaVM2 start on lama3 * Resource action: lamaVM2 monitor=30000 on lama3 * Resource action: FSlun3 monitor=10000 on lamaVM2 * Pseudo action: FAKE4_stop_0 * Pseudo action: FAKE6_stop_0 * Pseudo action: FAKE6-clone_stopped_0 * Pseudo action: FAKE6-clone_start_0 * Pseudo action: lamaVM2-G4_stopped_0 * Resource action: FAKE6 start on lamaVM2 * Resource action: FAKE6 monitor=30000 on lamaVM2 * Pseudo action: FAKE6-clone_running_0 * Pseudo action: FSlun3_stop_0 * Pseudo action: all_stopped * Resource action: FSlun3 start on lama2 * Pseudo action: lamaVM2-G4_start_0 * Resource action: FAKE4 start on lamaVM2 * Resource action: FAKE4 monitor=30000 on lamaVM2 * Resource action: FAKE4-IP start on lamaVM2 * Resource action: FAKE4-IP monitor=30000 on lamaVM2 * Resource action: FSlun3 monitor=10000 on lama2 * Pseudo action: lamaVM2-G4_running_0 Revised cluster status: Online: [ lama2 lama3 ] Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 VM1 (ocf::heartbeat:VirtualDomain): Started lama2 FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1 FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1 VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3 VM3 (ocf::heartbeat:VirtualDomain): Started lama3 FSlun3 (ocf::heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ] FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3 FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled ) FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3 Resource Group: lamaVM1-G1 FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G2 FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM1-G3 FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1 FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1 Resource Group: lamaVM2-G4 FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2 FAKE4-IP (ocf::heartbeat:IPaddr2): Started 
lamaVM2 Clone Set: FAKE6-clone [FAKE6] Started: [ lamaVM1 lamaVM2 lamaVM3 ] diff --git a/pengine/test10/bug-suse-707150.summary b/pengine/test10/bug-suse-707150.summary index da1d5c4f87..d6922abf10 100644 --- a/pengine/test10/bug-suse-707150.summary +++ b/pengine/test10/bug-suse-707150.summary @@ -1,72 +1,72 @@ 9 of 28 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ hex-0 hex-9 ] OFFLINE: [ hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Stopped ( disabled ) Clone Set: base-clone [base-group] Resource Group: base-group:0 dlm (ocf::pacemaker:controld): Started hex-0 o2cb (ocf::ocfs2:o2cb): Stopped clvm (ocf::lvm2:clvmd): Stopped cmirrord (ocf::lvm2:cmirrord): Stopped vg1 (ocf::heartbeat:LVM): Stopped ( disabled ) ocfs2-1 (ocf::heartbeat:Filesystem): Stopped Stopped: [ hex-7 hex-8 hex-9 ] vm-01 (ocf::heartbeat:Xen): Stopped fencing-sbd (stonith:external/sbd): Started hex-9 dummy1 (ocf::heartbeat:Dummy): Started hex-0 Transition Summary: * Start o2cb:0 (hex-0) * Start clvm:0 (hex-0) * Start cmirrord:0 (hex-0) * Start dlm:1 (hex-9) * Start o2cb:1 (hex-9) * Start clvm:1 (hex-9) * Start cmirrord:1 (hex-9) - * Start vm-01 (hex-9 - blocked) + * Start vm-01 (hex-9 - blocked) due to unrunnable base-clone running Executing cluster transition: * Resource action: vg1:1 monitor on hex-9 * Pseudo action: base-clone_start_0 * Pseudo action: load_stopped_hex-9 * Pseudo action: load_stopped_hex-8 * Pseudo action: load_stopped_hex-7 * Pseudo action: load_stopped_hex-0 * Pseudo action: base-group:0_start_0 * Resource action: o2cb:0 start on hex-0 * Resource action: clvm:0 start on hex-0 * Resource action: cmirrord:0 start on hex-0 * Pseudo action: base-group:1_start_0 * Resource action: dlm:1 start on hex-9 * Resource action: o2cb:1 start on hex-9 * Resource action: clvm:1 start on hex-9 * Resource action: cmirrord:1 start on hex-9 Revised cluster status: Online: [ hex-0 hex-9 ] OFFLINE: [ hex-7 hex-8 ] vm-00 (ocf::heartbeat:Xen): Stopped ( disabled ) Clone Set: base-clone [base-group] Resource Group: base-group:0 dlm (ocf::pacemaker:controld): Started hex-0 o2cb (ocf::ocfs2:o2cb): Started hex-0 clvm (ocf::lvm2:clvmd): Started hex-0 cmirrord (ocf::lvm2:cmirrord): Started hex-0 vg1 (ocf::heartbeat:LVM): Stopped ( disabled ) ocfs2-1 (ocf::heartbeat:Filesystem): Stopped Resource Group: base-group:1 dlm (ocf::pacemaker:controld): Started hex-9 o2cb (ocf::ocfs2:o2cb): Started hex-9 clvm (ocf::lvm2:clvmd): Started hex-9 cmirrord (ocf::lvm2:cmirrord): Started hex-9 vg1 (ocf::heartbeat:LVM): Stopped ( disabled ) ocfs2-1 (ocf::heartbeat:Filesystem): Stopped Stopped: [ hex-7 hex-8 ] vm-01 (ocf::heartbeat:Xen): Stopped fencing-sbd (stonith:external/sbd): Started hex-9 dummy1 (ocf::heartbeat:Dummy): Started hex-0 diff --git a/pengine/test10/bundle-order-partial-start-2.summary b/pengine/test10/bundle-order-partial-start-2.summary index 5e3927cb6d..d67f8fc1fa 100644 --- a/pengine/test10/bundle-order-partial-start-2.summary +++ b/pengine/test10/bundle-order-partial-start-2.summary @@ -1,84 +1,84 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 
(ocf::heartbeat:galera): Stopped undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Slave undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Start rabbitmq:0 (rabbitmq-bundle-0) - * Restart galera-bundle-docker-0 (Started undercloud) - * Restart galera-bundle-0 (Started undercloud) + * Restart galera-bundle-docker-0 (Started undercloud) due to required haproxy-bundle running + * Restart galera-bundle-0 (Started undercloud) due to required galera-bundle-docker-0 start * Start galera:0 (galera-bundle-0) * Promote redis:0 (Slave -> Master redis-bundle-0) * Start haproxy-bundle-docker-0 (undercloud) Executing cluster transition: * Resource action: galera-bundle-0 stop on undercloud * Resource action: haproxy-bundle-docker-0 monitor on undercloud * Pseudo action: haproxy-bundle_start_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Pseudo action: rabbitmq-bundle_start_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Resource action: redis promote on redis-bundle-0 * Resource action: haproxy-bundle-docker-0 start on undercloud * Pseudo action: haproxy-bundle_running_0 * Pseudo action: redis-bundle-master_promoted_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Pseudo action: all_stopped * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Resource action: redis monitor=20000 on redis-bundle-0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud * Pseudo action: redis-bundle_promoted_0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Pseudo action: galera-bundle_start_0 * Resource action: galera-bundle-docker-0 start on undercloud * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud * Resource action: galera-bundle-0 start on undercloud * Resource action: galera-bundle-0 monitor=60000 on undercloud * Pseudo action: galera-bundle-master_start_0 * Resource action: galera:0 start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 Revised cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Slave 
undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud diff --git a/pengine/test10/bundle-order-partial-stop.summary b/pengine/test10/bundle-order-partial-stop.summary index b30a237b31..e7bac73658 100644 --- a/pengine/test10/bundle-order-partial-stop.summary +++ b/pengine/test10/bundle-order-partial-stop.summary @@ -1,113 +1,113 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Shutdown undercloud - * Stop rabbitmq-bundle-docker-0 (undercloud) - * Stop rabbitmq-bundle-0 (undercloud) - * Stop rabbitmq:0 (Started rabbitmq-bundle-0) - * Stop galera-bundle-docker-0 (undercloud) - * Stop galera-bundle-0 (undercloud) + * Stop rabbitmq-bundle-docker-0 (undercloud) due to node availability + * Stop rabbitmq-bundle-0 (undercloud) due to node availability + * Stop rabbitmq:0 (Started rabbitmq-bundle-0) due to unrunnable rabbitmq-bundle-0 start + * Stop galera-bundle-docker-0 (undercloud) due to node availability + * Stop galera-bundle-0 (undercloud) due to node availability * Demote galera:0 (Master -> Slave galera-bundle-0) - * Restart galera:0 (Slave galera-bundle-0) - * Stop redis-bundle-docker-0 (undercloud) - * Stop redis-bundle-0 (undercloud) + * Restart galera:0 (Slave galera-bundle-0) due to unrunnable galera-bundle-0 start + * Stop redis-bundle-docker-0 (undercloud) due to node availability + * Stop 
redis-bundle-0 (undercloud) due to node availability * Demote redis:0 (Master -> Slave redis-bundle-0) - * Restart redis:0 (Slave redis-bundle-0) - * Stop ip-192.168.122.254 (undercloud) - * Stop ip-192.168.122.250 (undercloud) - * Stop ip-192.168.122.249 (undercloud) - * Stop ip-192.168.122.253 (undercloud) - * Stop ip-192.168.122.247 (undercloud) - * Stop ip-192.168.122.248 (undercloud) - * Stop haproxy-bundle-docker-0 (undercloud) - * Stop openstack-cinder-volume-docker-0 (undercloud) + * Restart redis:0 (Slave redis-bundle-0) due to unrunnable redis-bundle-0 start + * Stop ip-192.168.122.254 (undercloud) due to node availability + * Stop ip-192.168.122.250 (undercloud) due to node availability + * Stop ip-192.168.122.249 (undercloud) due to node availability + * Stop ip-192.168.122.253 (undercloud) due to node availability + * Stop ip-192.168.122.247 (undercloud) due to node availability + * Stop ip-192.168.122.248 (undercloud) due to node availability + * Stop haproxy-bundle-docker-0 (undercloud) due to node availability + * Stop openstack-cinder-volume-docker-0 (undercloud) due to node availability Executing cluster transition: * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: galera-bundle-master_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: galera demote on galera-bundle-0 * Resource action: redis demote on redis-bundle-0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Pseudo action: redis-bundle_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on undercloud * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on redis-bundle-0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud * Pseudo action: all_stopped Revised cluster status: Online: [ undercloud ] Docker container: rabbitmq-bundle 
[192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Stopped diff --git a/pengine/test10/bundle-order-startup-clone.summary b/pengine/test10/bundle-order-startup-clone.summary index f3f8be0270..0acfd1e4bc 100644 --- a/pengine/test10/bundle-order-startup-clone.summary +++ b/pengine/test10/bundle-order-startup-clone.summary @@ -1,69 +1,69 @@ Current cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Clone Set: storage-clone [storage] Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped Transition Summary: - * Start storage:0 (metal-1 - blocked) - * Start storage:1 (metal-2 - blocked) - * Start storage:2 (metal-3 - blocked) - * Start galera-bundle-docker-0 (metal-1 - blocked) - * Start galera-bundle-0 (metal-1 - blocked) - * Start galera:0 (galera-bundle-0 - blocked) + * Start storage:0 (metal-1 - blocked) due to unrunnable redis-bundle promoted + * Start storage:1 (metal-2 - blocked) due to unrunnable redis-bundle promoted + * Start storage:2 (metal-3 - blocked) due to unrunnable redis-bundle promoted + * Start galera-bundle-docker-0 (metal-1 - blocked) due to unrunnable storage-clone notified + * Start galera-bundle-0 (metal-1 - blocked) due to unrunnable galera-bundle-docker-0 start + * Start galera:0 (galera-bundle-0 - blocked) due to unrunnable galera-bundle-docker-0 start * Start haproxy-bundle-docker-0 (metal-2) * Start redis-bundle-docker-0 (metal-2) * Start redis-bundle-0 (metal-2) * Start redis:0 (redis-bundle-0) Executing cluster transition: * Resource action: storage:0 monitor on metal-1 * Resource action: storage:1 monitor on metal-2 * Resource action: storage:2 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-2 * Resource action: galera-bundle-docker-0 monitor on metal-1 * Resource action: haproxy-bundle-docker-0 monitor on metal-3 * Resource action: haproxy-bundle-docker-0 monitor on metal-2 * Resource action: haproxy-bundle-docker-0 monitor on metal-1 * Resource action: redis-bundle-docker-0 monitor on metal-3 * Resource action: 
redis-bundle-docker-0 monitor on metal-2 * Resource action: redis-bundle-docker-0 monitor on metal-1 * Pseudo action: redis-bundle_start_0 * Pseudo action: haproxy-bundle_start_0 * Resource action: haproxy-bundle-docker-0 start on metal-2 * Resource action: redis-bundle-docker-0 start on metal-2 * Resource action: redis-bundle-0 start on metal-2 * Pseudo action: redis-bundle-master_start_0 * Pseudo action: haproxy-bundle_running_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-2 * Resource action: redis:0 start on redis-bundle-0 * Resource action: redis-bundle-docker-0 monitor=60000 on metal-2 * Resource action: redis-bundle-0 monitor=60000 on metal-2 * Pseudo action: redis-bundle-master_running_0 * Pseudo action: redis-bundle_running_0 * Resource action: redis:0 monitor=60000 on redis-bundle-0 * Resource action: redis:0 monitor=45000 on redis-bundle-0 Revised cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ redis-bundle-0:redis-bundle-docker-0 ] Clone Set: storage-clone [storage] Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-2 Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Slave metal-2 diff --git a/pengine/test10/bundle-order-stop-clone.summary b/pengine/test10/bundle-order-stop-clone.summary index 404eecde99..9a6b0f2901 100644 --- a/pengine/test10/bundle-order-stop-clone.summary +++ b/pengine/test10/bundle-order-stop-clone.summary @@ -1,75 +1,75 @@ Current cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] Clone Set: storage-clone [storage] Started: [ metal-1 metal-2 metal-3 ] Stopped: [ rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Slave metal-1 galera-bundle-1 (ocf::heartbeat:galera): Slave metal-2 galera-bundle-2 (ocf::heartbeat:galera): Slave metal-3 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-1 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started metal-2 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started metal-3 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master metal-1 redis-bundle-1 (ocf::heartbeat:redis): Master metal-2 redis-bundle-2 (ocf::heartbeat:redis): Master metal-3 Transition Summary: - * Stop storage:0 (metal-1) - * Stop galera-bundle-docker-0 (metal-1) - * Stop galera-bundle-0 (Started metal-1) - * Stop galera:0 (Slave galera-bundle-0) + * Stop storage:0 (metal-1) due to node availability + * Stop galera-bundle-docker-0 (metal-1) due to node availability + * Stop galera-bundle-0 (Started metal-1) due to unrunnable galera-bundle-docker-0 start + * Stop galera:0 (Slave galera-bundle-0) due to 
unrunnable galera-bundle-docker-0 start Executing cluster transition: * Pseudo action: storage-clone_pre_notify_stop_0 * Pseudo action: galera-bundle_stop_0 * Resource action: storage:0 notify on metal-1 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-pre_notify_stop_0 * Pseudo action: galera-bundle-master_stop_0 * Resource action: galera:0 stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on metal-1 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on metal-1 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: storage-clone_stop_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: storage:0 stop on metal-1 * Pseudo action: storage-clone_stopped_0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: storage-clone_post_notify_stopped_0 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ metal-1 metal-2 metal-3 ] RemoteOFFLINE: [ rabbitmq-bundle-0 ] Containers: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] Clone Set: storage-clone [storage] Started: [ metal-2 metal-3 ] Stopped: [ metal-1 rabbitmq-bundle-0 ] Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped galera-bundle-1 (ocf::heartbeat:galera): Slave metal-2 galera-bundle-2 (ocf::heartbeat:galera): Slave metal-3 Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started metal-1 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started metal-2 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started metal-3 Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master metal-1 redis-bundle-1 (ocf::heartbeat:redis): Master metal-2 redis-bundle-2 (ocf::heartbeat:redis): Master metal-3 diff --git a/pengine/test10/bundle-order-stop.summary b/pengine/test10/bundle-order-stop.summary index b30a237b31..e7bac73658 100644 --- a/pengine/test10/bundle-order-stop.summary +++ b/pengine/test10/bundle-order-stop.summary @@ -1,113 +1,113 @@ Current cluster status: Online: [ undercloud ] Containers: [ galera-bundle-0:galera-bundle-docker-0 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 redis-bundle-0:redis-bundle-docker-0 ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started undercloud Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Master undercloud Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Master undercloud ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.253 
(ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Started undercloud ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Started undercloud Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started undercloud Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started undercloud Transition Summary: * Shutdown undercloud - * Stop rabbitmq-bundle-docker-0 (undercloud) - * Stop rabbitmq-bundle-0 (undercloud) - * Stop rabbitmq:0 (Started rabbitmq-bundle-0) - * Stop galera-bundle-docker-0 (undercloud) - * Stop galera-bundle-0 (undercloud) + * Stop rabbitmq-bundle-docker-0 (undercloud) due to node availability + * Stop rabbitmq-bundle-0 (undercloud) due to node availability + * Stop rabbitmq:0 (Started rabbitmq-bundle-0) due to unrunnable rabbitmq-bundle-0 start + * Stop galera-bundle-docker-0 (undercloud) due to node availability + * Stop galera-bundle-0 (undercloud) due to node availability * Demote galera:0 (Master -> Slave galera-bundle-0) - * Restart galera:0 (Slave galera-bundle-0) - * Stop redis-bundle-docker-0 (undercloud) - * Stop redis-bundle-0 (undercloud) + * Restart galera:0 (Slave galera-bundle-0) due to unrunnable galera-bundle-0 start + * Stop redis-bundle-docker-0 (undercloud) due to node availability + * Stop redis-bundle-0 (undercloud) due to node availability * Demote redis:0 (Master -> Slave redis-bundle-0) - * Restart redis:0 (Slave redis-bundle-0) - * Stop ip-192.168.122.254 (undercloud) - * Stop ip-192.168.122.250 (undercloud) - * Stop ip-192.168.122.249 (undercloud) - * Stop ip-192.168.122.253 (undercloud) - * Stop ip-192.168.122.247 (undercloud) - * Stop ip-192.168.122.248 (undercloud) - * Stop haproxy-bundle-docker-0 (undercloud) - * Stop openstack-cinder-volume-docker-0 (undercloud) + * Restart redis:0 (Slave redis-bundle-0) due to unrunnable redis-bundle-0 start + * Stop ip-192.168.122.254 (undercloud) due to node availability + * Stop ip-192.168.122.250 (undercloud) due to node availability + * Stop ip-192.168.122.249 (undercloud) due to node availability + * Stop ip-192.168.122.253 (undercloud) due to node availability + * Stop ip-192.168.122.247 (undercloud) due to node availability + * Stop ip-192.168.122.248 (undercloud) due to node availability + * Stop haproxy-bundle-docker-0 (undercloud) due to node availability + * Stop openstack-cinder-volume-docker-0 (undercloud) due to node availability Executing cluster transition: * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: galera-bundle-master_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: galera demote on galera-bundle-0 * Resource action: redis demote on redis-bundle-0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Pseudo action: redis-bundle_demoted_0 * Pseudo action: 
galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera stop on galera-bundle-0 * Resource action: galera-bundle-0 stop on undercloud * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on redis-bundle-0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud * Pseudo action: all_stopped Revised cluster status: Online: [ undercloud ] Docker container: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Stopped Docker container: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest] galera-bundle-0 (ocf::heartbeat:galera): Stopped Docker container: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest] redis-bundle-0 (ocf::heartbeat:redis): Stopped ip-192.168.122.254 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.250 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.249 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.253 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.247 (ocf::heartbeat:IPaddr2): Stopped ip-192.168.122.248 (ocf::heartbeat:IPaddr2): Stopped Docker container: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Stopped Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Stopped diff --git a/pengine/test10/clone-anon-dup.summary b/pengine/test10/clone-anon-dup.summary index 6ad247f461..843972d17d 100644 --- a/pengine/test10/clone-anon-dup.summary +++ b/pengine/test10/clone-anon-dup.summary @@ -1,34 +1,34 @@ Current cluster status: Online: [ wc01 wc02 wc03 ] stonith-1 (stonith:dummy): Stopped Clone Set: clone_webservice [group_webservice] Resource Group: group_webservice:2 fs_www (ocf::heartbeat:Filesystem): ORPHANED Stopped apache2 (ocf::heartbeat:apache): ORPHANED Started wc02 Started: [ wc01 wc02 ] Transition Summary: * Start stonith-1 (wc01) - * Stop apache2:2 (wc02) + * Stop apache2:2 (wc02) due to node availability Executing cluster transition: * Resource action: stonith-1 monitor on wc03 * Resource action: stonith-1 monitor on wc02 * Resource action: stonith-1 monitor on wc01 * Pseudo action: clone_webservice_stop_0 * Resource action: stonith-1 start on wc01 * Pseudo action: group_webservice:2_stop_0 * Resource action: apache2:0 stop on wc02 * Pseudo action: 
all_stopped * Pseudo action: group_webservice:2_stopped_0 * Pseudo action: clone_webservice_stopped_0 Revised cluster status: Online: [ wc01 wc02 wc03 ] stonith-1 (stonith:dummy): Started wc01 Clone Set: clone_webservice [group_webservice] Started: [ wc01 wc02 ] diff --git a/pengine/test10/clone-anon-failcount.summary b/pengine/test10/clone-anon-failcount.summary index cd4349c037..3fb39e3306 100644 --- a/pengine/test10/clone-anon-failcount.summary +++ b/pengine/test10/clone-anon-failcount.summary @@ -1,118 +1,118 @@ Current cluster status: Online: [ srv01 srv02 srv03 srv04 ] Resource Group: UMgroup01 UmVIPcheck (ocf::pacemaker:Dummy): Started srv01 UmIPaddr (ocf::pacemaker:Dummy): Started srv01 UmDummy01 (ocf::pacemaker:Dummy): Started srv01 UmDummy02 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started srv02 Resource Group: OVDBgroup02-3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started srv03 Resource Group: grpStonith1 prmStonithN1 (stonith:external/ssh): Started srv04 Resource Group: grpStonith2 prmStonithN2 (stonith:external/ssh): Started srv01 Resource Group: grpStonith3 prmStonithN3 (stonith:external/ssh): Started srv02 Resource Group: grpStonith4 prmStonithN4 (stonith:external/ssh): Started srv03 Clone Set: clnUMgroup01 [clnUmResource] Resource Group: clnUmResource:0 clnUMdummy01 (ocf::pacemaker:Dummy): FAILED srv04 clnUMdummy02 (ocf::pacemaker:Dummy): Started srv04 Started: [ srv01 ] Stopped: [ srv02 srv03 ] Clone Set: clnPingd [clnPrmPingd] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnDiskd1 [clnPrmDiskd1] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy1 [clnG3dummy01] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy2 [clnG3dummy02] Started: [ srv01 srv02 srv03 srv04 ] Transition Summary: * Move UmVIPcheck (Started srv01 -> srv04) * Move UmIPaddr (Started srv01 -> srv04) * Move UmDummy01 (Started srv01 -> srv04) * Move UmDummy02 (Started srv01 -> srv04) * Recover clnUMdummy01:0 (Started srv04) - * Restart clnUMdummy02:0 (Started srv04) - * Stop clnUMdummy01:1 (srv01) - * Stop clnUMdummy02:1 (srv01) + * Restart clnUMdummy02:0 (Started srv04) due to required clnUMdummy01:0 start + * Stop clnUMdummy01:1 (srv01) due to node availability + * Stop clnUMdummy02:1 (srv01) due to node availability Executing cluster transition: * Pseudo action: UMgroup01_stop_0 * Resource action: UmDummy02 stop on srv01 * Resource action: UmDummy01 stop on srv01 * Resource action: UmIPaddr stop on srv01 * Resource action: UmVIPcheck stop on srv01 * Pseudo action: UMgroup01_stopped_0 * Pseudo action: clnUMgroup01_stop_0 * Pseudo action: clnUmResource:0_stop_0 * Resource action: clnUMdummy02:1 stop on srv04 * Pseudo action: clnUmResource:1_stop_0 * Resource action: clnUMdummy02:0 stop on srv01 * Resource action: clnUMdummy01:1 stop on srv04 * Resource action: clnUMdummy01:0 stop on srv01 * Pseudo action: all_stopped * Pseudo action: clnUmResource:0_stopped_0 * Pseudo action: clnUmResource:1_stopped_0 * Pseudo action: clnUMgroup01_stopped_0 * Pseudo action: clnUMgroup01_start_0 * Pseudo action: clnUmResource:0_start_0 * Resource action: clnUMdummy01:1 start on srv04 * Resource action: clnUMdummy01:1 monitor=10000 on srv04 * Resource action: clnUMdummy02:1 start on srv04 * Resource action: clnUMdummy02:1 monitor=10000 on srv04 * Pseudo action: clnUmResource:0_running_0 * Pseudo action: clnUMgroup01_running_0 * Pseudo action: 
UMgroup01_start_0 * Resource action: UmVIPcheck start on srv04 * Resource action: UmIPaddr start on srv04 * Resource action: UmDummy01 start on srv04 * Resource action: UmDummy02 start on srv04 * Pseudo action: UMgroup01_running_0 * Resource action: UmIPaddr monitor=10000 on srv04 * Resource action: UmDummy01 monitor=10000 on srv04 * Resource action: UmDummy02 monitor=10000 on srv04 Revised cluster status: Online: [ srv01 srv02 srv03 srv04 ] Resource Group: UMgroup01 UmVIPcheck (ocf::pacemaker:Dummy): Started srv04 UmIPaddr (ocf::pacemaker:Dummy): Started srv04 UmDummy01 (ocf::pacemaker:Dummy): Started srv04 UmDummy02 (ocf::pacemaker:Dummy): Started srv04 Resource Group: OVDBgroup02-1 prmExPostgreSQLDB1 (ocf::pacemaker:Dummy): Started srv01 Resource Group: OVDBgroup02-2 prmExPostgreSQLDB2 (ocf::pacemaker:Dummy): Started srv02 Resource Group: OVDBgroup02-3 prmExPostgreSQLDB3 (ocf::pacemaker:Dummy): Started srv03 Resource Group: grpStonith1 prmStonithN1 (stonith:external/ssh): Started srv04 Resource Group: grpStonith2 prmStonithN2 (stonith:external/ssh): Started srv01 Resource Group: grpStonith3 prmStonithN3 (stonith:external/ssh): Started srv02 Resource Group: grpStonith4 prmStonithN4 (stonith:external/ssh): Started srv03 Clone Set: clnUMgroup01 [clnUmResource] Started: [ srv04 ] Stopped: [ srv01 srv02 srv03 ] Clone Set: clnPingd [clnPrmPingd] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnDiskd1 [clnPrmDiskd1] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy1 [clnG3dummy01] Started: [ srv01 srv02 srv03 srv04 ] Clone Set: clnG3dummy2 [clnG3dummy02] Started: [ srv01 srv02 srv03 srv04 ] diff --git a/pengine/test10/clone-interleave-2.summary b/pengine/test10/clone-interleave-2.summary index b15a546081..78d46cdc28 100644 --- a/pengine/test10/clone-interleave-2.summary +++ b/pengine/test10/clone-interleave-2.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-3 [child-3] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: - * Restart dummy (Started pcmk-1) - * Stop child-2:0 (pcmk-1) + * Restart dummy (Started pcmk-1) due to required clone-3 running + * Stop child-2:0 (pcmk-1) due to node availability * Stop child-3:0 (pcmk-1) Executing cluster transition: * Resource action: dummy stop on pcmk-1 * Pseudo action: clone-3_stop_0 * Resource action: child-3:2 stop on pcmk-1 * Pseudo action: clone-3_stopped_0 * Pseudo action: clone-3_start_0 * Pseudo action: clone-2_stop_0 * Pseudo action: clone-3_running_0 * Resource action: dummy start on pcmk-1 * Resource action: child-2:2 stop on pcmk-1 * Pseudo action: clone-2_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: clone-2 [child-2] Started: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-1 ] Clone Set: clone-3 [child-3] Started: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-1 ] diff --git a/pengine/test10/clone-interleave-3.summary b/pengine/test10/clone-interleave-3.summary index 004fa54c38..8b13dc43f0 100644 --- a/pengine/test10/clone-interleave-3.summary +++ b/pengine/test10/clone-interleave-3.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] dummy (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: clone-1 [child-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] 
Clone Set: clone-2 [child-2]
child-2 (ocf::pacemaker:Dummy): FAILED pcmk-1
Started: [ pcmk-2 pcmk-3 ]
Clone Set: clone-3 [child-3]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
Transition Summary:
- * Restart dummy (Started pcmk-1)
+ * Restart dummy (Started pcmk-1) due to required clone-3 running
* Recover child-2:0 (Started pcmk-1)
- * Restart child-3:0 (Started pcmk-1)
+ * Restart child-3:0 (Started pcmk-1) due to required child-2:0 start
Executing cluster transition:
* Resource action: dummy stop on pcmk-1
* Pseudo action: clone-3_stop_0
* Resource action: child-3:2 stop on pcmk-1
* Pseudo action: clone-3_stopped_0
* Pseudo action: clone-2_stop_0
* Resource action: child-2:2 stop on pcmk-1
* Pseudo action: clone-2_stopped_0
* Pseudo action: clone-2_start_0
* Pseudo action: all_stopped
* Resource action: child-2:2 start on pcmk-1
* Pseudo action: clone-2_running_0
* Pseudo action: clone-3_start_0
* Resource action: child-3:2 start on pcmk-1
* Pseudo action: clone-3_running_0
* Resource action: dummy start on pcmk-1
Revised cluster status:
Online: [ pcmk-1 pcmk-2 pcmk-3 ]
dummy (ocf::pacemaker:Dummy): Started pcmk-1
Clone Set: clone-1 [child-1]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
Clone Set: clone-2 [child-2]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
Clone Set: clone-3 [child-3]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/pengine/test10/clone-max-zero.summary b/pengine/test10/clone-max-zero.summary
index 38aa6baf1c..33c4e89aa7 100644
--- a/pengine/test10/clone-max-zero.summary
+++ b/pengine/test10/clone-max-zero.summary
@@ -1,50 +1,50 @@
Current cluster status:
Online: [ c001n11 c001n12 ]
fencing (stonith:external/ssh): Started c001n11
Clone Set: dlm-clone [dlm]
dlm (ocf::pacemaker:controld): ORPHANED Started c001n12
dlm (ocf::pacemaker:controld): ORPHANED Started c001n11
Clone Set: o2cb-clone [o2cb]
Started: [ c001n11 c001n12 ]
Clone Set: clone-drbd0 [drbd0]
Started: [ c001n11 c001n12 ]
Clone Set: c-ocfs2-1 [ocfs2-1]
Started: [ c001n11 c001n12 ]
Transition Summary:
- * Stop dlm:0 (c001n12)
- * Stop dlm:1 (c001n11)
- * Stop o2cb:0 (c001n12)
- * Stop o2cb:1 (c001n11)
- * Stop ocfs2-1:0 (c001n12)
- * Stop ocfs2-1:1 (c001n11)
+ * Stop dlm:0 (c001n12) due to node availability
+ * Stop dlm:1 (c001n11) due to node availability
+ * Stop o2cb:0 (c001n12) due to node availability
+ * Stop o2cb:1 (c001n11) due to node availability
+ * Stop ocfs2-1:0 (c001n12) due to node availability
+ * Stop ocfs2-1:1 (c001n11) due to node availability
Executing cluster transition:
* Pseudo action: c-ocfs2-1_stop_0
* Resource action: ocfs2-1:1 stop on c001n12
* Resource action: ocfs2-1:0 stop on c001n11
* Pseudo action: c-ocfs2-1_stopped_0
* Pseudo action: o2cb-clone_stop_0
* Resource action: o2cb:1 stop on c001n12
* Resource action: o2cb:0 stop on c001n11
* Pseudo action: o2cb-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm:1 stop on c001n12
* Resource action: dlm:0 stop on c001n11
* Pseudo action: dlm-clone_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ c001n11 c001n12 ]
fencing (stonith:external/ssh): Started c001n11
Clone Set: dlm-clone [dlm]
Clone Set: o2cb-clone [o2cb]
Stopped: [ c001n11 c001n12 ]
Clone Set: clone-drbd0 [drbd0]
Started: [ c001n11 c001n12 ]
Clone Set: c-ocfs2-1 [ocfs2-1]
Stopped: [ c001n11 c001n12 ]
diff --git a/pengine/test10/clone-require-all-2.summary b/pengine/test10/clone-require-all-2.summary
index d4b2519247..f5861e7c19 100644
--- a/pengine/test10/clone-require-all-2.summary
+++ b/pengine/test10/clone-require-all-2.summary
@@ -1,41 +1,41 @@ Current
cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Transition Summary: * Move shooter (Started rhel7-auto1 -> rhel7-auto3) - * Stop A:0 (rhel7-auto1) - * Stop A:1 (rhel7-auto2) - * Start B:0 (rhel7-auto4 - blocked) - * Start B:1 (rhel7-auto3 - blocked) + * Stop A:0 (rhel7-auto1) due to node availability + * Stop A:1 (rhel7-auto2) due to node availability + * Start B:0 (rhel7-auto4 - blocked) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory + * Start B:1 (rhel7-auto3 - blocked) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: A-clone_stop_0 * Resource action: shooter start on rhel7-auto3 * Resource action: A stop on rhel7-auto1 * Resource action: A stop on rhel7-auto2 * Pseudo action: A-clone_stopped_0 * Pseudo action: all_stopped * Resource action: shooter monitor=60000 on rhel7-auto3 Revised cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 Clone Set: A-clone [A] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] diff --git a/pengine/test10/clone-require-all-3.summary b/pengine/test10/clone-require-all-3.summary index 68191b168a..1c887e506f 100644 --- a/pengine/test10/clone-require-all-3.summary +++ b/pengine/test10/clone-require-all-3.summary @@ -1,46 +1,46 @@ Current cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] Transition Summary: * Move shooter (Started rhel7-auto1 -> rhel7-auto3) - * Stop A:0 (rhel7-auto1) - * Stop A:1 (rhel7-auto2) - * Stop B:0 (Started rhel7-auto3) - * Stop B:1 (Started rhel7-auto4) + * Stop A:0 (rhel7-auto1) due to node availability + * Stop A:1 (rhel7-auto2) due to node availability + * Stop B:0 (Started rhel7-auto3) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory + * Stop B:1 (Started rhel7-auto4) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: B-clone_stop_0 * Resource action: shooter start on rhel7-auto3 * Resource action: B stop on rhel7-auto3 * Resource action: B stop on rhel7-auto4 * Pseudo action: B-clone_stopped_0 * Resource action: shooter monitor=60000 on rhel7-auto3 * Pseudo action: A-clone_stop_0 * Resource action: A stop on rhel7-auto1 * Resource action: A stop on rhel7-auto2 * Pseudo action: A-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node rhel7-auto1 (1): standby Node rhel7-auto2 (2): standby Online: [ rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto3 Clone Set: A-clone [A] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] diff --git 
a/pengine/test10/clone-require-all-4.summary b/pengine/test10/clone-require-all-4.summary index 49ae3bd87e..90d2059708 100644 --- a/pengine/test10/clone-require-all-4.summary +++ b/pengine/test10/clone-require-all-4.summary @@ -1,40 +1,40 @@ Current cluster status: Node rhel7-auto1 (1): standby Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 ] Stopped: [ rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] Transition Summary: * Move shooter (Started rhel7-auto1 -> rhel7-auto2) - * Stop A:0 (rhel7-auto1) + * Stop A:0 (rhel7-auto1) due to node availability Executing cluster transition: * Resource action: shooter stop on rhel7-auto1 * Pseudo action: A-clone_stop_0 * Resource action: shooter start on rhel7-auto2 * Resource action: A stop on rhel7-auto1 * Pseudo action: A-clone_stopped_0 * Pseudo action: A-clone_start_0 * Pseudo action: all_stopped * Resource action: shooter monitor=60000 on rhel7-auto2 * Pseudo action: A-clone_running_0 Revised cluster status: Node rhel7-auto1 (1): standby Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto2 Clone Set: A-clone [A] Started: [ rhel7-auto2 ] Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto1 rhel7-auto2 ] diff --git a/pengine/test10/clone-require-all-6.summary b/pengine/test10/clone-require-all-6.summary index 6561ea3020..20ccf7670f 100644 --- a/pengine/test10/clone-require-all-6.summary +++ b/pengine/test10/clone-require-all-6.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] Stopped: [ rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto2 ] Transition Summary: - * Stop A:0 (rhel7-auto1) - * Stop A:2 (rhel7-auto3) + * Stop A:0 (rhel7-auto1) due to node availability + * Stop A:2 (rhel7-auto3) due to node availability Executing cluster transition: * Pseudo action: A-clone_stop_0 * Resource action: A stop on rhel7-auto1 * Resource action: A stop on rhel7-auto3 * Pseudo action: A-clone_stopped_0 * Pseudo action: A-clone_start_0 * Pseudo action: all_stopped * Pseudo action: A-clone_running_0 Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ] shooter (stonith:fence_xvm): Started rhel7-auto1 Clone Set: A-clone [A] Started: [ rhel7-auto2 ] Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ] Clone Set: B-clone [B] Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ] Stopped: [ rhel7-auto2 ] diff --git a/pengine/test10/clone_min_interleave_start_one.summary b/pengine/test10/clone_min_interleave_start_one.summary index b15f68a884..4ee71c4e41 100644 --- a/pengine/test10/clone_min_interleave_start_one.summary +++ b/pengine/test10/clone_min_interleave_start_one.summary @@ -1,39 +1,39 @@ Current cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] Transition Summary: * Start FAKE1:0 (c7auto1) - * Start FAKE2:0 (c7auto2 - blocked) - * Start FAKE2:1 (c7auto3 - 
blocked) - * Start FAKE2:2 (c7auto1 - blocked) - * Start FAKE3:0 (c7auto2 - blocked) - * Start FAKE3:1 (c7auto3 - blocked) - * Start FAKE3:2 (c7auto1 - blocked) + * Start FAKE2:0 (c7auto2 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Start FAKE2:1 (c7auto3 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Start FAKE2:2 (c7auto1 - blocked) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Start FAKE3:0 (c7auto2 - blocked) due to unrunnable FAKE2:0 start + * Start FAKE3:1 (c7auto3 - blocked) due to unrunnable FAKE2:1 start + * Start FAKE3:2 (c7auto1 - blocked) due to unrunnable FAKE2:2 start Executing cluster transition: * Pseudo action: FAKE1-clone_start_0 * Resource action: FAKE1 start on c7auto1 * Pseudo action: FAKE1-clone_running_0 * Resource action: FAKE1 monitor=10000 on c7auto1 Revised cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/clone_min_interleave_stop_one.summary b/pengine/test10/clone_min_interleave_stop_one.summary index 9280b7e8b9..4fd094e80b 100644 --- a/pengine/test10/clone_min_interleave_stop_one.summary +++ b/pengine/test10/clone_min_interleave_stop_one.summary @@ -1,35 +1,35 @@ Current cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Started: [ c7auto1 c7auto2 c7auto3 ] Transition Summary: - * Stop FAKE1:0 (c7auto3) + * Stop FAKE1:0 (c7auto3) due to node availability Executing cluster transition: * Pseudo action: FAKE1-clone_stop_0 * Resource action: FAKE1 stop on c7auto3 * Pseudo action: FAKE1-clone_stopped_0 * Pseudo action: FAKE1-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKE1-clone_running_0 Revised cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 c7auto2 ] Stopped: [ c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Started: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/clone_min_interleave_stop_two.summary b/pengine/test10/clone_min_interleave_stop_two.summary index fb28e0d3aa..0866f3cd8d 100644 --- a/pengine/test10/clone_min_interleave_stop_two.summary +++ b/pengine/test10/clone_min_interleave_stop_two.summary @@ -1,53 +1,53 @@ Current cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Started: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Started: [ c7auto1 c7auto2 c7auto3 ] Transition Summary: - * Stop FAKE1:0 (c7auto3) - * Stop FAKE1:2 (c7auto2) - * Stop FAKE2:0 (Started c7auto3) - * Stop FAKE2:1 (Started c7auto1) - * Stop FAKE2:2 (Started c7auto2) - * Stop FAKE3:0 (Started c7auto3) - * Stop FAKE3:1 (Started c7auto1) - * Stop FAKE3:2 (Started c7auto2) + * Stop FAKE1:0 (c7auto3) due to node availability + * Stop FAKE1:2 (c7auto2) due to node availability + * Stop FAKE2:0 (Started c7auto3) due to 
unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE2:1 (Started c7auto1) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE2:2 (Started c7auto2) due to unrunnable clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory + * Stop FAKE3:0 (Started c7auto3) due to required FAKE2:0 start + * Stop FAKE3:1 (Started c7auto1) due to required FAKE2:1 start + * Stop FAKE3:2 (Started c7auto2) due to required FAKE2:2 start Executing cluster transition: * Pseudo action: FAKE3-clone_stop_0 * Resource action: FAKE3 stop on c7auto3 * Resource action: FAKE3 stop on c7auto1 * Resource action: FAKE3 stop on c7auto2 * Pseudo action: FAKE3-clone_stopped_0 * Pseudo action: FAKE2-clone_stop_0 * Resource action: FAKE2 stop on c7auto3 * Resource action: FAKE2 stop on c7auto1 * Resource action: FAKE2 stop on c7auto2 * Pseudo action: FAKE2-clone_stopped_0 * Pseudo action: FAKE1-clone_stop_0 * Resource action: FAKE1 stop on c7auto3 * Resource action: FAKE1 stop on c7auto2 * Pseudo action: FAKE1-clone_stopped_0 * Pseudo action: FAKE1-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKE1-clone_running_0 Revised cluster status: Online: [ c7auto1 c7auto2 c7auto3 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKE1-clone [FAKE1] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 ] Clone Set: FAKE2-clone [FAKE2] Stopped: [ c7auto1 c7auto2 c7auto3 ] Clone Set: FAKE3-clone [FAKE3] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/clone_min_start_one.summary b/pengine/test10/clone_min_start_one.summary index ee33e01916..196f1b3039 100644 --- a/pengine/test10/clone_min_start_one.summary +++ b/pengine/test10/clone_min_start_one.summary @@ -1,37 +1,37 @@ Current cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped Transition Summary: * Move shooter (Started c7auto1 -> c7auto3) * Start FAKECLONE:0 (c7auto3) - * Start FAKE (c7auto4 - blocked) + * Start FAKE (c7auto4 - blocked) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory Executing cluster transition: * Resource action: shooter stop on c7auto1 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Resource action: shooter start on c7auto3 * Resource action: FAKECLONE start on c7auto3 * Pseudo action: FAKECLONE-clone_running_0 * Resource action: shooter monitor=60000 on c7auto3 * Resource action: FAKECLONE monitor=10000 on c7auto3 Revised cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto3 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto3 ] Stopped: [ c7auto1 c7auto2 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/clone_min_stop_all.summary b/pengine/test10/clone_min_stop_all.summary index eb2944f930..877d12f407 100644 --- a/pengine/test10/clone_min_stop_all.summary +++ b/pengine/test10/clone_min_stop_all.summary @@ -1,43 +1,43 @@ Current cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] FAKE (ocf::heartbeat:Dummy): Started c7auto4 Transition Summary: * Move shooter (Started c7auto1 -> 
c7auto4)
- * Stop FAKECLONE:0 (c7auto1)
- * Stop FAKECLONE:1 (c7auto2)
- * Stop FAKECLONE:2 (c7auto3)
- * Stop FAKE (Started c7auto4)
+ * Stop FAKECLONE:0 (c7auto1) due to node availability
+ * Stop FAKECLONE:1 (c7auto2) due to node availability
+ * Stop FAKECLONE:2 (c7auto3) due to node availability
+ * Stop FAKE (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory
Executing cluster transition:
* Resource action: shooter stop on c7auto1
* Resource action: FAKE stop on c7auto4
* Resource action: shooter start on c7auto4
* Pseudo action: FAKECLONE-clone_stop_0
* Resource action: shooter monitor=60000 on c7auto4
* Resource action: FAKECLONE stop on c7auto1
* Resource action: FAKECLONE stop on c7auto2
* Resource action: FAKECLONE stop on c7auto3
* Pseudo action: FAKECLONE-clone_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Node c7auto1 (1): standby
Node c7auto2 (2): standby
Node c7auto3 (3): standby
Online: [ c7auto4 ]
shooter (stonith:fence_phd_kvm): Started c7auto4
Clone Set: FAKECLONE-clone [FAKECLONE]
Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
FAKE (ocf::heartbeat:Dummy): Stopped
diff --git a/pengine/test10/clone_min_stop_one.summary b/pengine/test10/clone_min_stop_one.summary
index 9206a0d7be..3fdb675a42 100644
--- a/pengine/test10/clone_min_stop_one.summary
+++ b/pengine/test10/clone_min_stop_one.summary
@@ -1,32 +1,32 @@
Current cluster status:
Node c7auto2 (2): standby
Online: [ c7auto1 c7auto3 c7auto4 ]
shooter (stonith:fence_phd_kvm): Started c7auto1
Clone Set: FAKECLONE-clone [FAKECLONE]
Started: [ c7auto1 c7auto2 c7auto3 ]
Stopped: [ c7auto4 ]
FAKE (ocf::heartbeat:Dummy): Started c7auto4
Transition Summary:
- * Stop FAKECLONE:1 (c7auto2)
+ * Stop FAKECLONE:1 (c7auto2) due to node availability
Executing cluster transition:
* Pseudo action: FAKECLONE-clone_stop_0
* Resource action: FAKECLONE stop on c7auto2
* Pseudo action: FAKECLONE-clone_stopped_0
* Pseudo action: FAKECLONE-clone_start_0
* Pseudo action: all_stopped
* Pseudo action: FAKECLONE-clone_running_0
Revised cluster status:
Node c7auto2 (2): standby
Online: [ c7auto1 c7auto3 c7auto4 ]
shooter (stonith:fence_phd_kvm): Started c7auto1
Clone Set: FAKECLONE-clone [FAKECLONE]
Started: [ c7auto1 c7auto3 ]
Stopped: [ c7auto2 c7auto4 ]
FAKE (ocf::heartbeat:Dummy): Started c7auto4
diff --git a/pengine/test10/clone_min_stop_two.summary b/pengine/test10/clone_min_stop_two.summary
index c009d7d13f..4d8c38f53a 100644
--- a/pengine/test10/clone_min_stop_two.summary
+++ b/pengine/test10/clone_min_stop_two.summary
@@ -1,42 +1,42 @@
Current cluster status:
Node c7auto1 (1): standby
Node c7auto2 (2): standby
Online: [ c7auto3 c7auto4 ]
shooter (stonith:fence_phd_kvm): Started c7auto1
Clone Set: FAKECLONE-clone [FAKECLONE]
Started: [ c7auto1 c7auto2 c7auto3 ]
Stopped: [ c7auto4 ]
FAKE (ocf::heartbeat:Dummy): Started c7auto4
Transition Summary:
* Move shooter (Started c7auto1 -> c7auto3)
- * Stop FAKECLONE:0 (c7auto1)
- * Stop FAKECLONE:1 (c7auto2)
- * Stop FAKE (Started c7auto4)
+ * Stop FAKECLONE:0 (c7auto1) due to node availability
+ * Stop FAKECLONE:1 (c7auto2) due to node availability
+ * Stop FAKE (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory
Executing cluster transition:
* Resource action: shooter stop on c7auto1
* Resource action: FAKE stop on c7auto4
* Resource action: shooter start on c7auto3
* Pseudo action: FAKECLONE-clone_stop_0
* Resource action: shooter monitor=60000 on c7auto3
* Resource action: FAKECLONE stop on c7auto1
*
Resource action: FAKECLONE stop on c7auto2 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKECLONE-clone_running_0 Revised cluster status: Node c7auto1 (1): standby Node c7auto2 (2): standby Online: [ c7auto3 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto3 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto3 ] Stopped: [ c7auto1 c7auto2 c7auto4 ] FAKE (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/cloned-group-stop.summary b/pengine/test10/cloned-group-stop.summary index 8357c38b3d..f7a980c116 100644 --- a/pengine/test10/cloned-group-stop.summary +++ b/pengine/test10/cloned-group-stop.summary @@ -1,89 +1,89 @@ 2 of 20 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhos4-node3 rhos4-node4 ] virt-fencing (stonith:fence_xvm): Started rhos4-node3 Resource Group: mysql-group mysql-vip (ocf::heartbeat:IPaddr2): Started rhos4-node3 mysql-fs (ocf::heartbeat:Filesystem): Started rhos4-node3 mysql-db (ocf::heartbeat:mysql): Started rhos4-node3 Clone Set: qpidd-clone [qpidd] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: keystone-clone [keystone] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: glance-clone [glance] Started: [ rhos4-node3 rhos4-node4 ] Clone Set: cinder-clone [cinder] Started: [ rhos4-node3 rhos4-node4 ] Transition Summary: - * Stop qpidd:0 (rhos4-node4) - * Stop qpidd:1 (rhos4-node3) - * Stop keystone:0 (Started rhos4-node4) - * Stop keystone:1 (Started rhos4-node3) - * Stop glance-fs:0 (Started rhos4-node4) - * Stop glance-registry:0 (Started rhos4-node4) - * Stop glance-api:0 (Started rhos4-node4) - * Stop glance-fs:1 (Started rhos4-node3) - * Stop glance-registry:1 (Started rhos4-node3) - * Stop glance-api:1 (Started rhos4-node3) - * Stop cinder-api:0 (Started rhos4-node4) - * Stop cinder-scheduler:0 (Started rhos4-node4) - * Stop cinder-volume:0 (Started rhos4-node4) - * Stop cinder-api:1 (Started rhos4-node3) - * Stop cinder-scheduler:1 (Started rhos4-node3) - * Stop cinder-volume:1 (Started rhos4-node3) + * Stop qpidd:0 (rhos4-node4) due to node availability + * Stop qpidd:1 (rhos4-node3) due to node availability + * Stop keystone:0 (Started rhos4-node4) due to unrunnable qpidd-clone running + * Stop keystone:1 (Started rhos4-node3) due to unrunnable qpidd-clone running + * Stop glance-fs:0 (Started rhos4-node4) due to required keystone-clone running + * Stop glance-registry:0 (Started rhos4-node4) due to required glance-fs:0 start + * Stop glance-api:0 (Started rhos4-node4) due to required glance-registry:0 start + * Stop glance-fs:1 (Started rhos4-node3) due to required keystone-clone running + * Stop glance-registry:1 (Started rhos4-node3) due to required glance-fs:1 start + * Stop glance-api:1 (Started rhos4-node3) due to required glance-registry:1 start + * Stop cinder-api:0 (Started rhos4-node4) due to required glance-clone running + * Stop cinder-scheduler:0 (Started rhos4-node4) due to required cinder-api:0 start + * Stop cinder-volume:0 (Started rhos4-node4) due to required cinder-scheduler:0 start + * Stop cinder-api:1 (Started rhos4-node3) due to required glance-clone running + * Stop cinder-scheduler:1 (Started rhos4-node3) due to required cinder-api:1 start + * Stop cinder-volume:1 (Started rhos4-node3) due to required cinder-scheduler:1 start Executing cluster transition: * Pseudo action: cinder-clone_stop_0 * Pseudo action: cinder:0_stop_0 * Resource action: cinder-volume stop on rhos4-node4 * 
Pseudo action: cinder:1_stop_0 * Resource action: cinder-volume stop on rhos4-node3 * Resource action: cinder-scheduler stop on rhos4-node4 * Resource action: cinder-scheduler stop on rhos4-node3 * Resource action: cinder-api stop on rhos4-node4 * Resource action: cinder-api stop on rhos4-node3 * Pseudo action: cinder:0_stopped_0 * Pseudo action: cinder:1_stopped_0 * Pseudo action: cinder-clone_stopped_0 * Pseudo action: glance-clone_stop_0 * Pseudo action: glance:0_stop_0 * Resource action: glance-api stop on rhos4-node4 * Pseudo action: glance:1_stop_0 * Resource action: glance-api stop on rhos4-node3 * Resource action: glance-registry stop on rhos4-node4 * Resource action: glance-registry stop on rhos4-node3 * Resource action: glance-fs stop on rhos4-node4 * Resource action: glance-fs stop on rhos4-node3 * Pseudo action: glance:0_stopped_0 * Pseudo action: glance:1_stopped_0 * Pseudo action: glance-clone_stopped_0 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhos4-node4 * Resource action: keystone stop on rhos4-node3 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: qpidd-clone_stop_0 * Resource action: qpidd stop on rhos4-node4 * Resource action: qpidd stop on rhos4-node3 * Pseudo action: qpidd-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhos4-node3 rhos4-node4 ] virt-fencing (stonith:fence_xvm): Started rhos4-node3 Resource Group: mysql-group mysql-vip (ocf::heartbeat:IPaddr2): Started rhos4-node3 mysql-fs (ocf::heartbeat:Filesystem): Started rhos4-node3 mysql-db (ocf::heartbeat:mysql): Started rhos4-node3 Clone Set: qpidd-clone [qpidd] Stopped (disabled): [ rhos4-node3 rhos4-node4 ] Clone Set: keystone-clone [keystone] Stopped: [ rhos4-node3 rhos4-node4 ] Clone Set: glance-clone [glance] Stopped: [ rhos4-node3 rhos4-node4 ] Clone Set: cinder-clone [cinder] Stopped: [ rhos4-node3 rhos4-node4 ] diff --git a/pengine/test10/cloned-group.summary b/pengine/test10/cloned-group.summary index 7d64be4b2e..e1456b9f69 100644 --- a/pengine/test10/cloned-group.summary +++ b/pengine/test10/cloned-group.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ webcluster01 ] OFFLINE: [ webcluster02 ] Clone Set: apache2_clone [grrr] Resource Group: grrr:2 apache2 (ocf::heartbeat:apache): ORPHANED Started webcluster01 mysql-proxy (lsb:mysql-proxy): ORPHANED Started webcluster01 Started: [ webcluster01 ] Stopped: [ webcluster02 ] Transition Summary: * Restart apache2:0 (Started webcluster01) - * Restart mysql-proxy:0 (Started webcluster01) - * Stop apache2:2 (webcluster01) - * Stop mysql-proxy:2 (webcluster01) + * Restart mysql-proxy:0 (Started webcluster01) due to required apache2:0 start + * Stop apache2:2 (webcluster01) due to node availability + * Stop mysql-proxy:2 (webcluster01) due to node availability Executing cluster transition: * Pseudo action: apache2_clone_stop_0 * Pseudo action: grrr:0_stop_0 * Resource action: mysql-proxy:1 stop on webcluster01 * Pseudo action: grrr:2_stop_0 * Resource action: mysql-proxy:0 stop on webcluster01 * Resource action: apache2:1 stop on webcluster01 * Resource action: apache2:0 stop on webcluster01 * Pseudo action: all_stopped * Pseudo action: grrr:0_stopped_0 * Pseudo action: grrr:2_stopped_0 * Pseudo action: apache2_clone_stopped_0 * Pseudo action: apache2_clone_start_0 * Pseudo action: grrr:0_start_0 * Resource action: apache2:1 start on webcluster01 * Resource action: apache2:1 monitor=10000 on webcluster01 * Resource action: mysql-proxy:1 start on webcluster01 * Resource action: 
mysql-proxy:1 monitor=10000 on webcluster01 * Pseudo action: grrr:0_running_0 * Pseudo action: apache2_clone_running_0 Revised cluster status: Online: [ webcluster01 ] OFFLINE: [ webcluster02 ] Clone Set: apache2_clone [grrr] Started: [ webcluster01 ] Stopped: [ webcluster02 ] diff --git a/pengine/test10/cloned_start_one.summary b/pengine/test10/cloned_start_one.summary index 20ac58f85d..5dedc18dd8 100644 --- a/pengine/test10/cloned_start_one.summary +++ b/pengine/test10/cloned_start_one.summary @@ -1,41 +1,41 @@ Current cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: * Start FAKECLONE:0 (c7auto1) - * Stop FAKECLONE2:0 (c7auto3) - * Stop FAKECLONE2:1 (Started c7auto4) + * Stop FAKECLONE2:0 (c7auto3) due to node availability + * Stop FAKECLONE2:1 (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing cluster transition: * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE start on c7auto1 * Pseudo action: FAKECLONE-clone_running_0 * Resource action: FAKECLONE2 stop on c7auto3 * Resource action: FAKECLONE2 stop on c7auto4 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: all_stopped * Resource action: FAKECLONE monitor=10000 on c7auto1 Revised cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] diff --git a/pengine/test10/cloned_start_two.summary b/pengine/test10/cloned_start_two.summary index bea4609013..2b47881c9d 100644 --- a/pengine/test10/cloned_start_two.summary +++ b/pengine/test10/cloned_start_two.summary @@ -1,42 +1,42 @@ Current cluster status: Node c7auto3 (3): standby Online: [ c7auto1 c7auto2 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: * Start FAKECLONE:0 (c7auto2) * Start FAKECLONE:1 (c7auto1) - * Stop FAKECLONE2:0 (c7auto3) + * Stop FAKECLONE2:0 (c7auto3) due to node availability Executing cluster transition: * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE start on c7auto2 * Resource action: FAKECLONE start on c7auto1 * Pseudo action: FAKECLONE-clone_running_0 * Resource action: FAKECLONE2 stop on c7auto3 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: all_stopped * Pseudo action: clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory * Resource action: FAKECLONE monitor=10000 on c7auto2 * Resource action: FAKECLONE monitor=10000 on c7auto1 Revised cluster status: Node c7auto3 (3): standby Online: [ c7auto1 c7auto2 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 ] Stopped: [ c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto4 ] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git 
a/pengine/test10/cloned_stop_one.summary b/pengine/test10/cloned_stop_one.summary index 1a952a2303..892548d79f 100644 --- a/pengine/test10/cloned_stop_one.summary +++ b/pengine/test10/cloned_stop_one.summary @@ -1,40 +1,40 @@ Current cluster status: Node c7auto3 (3): standby Online: [ c7auto1 c7auto2 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: - * Stop FAKECLONE:2 (c7auto3) - * Stop FAKECLONE2:0 (c7auto3) + * Stop FAKECLONE:2 (c7auto3) due to node availability + * Stop FAKECLONE2:0 (c7auto3) due to node availability Executing cluster transition: * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE2 stop on c7auto3 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: FAKECLONE-clone_stop_0 * Resource action: FAKECLONE stop on c7auto3 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKECLONE-clone_running_0 Revised cluster status: Node c7auto3 (3): standby Online: [ c7auto1 c7auto2 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 ] Stopped: [ c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto4 ] Stopped: [ c7auto1 c7auto2 c7auto3 ] diff --git a/pengine/test10/cloned_stop_two.summary b/pengine/test10/cloned_stop_two.summary index 531295f47b..8f1e0398ea 100644 --- a/pengine/test10/cloned_stop_two.summary +++ b/pengine/test10/cloned_stop_two.summary @@ -1,45 +1,45 @@ Current cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 c7auto2 c7auto3 ] Stopped: [ c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Started: [ c7auto3 c7auto4 ] Stopped: [ c7auto1 c7auto2 ] Transition Summary: - * Stop FAKECLONE:1 (c7auto2) - * Stop FAKECLONE:2 (c7auto3) - * Stop FAKECLONE2:0 (c7auto3) - * Stop FAKECLONE2:1 (Started c7auto4) + * Stop FAKECLONE:1 (c7auto2) due to node availability + * Stop FAKECLONE:2 (c7auto3) due to node availability + * Stop FAKECLONE2:0 (c7auto3) due to node availability + * Stop FAKECLONE2:1 (Started c7auto4) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing cluster transition: * Pseudo action: FAKECLONE2-clone_stop_0 * Resource action: FAKECLONE2 stop on c7auto3 * Resource action: FAKECLONE2 stop on c7auto4 * Pseudo action: FAKECLONE2-clone_stopped_0 * Pseudo action: FAKECLONE-clone_stop_0 * Resource action: FAKECLONE stop on c7auto2 * Resource action: FAKECLONE stop on c7auto3 * Pseudo action: FAKECLONE-clone_stopped_0 * Pseudo action: FAKECLONE-clone_start_0 * Pseudo action: all_stopped * Pseudo action: FAKECLONE-clone_running_0 Revised cluster status: Node c7auto2 (2): standby Node c7auto3 (3): standby Online: [ c7auto1 c7auto4 ] shooter (stonith:fence_phd_kvm): Started c7auto1 Clone Set: FAKECLONE-clone [FAKECLONE] Started: [ c7auto1 ] Stopped: [ c7auto2 c7auto3 c7auto4 ] Clone Set: FAKECLONE2-clone [FAKECLONE2] Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] diff --git a/pengine/test10/colocation_constraint_stops_slave.summary b/pengine/test10/colocation_constraint_stops_slave.summary index fe9e044025..a97b74b39a 100644 --- 
a/pengine/test10/colocation_constraint_stops_slave.summary +++ b/pengine/test10/colocation_constraint_stops_slave.summary @@ -1,34 +1,34 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Slaves: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop NATIVE_RSC_A:0 (fc16-builder) + * Stop NATIVE_RSC_A:0 (fc16-builder) due to node availability * Stop NATIVE_RSC_B (fc16-builder) Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/complex_enforce_colo.summary b/pengine/test10/complex_enforce_colo.summary index dd838b26f0..a21d5c1330 100644 --- a/pengine/test10/complex_enforce_colo.summary +++ b/pengine/test10/complex_enforce_colo.summary @@ -1,453 +1,453 @@ 3 of 132 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] node1-fence (stonith:fence_xvm): Started rhos6-node1 node2-fence (stonith:fence_xvm): Started rhos6-node2 node3-fence (stonith:fence_xvm): Started rhos6-node3 Clone Set: lb-haproxy-clone [lb-haproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] vip-db (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-rabbitmq (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-qpid (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-keystone (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-glance (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-cinder (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-swift (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-neutron (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-nova (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-horizon (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-heat (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-ceilometer (ocf::heartbeat:IPaddr2): Started rhos6-node3 Master/Slave Set: galera-master [galera] Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: rabbitmq-server-clone [rabbitmq-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: memcached-clone [memcached] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: mongodb-clone [mongodb] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: keystone-clone [keystone] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-fs-clone [glance-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-registry-clone [glance-registry] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-api-clone [glance-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] cinder-api (systemd:openstack-cinder-api): Started rhos6-node1 cinder-scheduler (systemd:openstack-cinder-scheduler): Started rhos6-node1 cinder-volume 
(systemd:openstack-cinder-volume): Started rhos6-node1 Clone Set: swift-fs-clone [swift-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-account-clone [swift-account] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-container-clone [swift-container] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-object-clone [swift-object] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-proxy-clone [swift-proxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] swift-object-expirer (systemd:openstack-swift-object-expirer): Started rhos6-node2 Clone Set: neutron-server-clone [neutron-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-scale-clone [neutron-scale] (unique) neutron-scale:0 (ocf::neutron:NeutronScale): Started rhos6-node3 neutron-scale:1 (ocf::neutron:NeutronScale): Started rhos6-node2 neutron-scale:2 (ocf::neutron:NeutronScale): Started rhos6-node1 Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-consoleauth-clone [nova-consoleauth] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-novncproxy-clone [nova-novncproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-api-clone [nova-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-scheduler-clone [nova-scheduler] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-conductor-clone [nova-conductor] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] ceilometer-central (systemd:openstack-ceilometer-central): Started rhos6-node3 Clone Set: ceilometer-collector-clone [ceilometer-collector] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-api-clone [ceilometer-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-delay-clone [ceilometer-delay] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-notification-clone [ceilometer-notification] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-clone [heat-api] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cfn-clone [heat-api-cfn] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] heat-engine (systemd:openstack-heat-engine): Started rhos6-node2 Clone Set: horizon-clone [horizon] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Transition Summary: - * Stop keystone:0 (rhos6-node1) - * Stop keystone:1 (rhos6-node2) - * Stop keystone:2 (rhos6-node3) + * Stop keystone:0 (rhos6-node1) due to node availability + * Stop keystone:1 (rhos6-node2) due to node availability + * Stop keystone:2 
(rhos6-node3) due to node availability * Stop glance-registry:0 (rhos6-node1) * Stop glance-registry:1 (rhos6-node2) * Stop glance-registry:2 (rhos6-node3) * Stop glance-api:0 (rhos6-node1) * Stop glance-api:1 (rhos6-node2) * Stop glance-api:2 (rhos6-node3) - * Stop cinder-api (Started rhos6-node1) - * Stop cinder-scheduler (Started rhos6-node1) + * Stop cinder-api (Started rhos6-node1) due to unrunnable keystone-clone running + * Stop cinder-scheduler (Started rhos6-node1) due to required cinder-api start * Stop cinder-volume (Started rhos6-node1) * Stop swift-account:0 (rhos6-node1) * Stop swift-account:1 (rhos6-node2) * Stop swift-account:2 (rhos6-node3) * Stop swift-container:0 (rhos6-node1) * Stop swift-container:1 (rhos6-node2) * Stop swift-container:2 (rhos6-node3) * Stop swift-object:0 (rhos6-node1) * Stop swift-object:1 (rhos6-node2) * Stop swift-object:2 (rhos6-node3) * Stop swift-proxy:0 (rhos6-node1) * Stop swift-proxy:1 (rhos6-node2) * Stop swift-proxy:2 (rhos6-node3) - * Stop swift-object-expirer (Started rhos6-node2) + * Stop swift-object-expirer (Started rhos6-node2) due to required swift-proxy-clone running * Stop neutron-server:0 (rhos6-node1) * Stop neutron-server:1 (rhos6-node2) * Stop neutron-server:2 (rhos6-node3) * Stop neutron-scale:0 (rhos6-node3) * Stop neutron-scale:1 (rhos6-node2) * Stop neutron-scale:2 (rhos6-node1) * Stop neutron-ovs-cleanup:0 (rhos6-node1) * Stop neutron-ovs-cleanup:1 (rhos6-node2) * Stop neutron-ovs-cleanup:2 (rhos6-node3) * Stop neutron-netns-cleanup:0 (rhos6-node1) * Stop neutron-netns-cleanup:1 (rhos6-node2) * Stop neutron-netns-cleanup:2 (rhos6-node3) * Stop neutron-openvswitch-agent:0 (rhos6-node1) * Stop neutron-openvswitch-agent:1 (rhos6-node2) * Stop neutron-openvswitch-agent:2 (rhos6-node3) * Stop neutron-dhcp-agent:0 (rhos6-node1) * Stop neutron-dhcp-agent:1 (rhos6-node2) * Stop neutron-dhcp-agent:2 (rhos6-node3) * Stop neutron-l3-agent:0 (rhos6-node1) * Stop neutron-l3-agent:1 (rhos6-node2) * Stop neutron-l3-agent:2 (rhos6-node3) * Stop neutron-metadata-agent:0 (rhos6-node1) * Stop neutron-metadata-agent:1 (rhos6-node2) * Stop neutron-metadata-agent:2 (rhos6-node3) * Stop nova-consoleauth:0 (rhos6-node1) * Stop nova-consoleauth:1 (rhos6-node2) * Stop nova-consoleauth:2 (rhos6-node3) * Stop nova-novncproxy:0 (rhos6-node1) * Stop nova-novncproxy:1 (rhos6-node2) * Stop nova-novncproxy:2 (rhos6-node3) * Stop nova-api:0 (rhos6-node1) * Stop nova-api:1 (rhos6-node2) * Stop nova-api:2 (rhos6-node3) * Stop nova-scheduler:0 (rhos6-node1) * Stop nova-scheduler:1 (rhos6-node2) * Stop nova-scheduler:2 (rhos6-node3) * Stop nova-conductor:0 (rhos6-node1) * Stop nova-conductor:1 (rhos6-node2) * Stop nova-conductor:2 (rhos6-node3) - * Stop ceilometer-central (Started rhos6-node3) - * Stop ceilometer-collector:0 (Started rhos6-node1) - * Stop ceilometer-collector:1 (Started rhos6-node2) - * Stop ceilometer-collector:2 (Started rhos6-node3) - * Stop ceilometer-api:0 (Started rhos6-node1) - * Stop ceilometer-api:1 (Started rhos6-node2) - * Stop ceilometer-api:2 (Started rhos6-node3) - * Stop ceilometer-delay:0 (Started rhos6-node1) - * Stop ceilometer-delay:1 (Started rhos6-node2) - * Stop ceilometer-delay:2 (Started rhos6-node3) - * Stop ceilometer-alarm-evaluator:0 (Started rhos6-node1) - * Stop ceilometer-alarm-evaluator:1 (Started rhos6-node2) - * Stop ceilometer-alarm-evaluator:2 (Started rhos6-node3) - * Stop ceilometer-alarm-notifier:0 (Started rhos6-node1) - * Stop ceilometer-alarm-notifier:1 (Started rhos6-node2) - * Stop 
ceilometer-alarm-notifier:2 (Started rhos6-node3) - * Stop ceilometer-notification:0 (Started rhos6-node1) - * Stop ceilometer-notification:1 (Started rhos6-node2) - * Stop ceilometer-notification:2 (Started rhos6-node3) - * Stop heat-api:0 (Started rhos6-node1) - * Stop heat-api:1 (Started rhos6-node2) - * Stop heat-api:2 (Started rhos6-node3) - * Stop heat-api-cfn:0 (Started rhos6-node1) - * Stop heat-api-cfn:1 (Started rhos6-node2) - * Stop heat-api-cfn:2 (Started rhos6-node3) - * Stop heat-api-cloudwatch:0 (Started rhos6-node1) - * Stop heat-api-cloudwatch:1 (Started rhos6-node2) - * Stop heat-api-cloudwatch:2 (Started rhos6-node3) - * Stop heat-engine (Started rhos6-node2) + * Stop ceilometer-central (Started rhos6-node3) due to unrunnable keystone-clone running + * Stop ceilometer-collector:0 (Started rhos6-node1) due to required ceilometer-central start + * Stop ceilometer-collector:1 (Started rhos6-node2) due to required ceilometer-central start + * Stop ceilometer-collector:2 (Started rhos6-node3) due to required ceilometer-central start + * Stop ceilometer-api:0 (Started rhos6-node1) due to required ceilometer-collector:0 start + * Stop ceilometer-api:1 (Started rhos6-node2) due to required ceilometer-collector:1 start + * Stop ceilometer-api:2 (Started rhos6-node3) due to required ceilometer-collector:2 start + * Stop ceilometer-delay:0 (Started rhos6-node1) due to required ceilometer-api:0 start + * Stop ceilometer-delay:1 (Started rhos6-node2) due to required ceilometer-api:1 start + * Stop ceilometer-delay:2 (Started rhos6-node3) due to required ceilometer-api:2 start + * Stop ceilometer-alarm-evaluator:0 (Started rhos6-node1) due to required ceilometer-delay:0 start + * Stop ceilometer-alarm-evaluator:1 (Started rhos6-node2) due to required ceilometer-delay:1 start + * Stop ceilometer-alarm-evaluator:2 (Started rhos6-node3) due to required ceilometer-delay:2 start + * Stop ceilometer-alarm-notifier:0 (Started rhos6-node1) due to required ceilometer-alarm-evaluator:0 start + * Stop ceilometer-alarm-notifier:1 (Started rhos6-node2) due to required ceilometer-alarm-evaluator:1 start + * Stop ceilometer-alarm-notifier:2 (Started rhos6-node3) due to required ceilometer-alarm-evaluator:2 start + * Stop ceilometer-notification:0 (Started rhos6-node1) due to required ceilometer-alarm-notifier:0 start + * Stop ceilometer-notification:1 (Started rhos6-node2) due to required ceilometer-alarm-notifier:1 start + * Stop ceilometer-notification:2 (Started rhos6-node3) due to required ceilometer-alarm-notifier:2 start + * Stop heat-api:0 (Started rhos6-node1) due to required ceilometer-notification:0 start + * Stop heat-api:1 (Started rhos6-node2) due to required ceilometer-notification:1 start + * Stop heat-api:2 (Started rhos6-node3) due to required ceilometer-notification:2 start + * Stop heat-api-cfn:0 (Started rhos6-node1) due to required heat-api:0 start + * Stop heat-api-cfn:1 (Started rhos6-node2) due to required heat-api:1 start + * Stop heat-api-cfn:2 (Started rhos6-node3) due to required heat-api:2 start + * Stop heat-api-cloudwatch:0 (Started rhos6-node1) due to required heat-api-cfn:0 start + * Stop heat-api-cloudwatch:1 (Started rhos6-node2) due to required heat-api-cfn:1 start + * Stop heat-api-cloudwatch:2 (Started rhos6-node3) due to required heat-api-cfn:2 start + * Stop heat-engine (Started rhos6-node2) due to required heat-api-cloudwatch-clone running Executing cluster transition: * Pseudo action: glance-api-clone_stop_0 * Resource action: cinder-volume stop on 
rhos6-node1 * Pseudo action: swift-object-clone_stop_0 * Resource action: swift-object-expirer stop on rhos6-node2 * Pseudo action: neutron-metadata-agent-clone_stop_0 * Pseudo action: nova-conductor-clone_stop_0 * Resource action: heat-engine stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node1 * Resource action: glance-api stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node3 * Pseudo action: glance-api-clone_stopped_0 * Resource action: cinder-scheduler stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node2 * Resource action: swift-object stop on rhos6-node3 * Pseudo action: swift-object-clone_stopped_0 * Pseudo action: swift-proxy-clone_stop_0 * Resource action: neutron-metadata-agent stop on rhos6-node1 * Resource action: neutron-metadata-agent stop on rhos6-node2 * Resource action: neutron-metadata-agent stop on rhos6-node3 * Pseudo action: neutron-metadata-agent-clone_stopped_0 * Resource action: nova-conductor stop on rhos6-node1 * Resource action: nova-conductor stop on rhos6-node2 * Resource action: nova-conductor stop on rhos6-node3 * Pseudo action: nova-conductor-clone_stopped_0 * Pseudo action: heat-api-cloudwatch-clone_stop_0 * Pseudo action: glance-registry-clone_stop_0 * Resource action: cinder-api stop on rhos6-node1 * Pseudo action: swift-container-clone_stop_0 * Resource action: swift-proxy stop on rhos6-node1 * Resource action: swift-proxy stop on rhos6-node2 * Resource action: swift-proxy stop on rhos6-node3 * Pseudo action: swift-proxy-clone_stopped_0 * Pseudo action: neutron-l3-agent-clone_stop_0 * Pseudo action: nova-scheduler-clone_stop_0 * Resource action: heat-api-cloudwatch stop on rhos6-node1 * Resource action: heat-api-cloudwatch stop on rhos6-node2 * Resource action: heat-api-cloudwatch stop on rhos6-node3 * Pseudo action: heat-api-cloudwatch-clone_stopped_0 * Resource action: glance-registry stop on rhos6-node1 * Resource action: glance-registry stop on rhos6-node2 * Resource action: glance-registry stop on rhos6-node3 * Pseudo action: glance-registry-clone_stopped_0 * Resource action: swift-container stop on rhos6-node1 * Resource action: swift-container stop on rhos6-node2 * Resource action: swift-container stop on rhos6-node3 * Pseudo action: swift-container-clone_stopped_0 * Resource action: neutron-l3-agent stop on rhos6-node1 * Resource action: neutron-l3-agent stop on rhos6-node2 * Resource action: neutron-l3-agent stop on rhos6-node3 * Pseudo action: neutron-l3-agent-clone_stopped_0 * Resource action: nova-scheduler stop on rhos6-node1 * Resource action: nova-scheduler stop on rhos6-node2 * Resource action: nova-scheduler stop on rhos6-node3 * Pseudo action: nova-scheduler-clone_stopped_0 * Pseudo action: heat-api-cfn-clone_stop_0 * Pseudo action: swift-account-clone_stop_0 * Pseudo action: neutron-dhcp-agent-clone_stop_0 * Pseudo action: nova-api-clone_stop_0 * Resource action: heat-api-cfn stop on rhos6-node1 * Resource action: heat-api-cfn stop on rhos6-node2 * Resource action: heat-api-cfn stop on rhos6-node3 * Pseudo action: heat-api-cfn-clone_stopped_0 * Resource action: swift-account stop on rhos6-node1 * Resource action: swift-account stop on rhos6-node2 * Resource action: swift-account stop on rhos6-node3 * Pseudo action: swift-account-clone_stopped_0 * Resource action: neutron-dhcp-agent stop on rhos6-node1 * Resource action: neutron-dhcp-agent stop on rhos6-node2 * Resource action: neutron-dhcp-agent stop on rhos6-node3 * Pseudo action: 
neutron-dhcp-agent-clone_stopped_0 * Resource action: nova-api stop on rhos6-node1 * Resource action: nova-api stop on rhos6-node2 * Resource action: nova-api stop on rhos6-node3 * Pseudo action: nova-api-clone_stopped_0 * Pseudo action: heat-api-clone_stop_0 * Pseudo action: neutron-openvswitch-agent-clone_stop_0 * Pseudo action: nova-novncproxy-clone_stop_0 * Resource action: heat-api stop on rhos6-node1 * Resource action: heat-api stop on rhos6-node2 * Resource action: heat-api stop on rhos6-node3 * Pseudo action: heat-api-clone_stopped_0 * Resource action: neutron-openvswitch-agent stop on rhos6-node1 * Resource action: neutron-openvswitch-agent stop on rhos6-node2 * Resource action: neutron-openvswitch-agent stop on rhos6-node3 * Pseudo action: neutron-openvswitch-agent-clone_stopped_0 * Resource action: nova-novncproxy stop on rhos6-node1 * Resource action: nova-novncproxy stop on rhos6-node2 * Resource action: nova-novncproxy stop on rhos6-node3 * Pseudo action: nova-novncproxy-clone_stopped_0 * Pseudo action: ceilometer-notification-clone_stop_0 * Pseudo action: neutron-netns-cleanup-clone_stop_0 * Pseudo action: nova-consoleauth-clone_stop_0 * Resource action: ceilometer-notification stop on rhos6-node1 * Resource action: ceilometer-notification stop on rhos6-node2 * Resource action: ceilometer-notification stop on rhos6-node3 * Pseudo action: ceilometer-notification-clone_stopped_0 * Resource action: neutron-netns-cleanup stop on rhos6-node1 * Resource action: neutron-netns-cleanup stop on rhos6-node2 * Resource action: neutron-netns-cleanup stop on rhos6-node3 * Pseudo action: neutron-netns-cleanup-clone_stopped_0 * Resource action: nova-consoleauth stop on rhos6-node1 * Resource action: nova-consoleauth stop on rhos6-node2 * Resource action: nova-consoleauth stop on rhos6-node3 * Pseudo action: nova-consoleauth-clone_stopped_0 * Pseudo action: ceilometer-alarm-notifier-clone_stop_0 * Pseudo action: neutron-ovs-cleanup-clone_stop_0 * Resource action: ceilometer-alarm-notifier stop on rhos6-node1 * Resource action: ceilometer-alarm-notifier stop on rhos6-node2 * Resource action: ceilometer-alarm-notifier stop on rhos6-node3 * Pseudo action: ceilometer-alarm-notifier-clone_stopped_0 * Resource action: neutron-ovs-cleanup stop on rhos6-node1 * Resource action: neutron-ovs-cleanup stop on rhos6-node2 * Resource action: neutron-ovs-cleanup stop on rhos6-node3 * Pseudo action: neutron-ovs-cleanup-clone_stopped_0 * Pseudo action: ceilometer-alarm-evaluator-clone_stop_0 * Pseudo action: neutron-scale-clone_stop_0 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node1 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node2 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node3 * Pseudo action: ceilometer-alarm-evaluator-clone_stopped_0 * Resource action: neutron-scale:0 stop on rhos6-node3 * Resource action: neutron-scale:1 stop on rhos6-node2 * Resource action: neutron-scale:2 stop on rhos6-node1 * Pseudo action: neutron-scale-clone_stopped_0 * Pseudo action: ceilometer-delay-clone_stop_0 * Pseudo action: neutron-server-clone_stop_0 * Resource action: ceilometer-delay stop on rhos6-node1 * Resource action: ceilometer-delay stop on rhos6-node2 * Resource action: ceilometer-delay stop on rhos6-node3 * Pseudo action: ceilometer-delay-clone_stopped_0 * Resource action: neutron-server stop on rhos6-node1 * Resource action: neutron-server stop on rhos6-node2 * Resource action: neutron-server stop on rhos6-node3 * Pseudo action: neutron-server-clone_stopped_0 * 
Pseudo action: ceilometer-api-clone_stop_0 * Resource action: ceilometer-api stop on rhos6-node1 * Resource action: ceilometer-api stop on rhos6-node2 * Resource action: ceilometer-api stop on rhos6-node3 * Pseudo action: ceilometer-api-clone_stopped_0 * Pseudo action: ceilometer-collector-clone_stop_0 * Resource action: ceilometer-collector stop on rhos6-node1 * Resource action: ceilometer-collector stop on rhos6-node2 * Resource action: ceilometer-collector stop on rhos6-node3 * Pseudo action: ceilometer-collector-clone_stopped_0 * Resource action: ceilometer-central stop on rhos6-node3 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhos6-node1 * Resource action: keystone stop on rhos6-node2 * Resource action: keystone stop on rhos6-node3 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] node1-fence (stonith:fence_xvm): Started rhos6-node1 node2-fence (stonith:fence_xvm): Started rhos6-node2 node3-fence (stonith:fence_xvm): Started rhos6-node3 Clone Set: lb-haproxy-clone [lb-haproxy] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] vip-db (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-rabbitmq (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-qpid (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-keystone (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-glance (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-cinder (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-swift (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-neutron (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-nova (ocf::heartbeat:IPaddr2): Started rhos6-node3 vip-horizon (ocf::heartbeat:IPaddr2): Started rhos6-node1 vip-heat (ocf::heartbeat:IPaddr2): Started rhos6-node2 vip-ceilometer (ocf::heartbeat:IPaddr2): Started rhos6-node3 Master/Slave Set: galera-master [galera] Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: rabbitmq-server-clone [rabbitmq-server] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: memcached-clone [memcached] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: mongodb-clone [mongodb] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: keystone-clone [keystone] Stopped (disabled): [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-fs-clone [glance-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-registry-clone [glance-registry] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: glance-api-clone [glance-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] cinder-api (systemd:openstack-cinder-api): Stopped cinder-scheduler (systemd:openstack-cinder-scheduler): Stopped cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: swift-fs-clone [swift-fs] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-account-clone [swift-account] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-container-clone [swift-container] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-object-clone [swift-object] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: swift-proxy-clone [swift-proxy] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped Clone Set: neutron-server-clone [neutron-server] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-scale-clone [neutron-scale] (unique) neutron-scale:0 (ocf::neutron:NeutronScale): Stopped neutron-scale:1 (ocf::neutron:NeutronScale): 
Stopped neutron-scale:2 (ocf::neutron:NeutronScale): Stopped Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-consoleauth-clone [nova-consoleauth] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-novncproxy-clone [nova-novncproxy] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-api-clone [nova-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-scheduler-clone [nova-scheduler] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: nova-conductor-clone [nova-conductor] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] ceilometer-central (systemd:openstack-ceilometer-central): Stopped Clone Set: ceilometer-collector-clone [ceilometer-collector] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-api-clone [ceilometer-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-delay-clone [ceilometer-delay] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: ceilometer-notification-clone [ceilometer-notification] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-clone [heat-api] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cfn-clone [heat-api-cfn] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch] Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] heat-engine (systemd:openstack-heat-engine): Stopped Clone Set: horizon-clone [horizon] Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] diff --git a/pengine/test10/container-2.summary b/pengine/test10/container-2.summary index b0c08b88ec..f011cd3a98 100644 --- a/pengine/test10/container-2.summary +++ b/pengine/test10/container-2.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Restart container1 (Started node1) * Recover rsc1 (Started node1) - * Restart rsc2 (Started node1) + * Restart rsc2 (Started node1) due to required container1 start Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 Revised cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 
(ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-3.summary b/pengine/test10/container-3.summary index 194c68727f..f853ab2098 100644 --- a/pengine/test10/container-3.summary +++ b/pengine/test10/container-3.summary @@ -1,31 +1,31 @@ Current cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED (failure ignored) rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Restart container1 (Started node1) * Start rsc1 (node1) - * Restart rsc2 (Started node1) + * Restart rsc2 (Started node1) due to required container1 start Executing cluster transition: * Resource action: rsc2 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 * Resource action: rsc1 monitor=10000 on node1 Revised cluster status: Online: [ node1 node2 ] container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 (failure ignored) rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/container-group-2.summary b/pengine/test10/container-group-2.summary index 4451c63cf7..c0dbbf8bdb 100644 --- a/pengine/test10/container-group-2.summary +++ b/pengine/test10/container-group-2.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): FAILED node1 rsc2 (ocf::pacemaker:Dummy): Started node1 Transition Summary: * Restart container1 (Started node1) * Recover rsc1 (Started node1) - * Restart rsc2 (Started node1) + * Restart rsc2 (Started node1) due to required rsc1 start Executing cluster transition: * Pseudo action: container-group_stop_0 * Resource action: rsc2 stop on node1 * Resource action: rsc1 stop on node1 * Resource action: container1 stop on node1 * Pseudo action: all_stopped * Pseudo action: container-group_stopped_0 * Pseudo action: container-group_start_0 * Resource action: container1 start on node1 * Resource action: container1 monitor=20000 on node1 * Resource action: rsc1 start on node1 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 start on node1 * Resource action: rsc2 monitor=5000 on node1 * Pseudo action: container-group_running_0 Revised cluster status: Online: [ node1 node2 ] Resource Group: container-group container1 (ocf::pacemaker:Dummy): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node1 diff --git a/pengine/test10/enforce-colo1.summary b/pengine/test10/enforce-colo1.summary index 985907305c..b79b8cbc0b 100644 --- a/pengine/test10/enforce-colo1.summary +++ b/pengine/test10/enforce-colo1.summary @@ -1,37 +1,37 @@ 3 of 6 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 engine (ocf::heartbeat:Dummy): Started rhel7-auto3 Clone Set: keystone-clone [keystone] Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] central (ocf::heartbeat:Dummy): Started rhel7-auto3 Transition Summary: * Stop engine (Started rhel7-auto3) - * Stop keystone:0 (rhel7-auto2) - * Stop keystone:1 (rhel7-auto3) - * Stop keystone:2 (rhel7-auto1) - * Stop central (Started rhel7-auto3) + * Stop keystone:0 
(rhel7-auto2) due to node availability + * Stop keystone:1 (rhel7-auto3) due to node availability + * Stop keystone:2 (rhel7-auto1) due to node availability + * Stop central (Started rhel7-auto3) due to unrunnable keystone-clone running Executing cluster transition: * Resource action: engine stop on rhel7-auto3 * Resource action: central stop on rhel7-auto3 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhel7-auto2 * Resource action: keystone stop on rhel7-auto3 * Resource action: keystone stop on rhel7-auto1 * Pseudo action: keystone-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] shooter (stonith:fence_xvm): Started rhel7-auto2 engine (ocf::heartbeat:Dummy): Stopped Clone Set: keystone-clone [keystone] Stopped (disabled): [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] central (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/group-fail.summary b/pengine/test10/group-fail.summary index aa03d2121e..9067bf684d 100644 --- a/pengine/test10/group-fail.summary +++ b/pengine/test10/group-fail.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] Resource Group: group1 rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) + * Restart rsc2 (Started node1) due to required rsc1 start * Start rsc3 (node1) - * Restart rsc4 (Started node1) + * Restart rsc4 (Started node1) due to required rsc3 start Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc4 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Resource action: rsc4 start on node1 * Pseudo action: group1_running_0 Revised cluster status: Online: [ node1 node2 ] Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/group-unmanaged-stopped.summary b/pengine/test10/group-unmanaged-stopped.summary index 9f542ff285..de0586acc8 100644 --- a/pengine/test10/group-unmanaged-stopped.summary +++ b/pengine/test10/group-unmanaged-stopped.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 ] Resource Group: group-1 r192.168.122.113 (ocf::heartbeat:IPaddr2): Started pcmk-1 r192.168.122.114 (ocf::heartbeat:IPaddr2): Stopped (unmanaged) r192.168.122.115 (ocf::heartbeat:IPaddr2): Started pcmk-1 Transition Summary: - * Stop r192.168.122.115 (pcmk-1) + * Stop r192.168.122.115 (pcmk-1) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: r192.168.122.115 stop on pcmk-1 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 Revised cluster status: Online: [ pcmk-1 pcmk-2 ] Resource Group: group-1 r192.168.122.113 (ocf::heartbeat:IPaddr2): Started pcmk-1 r192.168.122.114 (ocf::heartbeat:IPaddr2): Stopped (unmanaged) r192.168.122.115 (ocf::heartbeat:IPaddr2): Stopped diff --git a/pengine/test10/group10.summary b/pengine/test10/group10.summary index c630a21b59..570fd28bf3 100644 --- a/pengine/test10/group10.summary +++ b/pengine/test10/group10.summary @@ -1,67 +1,67 @@ Current cluster status: Online: [ c001n01 c001n02 
c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): FAILED c001n01 child_192.168.100.182 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Started c001n08 Transition Summary: * Recover child_192.168.100.181 (Started c001n01) - * Restart child_192.168.100.182 (Started c001n01) - * Restart child_192.168.100.183 (Started c001n01) + * Restart child_192.168.100.182 (Started c001n01) due to required child_192.168.100.181 start + * Restart child_192.168.100.183 (Started c001n01) due to required child_192.168.100.182 start Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: child_192.168.100.183 stop on c001n01 * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n02 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n01 * Resource action: child_192.168.100.182 stop on c001n01 * Resource action: child_192.168.100.181 stop on c001n01 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: child_192.168.100.181 start on c001n01 * Resource action: child_192.168.100.181 monitor=5000 on c001n01 * Resource action: child_192.168.100.182 start on c001n01 * Resource action: child_192.168.100.182 monitor=5000 on c001n01 * Resource action: child_192.168.100.183 start on c001n01 * Resource action: child_192.168.100.183 monitor=5000 on c001n01 * Pseudo action: group-1_running_0 Revised cluster status: Online: [ c001n01 c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08 Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.182 (ocf::heartbeat:IPaddr): Started c001n01 child_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n01 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 child_DoFencing:1 (stonith:ssh): Started c001n02 child_DoFencing:2 (stonith:ssh): Started c001n03 child_DoFencing:3 (stonith:ssh): Started c001n08 diff --git a/pengine/test10/group11.summary b/pengine/test10/group11.summary index 9619e513e1..204d6327de 100644 --- a/pengine/test10/group11.summary +++ b/pengine/test10/group11.summary @@ -1,30 +1,30 @@ 2 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ node1 ] Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 ( 
disabled ) rsc3 (heartbeat:apache): Started node1 Transition Summary: - * Stop rsc2 (node1) - * Stop rsc3 (node1) + * Stop rsc2 (node1) due to node availability + * Stop rsc3 (node1) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 * Pseudo action: group1_start_0 Revised cluster status: Online: [ node1 ] Resource Group: group1 rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Stopped ( disabled ) rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/group13.summary b/pengine/test10/group13.summary index e728b2e581..5f92a4fea8 100644 --- a/pengine/test10/group13.summary +++ b/pengine/test10/group13.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ jamesltc ] Resource Group: nfs resource_nfs (lsb:nfs): Started jamesltc Resource Group: fs resource_fs (ocf::heartbeat:Filesystem): Stopped Transition Summary: - * Stop resource_nfs (jamesltc) + * Stop resource_nfs (jamesltc) due to node availability Executing cluster transition: * Pseudo action: nfs_stop_0 * Resource action: resource_nfs stop on jamesltc * Pseudo action: all_stopped * Pseudo action: nfs_stopped_0 Revised cluster status: Online: [ jamesltc ] Resource Group: nfs resource_nfs (lsb:nfs): Stopped Resource Group: fs resource_fs (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/group14.summary b/pengine/test10/group14.summary index 451aeda17d..69e688bfda 100644 --- a/pengine/test10/group14.summary +++ b/pengine/test10/group14.summary @@ -1,101 +1,101 @@ Current cluster status: Online: [ c001n06 c001n07 ] OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] DcIPaddr (heartbeat:IPaddr): Stopped Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n06 r192.168.100.182 (ocf::heartbeat:IPaddr): Stopped r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped migrator (ocf::heartbeat:Dummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped Transition Summary: - * Start DcIPaddr (c001n06 - blocked) - * Stop r192.168.100.181 (Started c001n06) - * Start r192.168.100.182 (c001n07 - blocked) - * Start r192.168.100.183 (c001n07 - blocked) - * Start lsb_dummy (c001n06 - blocked) - * Start migrator (c001n06 - blocked) - * Start rsc_c001n03 (c001n06 - blocked) - * Start rsc_c001n02 (c001n07 - blocked) - * Start rsc_c001n04 (c001n06 - blocked) - * Start rsc_c001n05 (c001n07 - blocked) - * Start rsc_c001n06 (c001n06 
- blocked) - * Start rsc_c001n07 (c001n07 - blocked) + * Start DcIPaddr (c001n06 - blocked) due to no quorum + * Stop r192.168.100.181 (Started c001n06) due to no quorum + * Start r192.168.100.182 (c001n07 - blocked) due to no quorum + * Start r192.168.100.183 (c001n07 - blocked) due to no quorum + * Start lsb_dummy (c001n06 - blocked) due to no quorum + * Start migrator (c001n06 - blocked) due to no quorum + * Start rsc_c001n03 (c001n06 - blocked) due to no quorum + * Start rsc_c001n02 (c001n07 - blocked) due to no quorum + * Start rsc_c001n04 (c001n06 - blocked) due to no quorum + * Start rsc_c001n05 (c001n07 - blocked) due to no quorum + * Start rsc_c001n06 (c001n06 - blocked) due to no quorum + * Start rsc_c001n07 (c001n07 - blocked) due to no quorum * Start child_DoFencing:0 (c001n06) * Start child_DoFencing:1 (c001n07) - * Start ocf_msdummy:0 (c001n06 - blocked) - * Start ocf_msdummy:1 (c001n07 - blocked) - * Start ocf_msdummy:2 (c001n06 - blocked) - * Start ocf_msdummy:3 (c001n07 - blocked) + * Start ocf_msdummy:0 (c001n06 - blocked) due to no quorum + * Start ocf_msdummy:1 (c001n07 - blocked) due to no quorum + * Start ocf_msdummy:2 (c001n06 - blocked) due to no quorum + * Start ocf_msdummy:3 (c001n07 - blocked) due to no quorum Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: r192.168.100.181 stop on c001n06 * Pseudo action: DoFencing_start_0 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: child_DoFencing:0 start on c001n06 * Resource action: child_DoFencing:1 start on c001n07 * Pseudo action: DoFencing_running_0 * Resource action: child_DoFencing:0 monitor=20000 on c001n06 * Resource action: child_DoFencing:1 monitor=20000 on c001n07 Revised cluster status: Online: [ c001n06 c001n07 ] OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] DcIPaddr (heartbeat:IPaddr): Stopped Resource Group: group-1 r192.168.100.181 (ocf::heartbeat:IPaddr): Stopped r192.168.100.182 (ocf::heartbeat:IPaddr): Stopped r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped migrator (ocf::heartbeat:Dummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Started: [ c001n06 c001n07 ] Stopped: [ c001n02 c001n03 c001n04 c001n05 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped diff --git a/pengine/test10/group9.summary b/pengine/test10/group9.summary index 6989f1d755..f6755b9bc3 100644 --- a/pengine/test10/group9.summary +++ b/pengine/test10/group9.summary @@ -1,65 +1,65 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Resource Group: foo 
rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): FAILED node1 rsc5 (heartbeat:apache): Started node1 Resource Group: bar rsc6 (heartbeat:apache): Started node1 rsc7 (heartbeat:apache): FAILED node1 rsc8 (heartbeat:apache): Started node1 Transition Summary: * Recover rsc4 (Started node1) - * Restart rsc5 (Started node1) + * Restart rsc5 (Started node1) due to required rsc4 start * Move rsc6 (Started node1 -> node2) * Recover rsc7 (Started node1 -> node2) * Move rsc8 (Started node1 -> node2) Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Pseudo action: foo_stop_0 * Resource action: rsc3 monitor on node2 * Resource action: rsc4 monitor on node2 * Resource action: rsc5 monitor on node2 * Pseudo action: bar_stop_0 * Resource action: rsc6 monitor on node2 * Resource action: rsc7 monitor on node2 * Resource action: rsc8 monitor on node2 * Resource action: rsc5 stop on node1 * Resource action: rsc8 stop on node1 * Resource action: rsc4 stop on node1 * Resource action: rsc7 stop on node1 * Pseudo action: foo_stopped_0 * Pseudo action: foo_start_0 * Resource action: rsc4 start on node1 * Resource action: rsc5 start on node1 * Resource action: rsc6 stop on node1 * Pseudo action: all_stopped * Pseudo action: foo_running_0 * Pseudo action: bar_stopped_0 * Pseudo action: bar_start_0 * Resource action: rsc6 start on node2 * Resource action: rsc7 start on node2 * Resource action: rsc8 start on node2 * Pseudo action: bar_running_0 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 Resource Group: foo rsc3 (heartbeat:apache): Started node1 rsc4 (heartbeat:apache): Started node1 rsc5 (heartbeat:apache): Started node1 Resource Group: bar rsc6 (heartbeat:apache): Started node2 rsc7 (heartbeat:apache): Started node2 rsc8 (heartbeat:apache): Started node2 diff --git a/pengine/test10/inc10.summary b/pengine/test10/inc10.summary index 77552e75da..053a9ba2f6 100644 --- a/pengine/test10/inc10.summary +++ b/pengine/test10/inc10.summary @@ -1,45 +1,45 @@ Current cluster status: Node xen-2 (e3aa8547-3d52-47df-a8a3-ca94538a5282): standby Online: [ xen-1 xen-3 xen-4 ] Clone Set: DoFencing [child_DoFencing] Started: [ xen-1 xen-2 xen-3 xen-4 ] Clone Set: ocfs2-clone [ocfs2] Started: [ xen-1 xen-2 xen-3 xen-4 ] Transition Summary: - * Stop child_DoFencing:1 (xen-2) - * Stop ocfs2:1 (xen-2) + * Stop child_DoFencing:1 (xen-2) due to node availability + * Stop ocfs2:1 (xen-2) due to node availability Executing cluster transition: * Pseudo action: DoFencing_stop_0 * Pseudo action: ocfs2-clone_pre_notify_stop_0 * Resource action: child_DoFencing:2 stop on xen-2 * Pseudo action: DoFencing_stopped_0 * Resource action: ocfs2:1 notify on xen-3 * Resource action: ocfs2:1 notify on xen-2 * Resource action: ocfs2:3 notify on xen-1 * Resource action: ocfs2:0 notify on xen-4 * Pseudo action: ocfs2-clone_confirmed-pre_notify_stop_0 * Pseudo action: ocfs2-clone_stop_0 * Resource action: ocfs2:1 stop on xen-2 * Pseudo action: ocfs2-clone_stopped_0 * Pseudo action: ocfs2-clone_post_notify_stopped_0 * Resource action: ocfs2:1 notify on xen-3 * Resource action: ocfs2:3 notify on xen-1 * Resource action: ocfs2:0 notify on xen-4 * Pseudo action: ocfs2-clone_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node xen-2 (e3aa8547-3d52-47df-a8a3-ca94538a5282): standby Online: [ xen-1 xen-3 xen-4 ] Clone Set: DoFencing [child_DoFencing] Started: [ xen-1 xen-3 
xen-4 ] Stopped: [ xen-2 ] Clone Set: ocfs2-clone [ocfs2] Started: [ xen-1 xen-3 xen-4 ] Stopped: [ xen-2 ] diff --git a/pengine/test10/inc12.summary b/pengine/test10/inc12.summary index 5068b7e95a..e950c6727c 100644 --- a/pengine/test10/inc12.summary +++ b/pengine/test10/inc12.summary @@ -1,137 +1,137 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ] Stopped: [ c001n03 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:6 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:7 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave c001n02 ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave c001n02 Transition Summary: * Shutdown c001n07 * Shutdown c001n06 * Shutdown c001n05 * Shutdown c001n04 * Shutdown c001n03 * Shutdown c001n02 - * Stop ocf_192.168.100.181 (c001n02) - * Stop heartbeat_192.168.100.182 (c001n02) - * Stop ocf_192.168.100.183 (c001n02) + * Stop ocf_192.168.100.181 (c001n02) due to node availability + * Stop heartbeat_192.168.100.182 (c001n02) due to node availability + * Stop ocf_192.168.100.183 (c001n02) due to node availability * Stop lsb_dummy (c001n04) * Stop rsc_c001n03 (c001n05) * Stop rsc_c001n02 (c001n02) * Stop rsc_c001n04 (c001n04) * Stop rsc_c001n05 (c001n05) * Stop rsc_c001n06 (c001n06) * Stop rsc_c001n07 (c001n07) - * Stop child_DoFencing:0 (c001n02) - * Stop child_DoFencing:1 (c001n04) - * Stop child_DoFencing:2 (c001n05) - * Stop child_DoFencing:3 (c001n06) - * Stop child_DoFencing:4 (c001n07) - * Stop ocf_msdummy:10 (c001n02) - * Stop ocf_msdummy:11 (c001n02) - * Stop ocf_msdummy:2 (c001n04) - * Stop ocf_msdummy:3 (c001n04) - * Stop ocf_msdummy:4 (c001n05) - * Stop ocf_msdummy:5 (c001n05) - * Stop ocf_msdummy:6 (c001n06) - * Stop ocf_msdummy:7 (c001n06) - * Stop ocf_msdummy:8 (c001n07) - * Stop ocf_msdummy:9 (c001n07) + * Stop child_DoFencing:0 (c001n02) due to node availability + * Stop child_DoFencing:1 (c001n04) due to node availability + * Stop child_DoFencing:2 (c001n05) due to node availability + * Stop child_DoFencing:3 (c001n06) due to node availability + * Stop child_DoFencing:4 (c001n07) due to node availability + * Stop ocf_msdummy:10 (c001n02) due to node availability + * Stop ocf_msdummy:11 (c001n02) due to node availability + * Stop ocf_msdummy:2 (c001n04) due to node availability + * Stop ocf_msdummy:3 (c001n04) due to node 
availability + * Stop ocf_msdummy:4 (c001n05) due to node availability + * Stop ocf_msdummy:5 (c001n05) due to node availability + * Stop ocf_msdummy:6 (c001n06) due to node availability + * Stop ocf_msdummy:7 (c001n06) due to node availability + * Stop ocf_msdummy:8 (c001n07) due to node availability + * Stop ocf_msdummy:9 (c001n07) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n02 * Resource action: lsb_dummy stop on c001n04 * Resource action: rsc_c001n03 stop on c001n05 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n04 stop on c001n04 * Resource action: rsc_c001n05 stop on c001n05 * Resource action: rsc_c001n06 stop on c001n06 * Resource action: rsc_c001n07 stop on c001n07 * Pseudo action: DoFencing_stop_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n02 * Resource action: child_DoFencing:1 stop on c001n02 * Resource action: child_DoFencing:2 stop on c001n04 * Resource action: child_DoFencing:3 stop on c001n05 * Resource action: child_DoFencing:4 stop on c001n06 * Resource action: child_DoFencing:5 stop on c001n07 * Pseudo action: DoFencing_stopped_0 * Resource action: ocf_msdummy:10 stop on c001n02 * Resource action: ocf_msdummy:11 stop on c001n02 * Resource action: ocf_msdummy:2 stop on c001n04 * Resource action: ocf_msdummy:3 stop on c001n04 * Resource action: ocf_msdummy:4 stop on c001n05 * Resource action: ocf_msdummy:5 stop on c001n05 * Resource action: ocf_msdummy:6 stop on c001n06 * Resource action: ocf_msdummy:7 stop on c001n06 * Resource action: ocf_msdummy:8 stop on c001n07 * Resource action: ocf_msdummy:9 stop on c001n07 * Pseudo action: master_rsc_1_stopped_0 * Cluster action: do_shutdown on c001n07 * Cluster action: do_shutdown on c001n06 * Cluster action: do_shutdown on c001n05 * Cluster action: do_shutdown on c001n04 * Resource action: ocf_192.168.100.181 stop on c001n02 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Cluster action: do_shutdown on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped heartbeat_192.168.100.182 (heartbeat:IPaddr): Stopped ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped diff --git 
a/pengine/test10/inc2.summary b/pengine/test10/inc2.summary index 7c6a9bb078..898729fd41 100644 --- a/pengine/test10/inc2.summary +++ b/pengine/test10/inc2.summary @@ -1,43 +1,43 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Transition Summary: * Move child_rsc1:2 (Started node1 -> node2) * Move child_rsc1:3 (Started node1 -> node2) - * Stop child_rsc1:4 (node1) + * Stop child_rsc1:4 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:2 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource action: child_rsc1:4 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 child_rsc1:4 (heartbeat:apache): Stopped diff --git a/pengine/test10/inc3.summary b/pengine/test10/inc3.summary index adc59e7fff..955ff3b30f 100644 --- a/pengine/test10/inc3.summary +++ b/pengine/test10/inc3.summary @@ -1,70 +1,70 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node2 child_rsc2:4 (heartbeat:apache): Started node2 Transition Summary: * Move child_rsc1:2 (Started node1 -> node2) * Move child_rsc1:3 (Started node1 -> node2) - * Stop child_rsc1:4 (node1) + * Stop child_rsc1:4 (node1) due to node availability * Move child_rsc2:3 (Started node2 -> node1) * Move child_rsc2:4 (Started node2 -> node1) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc2:0 monitor on node1 * Resource action: child_rsc2:1 monitor on node1 * Resource action: child_rsc2:2 monitor on node1 * Resource action: child_rsc2:3 monitor on node1 * Resource action: child_rsc2:4 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc2:3 stop on node2 * Resource action: child_rsc2:4 stop on node2 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:2 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource 
action: child_rsc1:4 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc2:3 start on node1 * Resource action: child_rsc2:4 start on node1 * Pseudo action: rsc2_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 child_rsc1:4 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node1 child_rsc2:4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/inc4.summary b/pengine/test10/inc4.summary index 03947cdf90..e730360541 100644 --- a/pengine/test10/inc4.summary +++ b/pengine/test10/inc4.summary @@ -1,70 +1,70 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node1 child_rsc1:3 (heartbeat:apache): Started node1 child_rsc1:4 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node2 child_rsc2:4 (heartbeat:apache): Started node2 Transition Summary: * Move child_rsc1:2 (Started node1 -> node2) * Move child_rsc1:3 (Started node1 -> node2) - * Stop child_rsc1:4 (node1) + * Stop child_rsc1:4 (node1) due to node availability * Move child_rsc2:3 (Started node2 -> node1) * Move child_rsc2:4 (Started node2 -> node1) Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc2:0 monitor on node1 * Resource action: child_rsc2:1 monitor on node1 * Resource action: child_rsc2:2 monitor on node1 * Resource action: child_rsc2:3 monitor on node1 * Resource action: child_rsc2:4 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc2:4 stop on node2 * Resource action: child_rsc2:3 stop on node2 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:4 stop on node1 * Resource action: child_rsc1:3 stop on node1 * Resource action: child_rsc1:2 stop on node1 * Pseudo action: rsc1_stopped_0 * Pseudo action: rsc1_start_0 * Pseudo action: all_stopped * Resource action: child_rsc1:2 start on node2 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc2:3 start on node1 * Resource action: child_rsc2:4 start on node1 * Pseudo action: rsc2_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 child_rsc1:2 (heartbeat:apache): Started node2 child_rsc1:3 (heartbeat:apache): Started node2 
child_rsc1:4 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node2 child_rsc2:1 (heartbeat:apache): Started node2 child_rsc2:2 (heartbeat:apache): Started node2 child_rsc2:3 (heartbeat:apache): Started node1 child_rsc2:4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/inc6.summary b/pengine/test10/inc6.summary index cf84c1fb38..4c754d7c91 100644 --- a/pengine/test10/inc6.summary +++ b/pengine/test10/inc6.summary @@ -1,100 +1,100 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] Started: [ node1 node2 ] Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started node1 child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] Started: [ node1 node2 ] Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 child_rsc4:1 (heartbeat:apache): Started node1 child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node2 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] Started: [ node1 node2 ] Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node2 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] Started: [ node1 node2 ] Transition Summary: * Move child_rsc2:1 (Started node1 -> node2) * Move child_rsc4:1 (Started node1 -> node2) * Move child_rsc5:1 (Started node2 -> node1) - * Restart child_rsc6:0 (Started node1) - * Restart child_rsc6:1 (Started node2) + * Restart child_rsc6:0 (Started node1) due to required rsc5 running + * Restart child_rsc6:1 (Started node2) due to required rsc5 running * Move child_rsc7:1 (Started node2 -> node1) Executing cluster transition: * Pseudo action: rsc2_stop_0 * Pseudo action: rsc4_stop_0 * Pseudo action: rsc6_stop_0 * Pseudo action: rsc7_stop_0 * Resource action: child_rsc2:1 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc2_start_0 * Resource action: child_rsc4:1 stop on node1 * Pseudo action: rsc4_stopped_0 * Pseudo action: rsc4_start_0 * Resource action: child_rsc6:0 stop on node1 * Resource action: child_rsc6:1 stop on node2 * Pseudo action: rsc6_stopped_0 * Resource action: child_rsc7:1 stop on node2 * Pseudo action: rsc7_stopped_0 * Pseudo action: rsc7_start_0 * Resource action: child_rsc2:1 start on node2 * Pseudo action: rsc2_running_0 * Resource action: child_rsc4:1 start on node2 * Pseudo action: rsc4_running_0 * Pseudo action: rsc5_stop_0 * Resource action: child_rsc7:1 start on node1 * Pseudo action: rsc7_running_0 * Resource action: child_rsc5:1 stop on node2 * Pseudo action: rsc5_stopped_0 * Pseudo action: rsc5_start_0 * Pseudo action: all_stopped * Resource action: child_rsc5:1 start on node1 * Pseudo action: rsc5_running_0 * Pseudo action: rsc6_start_0 * Resource action: child_rsc6:0 start on node1 * Resource action: child_rsc6:1 start on node2 * Pseudo action: rsc6_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] Started: [ node1 node2 ] Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Started [ node1 node2 ] child_rsc2:2 (heartbeat:apache): Stopped Clone Set: rsc3 [child_rsc3] Started: [ node1 node2 ] Clone Set: rsc4 [child_rsc4] (unique) child_rsc4:0 (heartbeat:apache): Started node1 
child_rsc4:1 (heartbeat:apache): Started [ node1 node2 ] child_rsc4:2 (heartbeat:apache): Stopped Clone Set: rsc5 [child_rsc5] (unique) child_rsc5:0 (heartbeat:apache): Started node2 child_rsc5:1 (heartbeat:apache): Started node1 child_rsc5:2 (heartbeat:apache): Stopped Clone Set: rsc6 [child_rsc6] Started: [ node1 node2 ] Clone Set: rsc7 [child_rsc7] (unique) child_rsc7:0 (heartbeat:apache): Started node2 child_rsc7:1 (heartbeat:apache): Started node1 child_rsc7:2 (heartbeat:apache): Stopped Clone Set: rsc8 [child_rsc8] Started: [ node1 node2 ] diff --git a/pengine/test10/inc9.summary b/pengine/test10/inc9.summary index 5a7f123d0b..3c35aee7c9 100644 --- a/pengine/test10/inc9.summary +++ b/pengine/test10/inc9.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] child_rsc1 (heartbeat:apache): ORPHANED Started node1 child_rsc1 (heartbeat:apache): ORPHANED Started node1 child_rsc1 (heartbeat:apache): ORPHANED Started node2 Started: [ node1 node2 ] Transition Summary: - * Stop child_rsc1:5 (node1) - * Stop child_rsc1:6 (node1) - * Stop child_rsc1:7 (node2) + * Stop child_rsc1:5 (node1) due to node availability + * Stop child_rsc1:6 (node1) due to node availability + * Stop child_rsc1:7 (node2) due to node availability Executing cluster transition: * Pseudo action: rsc1_stop_0 * Resource action: child_rsc1:1 stop on node1 * Resource action: child_rsc1:2 stop on node1 * Resource action: child_rsc1:1 stop on node2 * Pseudo action: rsc1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] Started: [ node1 node2 ] diff --git a/pengine/test10/interleave-pseudo-stop.summary b/pengine/test10/interleave-pseudo-stop.summary index 01ce0ecc40..ee3fa29d6b 100644 --- a/pengine/test10/interleave-pseudo-stop.summary +++ b/pengine/test10/interleave-pseudo-stop.summary @@ -1,83 +1,83 @@ Current cluster status: Node node1 (f6d93040-a9ad-4745-a647-57ed32444ca8): UNCLEAN (offline) Online: [ node2 ] Clone Set: stonithcloneset [stonithclone] stonithclone (stonith:external/ssh): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: evmscloneset [evmsclone] evmsclone (ocf::heartbeat:EvmsSCC): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: imagestorecloneset [imagestoreclone] imagestoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN) Started: [ node2 ] Clone Set: configstorecloneset [configstoreclone] configstoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN) Started: [ node2 ] Transition Summary: * Fence (reboot) node1 'peer is no longer part of the cluster' - * Stop stonithclone:1 (node1) - * Stop evmsclone:1 (node1) - * Stop imagestoreclone:1 (node1) - * Stop configstoreclone:1 (node1) + * Stop stonithclone:1 (node1) due to node availability + * Stop evmsclone:1 (node1) due to node availability + * Stop imagestoreclone:1 (node1) due to node availability + * Stop configstoreclone:1 (node1) due to node availability Executing cluster transition: * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Fencing node1 (reboot) * Pseudo action: stonithcloneset_stop_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmsclone:0_post_notify_stop_0 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestoreclone:0_post_notify_stop_0 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 
* Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstoreclone:0_post_notify_stop_0 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Pseudo action: stonith_complete * Pseudo action: stonithclone:0_stop_0 * Pseudo action: stonithcloneset_stopped_0 * Pseudo action: imagestoreclone:0_stop_0 * Pseudo action: imagestorecloneset_stopped_0 * Pseudo action: configstoreclone:0_stop_0 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestoreclone:0_notified_0 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstoreclone:0_notified_0 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_stop_0 * Pseudo action: evmsclone:0_stop_0 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmsclone:0_notified_0 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped (disabled): [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/interleave-restart.summary b/pengine/test10/interleave-restart.summary index 7f1b1e4368..5ac19b9c98 100644 --- a/pengine/test10/interleave-restart.summary +++ b/pengine/test10/interleave-restart.summary @@ -1,96 +1,96 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] evmsclone (ocf::heartbeat:EvmsSCC): FAILED node1 Started: [ node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] Transition Summary: * Recover evmsclone:1 (Started node1) - * Restart imagestoreclone:1 (Started node1) - * Restart configstoreclone:1 (Started node1) + * Restart imagestoreclone:1 (Started node1) due to required evmsclone:1 start + * Restart configstoreclone:1 (Started node1) due to required evmsclone:1 start Executing cluster transition: * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Resource action: evmsclone:1 notify on node2 * Resource action: evmsclone:0 notify on node1 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:1 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Resource action: imagestoreclone:0 stop on node1 * Pseudo action: imagestorecloneset_stopped_0 * 
Resource action: configstoreclone:0 stop on node1 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: imagestorecloneset_pre_notify_start_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: configstorecloneset_pre_notify_start_0 * Pseudo action: evmscloneset_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 * Resource action: evmsclone:0 stop on node1 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_pre_notify_start_0 * Pseudo action: all_stopped * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 * Pseudo action: evmscloneset_start_0 * Resource action: evmsclone:0 start on node1 * Pseudo action: evmscloneset_running_0 * Pseudo action: evmscloneset_post_notify_running_0 * Resource action: evmsclone:1 notify on node2 * Resource action: evmsclone:0 notify on node1 * Pseudo action: evmscloneset_confirmed-post_notify_running_0 * Pseudo action: imagestorecloneset_start_0 * Pseudo action: configstorecloneset_start_0 * Resource action: imagestoreclone:0 start on node1 * Pseudo action: imagestorecloneset_running_0 * Resource action: configstoreclone:0 start on node1 * Pseudo action: configstorecloneset_running_0 * Pseudo action: imagestorecloneset_post_notify_running_0 * Pseudo action: configstorecloneset_post_notify_running_0 * Resource action: imagestoreclone:1 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 * Resource action: configstoreclone:1 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 * Resource action: imagestoreclone:0 monitor=20000 on node1 * Resource action: configstoreclone:0 monitor=20000 on node1 Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] diff --git a/pengine/test10/interleave-stop.summary b/pengine/test10/interleave-stop.summary index b5a1a98e19..529e33a7ea 100644 --- a/pengine/test10/interleave-stop.summary +++ b/pengine/test10/interleave-stop.summary @@ -1,73 +1,73 @@ Current cluster status: Node node1 (f6d93040-a9ad-4745-a647-57ed32444ca8): standby Online: [ node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] Transition Summary: - * Stop stonithclone:1 (node1) - * Stop evmsclone:1 (node1) - * Stop imagestoreclone:1 (node1) - * Stop 
configstoreclone:1 (node1) + * Stop stonithclone:1 (node1) due to node availability + * Stop evmsclone:1 (node1) due to node availability + * Stop imagestoreclone:1 (node1) due to node availability + * Stop configstoreclone:1 (node1) due to node availability Executing cluster transition: * Pseudo action: stonithcloneset_stop_0 * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Resource action: stonithclone:0 stop on node1 * Pseudo action: stonithcloneset_stopped_0 * Resource action: evmsclone:1 notify on node2 * Resource action: evmsclone:0 notify on node1 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:1 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:1 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Resource action: imagestoreclone:0 stop on node1 * Pseudo action: imagestorecloneset_stopped_0 * Resource action: configstoreclone:0 stop on node1 * Pseudo action: configstorecloneset_stopped_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: imagestoreclone:1 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Resource action: configstoreclone:1 notify on node2 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_stop_0 * Resource action: evmsclone:0 stop on node1 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:1 notify on node2 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node node1 (f6d93040-a9ad-4745-a647-57ed32444ca8): standby Online: [ node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped (disabled): [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/master-7.summary b/pengine/test10/master-7.summary index 4c58184a27..58ef2758f5 100644 --- a/pengine/test10/master-7.summary +++ b/pengine/test10/master-7.summary @@ -1,121 +1,121 @@ Current cluster status: Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): 
Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 ( UNCLEAN ) ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: * Fence (reboot) c001n01 'peer is no longer part of the cluster' * Move DcIPaddr (Started c001n01 -> c001n03) * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move lsb_dummy (Started c001n02 -> c001n08) * Move rsc_c001n01 (Started c001n01 -> c001n03) - * Stop child_DoFencing:0 (c001n01) + * Stop child_DoFencing:0 (c001n01) due to node availability * Demote ocf_msdummy:0 (Master -> Stopped c001n01) - * Stop ocf_msdummy:4 (c001n01) + * Stop ocf_msdummy:4 (c001n01) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: ocf_msdummy:4_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 
monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/master-8.summary b/pengine/test10/master-8.summary index e8d90d20e6..c18e8848a1 100644 --- a/pengine/test10/master-8.summary +++ b/pengine/test10/master-8.summary @@ -1,125 +1,125 @@ Current cluster status: Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline) Online: [ c001n02 c001n03 c001n08 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN) rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 Transition Summary: 
* Fence (reboot) c001n01 'peer is no longer part of the cluster' * Move DcIPaddr (Started c001n01 -> c001n03) * Move ocf_192.168.100.181 (Started c001n03 -> c001n02) * Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02) * Move ocf_192.168.100.183 (Started c001n03 -> c001n02) * Move lsb_dummy (Started c001n02 -> c001n08) * Move rsc_c001n01 (Started c001n01 -> c001n03) - * Stop child_DoFencing:0 (c001n01) + * Stop child_DoFencing:0 (c001n01) due to node availability * Demote ocf_msdummy:0 (Master -> Slave c001n01 - blocked) * Move ocf_msdummy:0 (Slave c001n01 -> c001n03) Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: DoFencing_stop_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Pseudo action: all_stopped * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Resource action: ocf_msdummy:0 start on c001n03 * Pseudo action: master_rsc_1_running_0 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 * Resource action: ocf_msdummy:0 monitor=5000 on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n08 ] OFFLINE: [ c001n01 ] DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03 Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03 rsc_c001n08 
(ocf::heartbeat:IPaddr): Started c001n08 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Stopped child_DoFencing:1 (stonith:ssh): Started c001n03 child_DoFencing:2 (stonith:ssh): Started c001n02 child_DoFencing:3 (stonith:ssh): Started c001n08 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 diff --git a/pengine/test10/master-9.summary b/pengine/test10/master-9.summary index c1cd4a1cbf..ef596022e7 100644 --- a/pengine/test10/master-9.summary +++ b/pengine/test10/master-9.summary @@ -1,100 +1,100 @@ Current cluster status: Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline) Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline) Online: [ ibm1 va1 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped rsc_va1 (ocf::heartbeat:IPaddr): Stopped rsc_test02 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started va1 child_DoFencing:1 (stonith:ssh): Started ibm1 child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped Transition Summary: * Shutdown ibm1 - * Start DcIPaddr (va1 - blocked) - * Start ocf_127.0.0.11 (va1 - blocked) - * Start heartbeat_127.0.0.12 (va1 - blocked) - * Start ocf_127.0.0.13 (va1 - blocked) - * Start lsb_dummy (va1 - blocked) - * Start rsc_sgi2 (va1 - blocked) - * Start rsc_ibm1 (va1 - blocked) - * Start rsc_va1 (va1 - blocked) - * Start rsc_test02 (va1 - blocked) - * Stop child_DoFencing:1 (ibm1) - * Start ocf_msdummy:0 (va1 - blocked) - * Start ocf_msdummy:1 (va1 - blocked) + * Start DcIPaddr (va1 - blocked) due to no quorum + * Start ocf_127.0.0.11 (va1 - blocked) due to no quorum + * Start heartbeat_127.0.0.12 (va1 - blocked) due to no quorum + * Start ocf_127.0.0.13 (va1 - blocked) due to no quorum + * Start lsb_dummy (va1 - blocked) due to no 
quorum + * Start rsc_sgi2 (va1 - blocked) due to no quorum + * Start rsc_ibm1 (va1 - blocked) due to no quorum + * Start rsc_va1 (va1 - blocked) due to no quorum + * Start rsc_test02 (va1 - blocked) due to no quorum + * Stop child_DoFencing:1 (ibm1) due to node availability + * Start ocf_msdummy:0 (va1 - blocked) due to no quorum + * Start ocf_msdummy:1 (va1 - blocked) due to no quorum Executing cluster transition: * Resource action: child_DoFencing:1 monitor on va1 * Resource action: child_DoFencing:2 monitor on va1 * Resource action: child_DoFencing:2 monitor on ibm1 * Resource action: child_DoFencing:3 monitor on va1 * Resource action: child_DoFencing:3 monitor on ibm1 * Pseudo action: DoFencing_stop_0 * Resource action: ocf_msdummy:2 monitor on va1 * Resource action: ocf_msdummy:2 monitor on ibm1 * Resource action: ocf_msdummy:3 monitor on va1 * Resource action: ocf_msdummy:3 monitor on ibm1 * Resource action: ocf_msdummy:4 monitor on va1 * Resource action: ocf_msdummy:4 monitor on ibm1 * Resource action: ocf_msdummy:5 monitor on va1 * Resource action: ocf_msdummy:5 monitor on ibm1 * Resource action: ocf_msdummy:6 monitor on va1 * Resource action: ocf_msdummy:6 monitor on ibm1 * Resource action: ocf_msdummy:7 monitor on va1 * Resource action: ocf_msdummy:7 monitor on ibm1 * Resource action: child_DoFencing:1 stop on ibm1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on ibm1 * Pseudo action: all_stopped Revised cluster status: Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline) Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline) Online: [ ibm1 va1 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped rsc_va1 (ocf::heartbeat:IPaddr): Stopped rsc_test02 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started va1 child_DoFencing:1 (stonith:ssh): Stopped child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped diff --git a/pengine/test10/master-asymmetrical-order.summary b/pengine/test10/master-asymmetrical-order.summary index 1ff2836e1e..d09f62e342 100644 --- a/pengine/test10/master-asymmetrical-order.summary +++ b/pengine/test10/master-asymmetrical-order.summary @@ -1,35 +1,35 @@ 2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Master/Slave Set: ms2 [rsc2] Masters: [ node2 ] Slaves: [ node1 ] Transition Summary: * Demote rsc1:0 (Master -> Stopped node1) - * Stop 
rsc1:1 (node2) + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:0 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:0 stop on node1 * Resource action: rsc1:1 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] Master/Slave Set: ms1 [rsc1] Stopped (disabled): [ node1 node2 ] Master/Slave Set: ms2 [rsc2] Masters: [ node2 ] Slaves: [ node1 ] diff --git a/pengine/test10/master-failed-demote-2.summary b/pengine/test10/master-failed-demote-2.summary index 847e0a1536..086d02e16b 100644 --- a/pengine/test10/master-failed-demote-2.summary +++ b/pengine/test10/master-failed-demote-2.summary @@ -1,46 +1,46 @@ Current cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a Transition Summary: - * Stop stateful-1:0 (dl380g5b) + * Stop stateful-1:0 (dl380g5b) due to node availability * Promote stateful-1:1 (Slave -> Master dl380g5a) * Promote stateful-2:1 (Slave -> Master dl380g5a) Executing cluster transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: all_stopped * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 monitor=10000 on dl380g5a * Pseudo action: ms-sf_promoted_0 Revised cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Stopped stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a diff --git a/pengine/test10/master-failed-demote.summary b/pengine/test10/master-failed-demote.summary index cc3fbee717..0f6c410bf1 100644 --- a/pengine/test10/master-failed-demote.summary +++ b/pengine/test10/master-failed-demote.summary @@ -1,63 +1,63 @@ Current cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): FAILED dl380g5b stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Slave dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Slave dl380g5a Transition Summary: - * Stop stateful-1:0 (dl380g5b) + * Stop stateful-1:0 (dl380g5b) due to node availability * Promote stateful-1:1 (Slave -> Master dl380g5a) * Promote stateful-2:1 (Slave -> Master dl380g5a) Executing cluster transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_pre_notify_stop_0 * Resource action: stateful-1:0 notify on dl380g5b * Resource action: stateful-1:1 notify on dl380g5a * 
Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_stop_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_post_notify_stopped_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms-sf_pre_notify_promote_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_promote_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 * Pseudo action: ms-sf_post_notify_promoted_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 monitor=10000 on dl380g5a Revised cluster status: Online: [ dl380g5a dl380g5b ] Master/Slave Set: ms-sf [group] (unique) Resource Group: group:0 stateful-1:0 (ocf::heartbeat:Stateful): Stopped stateful-2:0 (ocf::heartbeat:Stateful): Stopped Resource Group: group:1 stateful-1:1 (ocf::heartbeat:Stateful): Master dl380g5a stateful-2:1 (ocf::heartbeat:Stateful): Master dl380g5a diff --git a/pengine/test10/master-pseudo.summary b/pengine/test10/master-pseudo.summary index 4ac7605517..2ee2d03b50 100644 --- a/pengine/test10/master-pseudo.summary +++ b/pengine/test10/master-pseudo.summary @@ -1,60 +1,60 @@ Current cluster status: Node raki.linbit: standby Online: [ sambuca.linbit ] ip_float_right (ocf::heartbeat:IPaddr2): Stopped Master/Slave Set: ms_drbd_float [drbd_float] Slaves: [ sambuca.linbit ] Resource Group: nfsexport ip_nfs (ocf::heartbeat:IPaddr2): Stopped fs_float (ocf::heartbeat:Filesystem): Stopped Transition Summary: * Start ip_float_right (sambuca.linbit) - * Restart drbd_float:0 (Slave sambuca.linbit) + * Restart drbd_float:0 (Slave sambuca.linbit) due to required ip_float_right start * Promote drbd_float:0 (Slave -> Master sambuca.linbit) * Start ip_nfs (sambuca.linbit) Executing cluster transition: * Resource action: ip_float_right start on sambuca.linbit * Pseudo action: ms_drbd_float_pre_notify_stop_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_float_stop_0 * Resource action: drbd_float:0 stop on sambuca.linbit * Pseudo action: ms_drbd_float_stopped_0 * Pseudo action: ms_drbd_float_post_notify_stopped_0 * Pseudo action: ms_drbd_float_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_float_pre_notify_start_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_float_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_float_start_0 * Resource action: drbd_float:0 start on sambuca.linbit * Pseudo action: ms_drbd_float_running_0 * Pseudo action: ms_drbd_float_post_notify_running_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_float_pre_notify_promote_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: 
ms_drbd_float_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_float_promote_0 * Resource action: drbd_float:0 promote on sambuca.linbit * Pseudo action: ms_drbd_float_promoted_0 * Pseudo action: ms_drbd_float_post_notify_promoted_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_promoted_0 * Pseudo action: nfsexport_start_0 * Resource action: ip_nfs start on sambuca.linbit Revised cluster status: Node raki.linbit: standby Online: [ sambuca.linbit ] ip_float_right (ocf::heartbeat:IPaddr2): Started sambuca.linbit Master/Slave Set: ms_drbd_float [drbd_float] Masters: [ sambuca.linbit ] Resource Group: nfsexport ip_nfs (ocf::heartbeat:IPaddr2): Started sambuca.linbit fs_float (ocf::heartbeat:Filesystem): Stopped diff --git a/pengine/test10/master-stop.summary b/pengine/test10/master-stop.summary index cbe6683715..8b802d4d7a 100644 --- a/pengine/test10/master-stop.summary +++ b/pengine/test10/master-stop.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ node1 node2 node3 ] Master/Slave Set: m [dummy] Slaves: [ node1 node2 node3 ] Transition Summary: - * Stop dummy:2 (node3) + * Stop dummy:2 (node3) due to node availability Executing cluster transition: * Pseudo action: m_stop_0 * Resource action: dummy:2 stop on node3 * Pseudo action: m_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 node3 ] Master/Slave Set: m [dummy] Slaves: [ node1 node2 ] Stopped: [ node3 ] diff --git a/pengine/test10/migrate-5.summary b/pengine/test10/migrate-5.summary index 9a98330c7a..a9d5771ff3 100644 --- a/pengine/test10/migrate-5.summary +++ b/pengine/test10/migrate-5.summary @@ -1,34 +1,34 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Transition Summary: * Migrate domU-test01 (Started dom0-02 -> dom0-01) - * Stop dom0-iscsi1-cnx1:1 (dom0-02) + * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-both-vms.summary b/pengine/test10/migrate-both-vms.summary index 531fd4ca40..5b60727bb7 100644 --- a/pengine/test10/migrate-both-vms.summary +++ b/pengine/test10/migrate-both-vms.summary @@ -1,101 +1,101 @@ Current cluster status: Node cvmh03: standby Node cvmh04: standby Online: [ cvmh01 cvmh02 ] fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02 fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02 Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-p-libvirtd [p-libvirtd] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh] Started: [ cvmh01 cvmh02 cvmh03 
cvmh04 ] Clone Set: c-watch-ib0 [p-watch-ib0] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] Clone Set: c-fs-gpfs [p-fs-gpfs] Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ] vm-compute-test (ocf::ccni:xcatVirtualDomain): Started cvmh03 vm-swbuildsl6 (ocf::ccni:xcatVirtualDomain): Started cvmh04 Transition Summary: - * Stop fs-libvirt-VM-xcm:0 (cvmh04) - * Stop fs-libvirt-VM-xcm:2 (cvmh03) - * Stop p-watch-ib0:0 (cvmh04) - * Stop p-watch-ib0:2 (cvmh03) - * Stop p-fs-gpfs:0 (cvmh04) - * Stop p-fs-gpfs:2 (cvmh03) - * Stop p-libvirtd:0 (cvmh04) - * Stop p-libvirtd:2 (cvmh03) - * Stop fs-bind-libvirt-VM-cvmh:0 (cvmh04) - * Stop fs-bind-libvirt-VM-cvmh:2 (cvmh03) + * Stop fs-libvirt-VM-xcm:0 (cvmh04) due to node availability + * Stop fs-libvirt-VM-xcm:2 (cvmh03) due to node availability + * Stop p-watch-ib0:0 (cvmh04) due to node availability + * Stop p-watch-ib0:2 (cvmh03) due to node availability + * Stop p-fs-gpfs:0 (cvmh04) due to node availability + * Stop p-fs-gpfs:2 (cvmh03) due to node availability + * Stop p-libvirtd:0 (cvmh04) due to node availability + * Stop p-libvirtd:2 (cvmh03) due to node availability + * Stop fs-bind-libvirt-VM-cvmh:0 (cvmh04) due to node availability + * Stop fs-bind-libvirt-VM-cvmh:2 (cvmh03) due to node availability * Migrate vm-compute-test (Started cvmh03 -> cvmh01) * Migrate vm-swbuildsl6 (Started cvmh04 -> cvmh02) Executing cluster transition: * Pseudo action: c-watch-ib0_stop_0 * Pseudo action: load_stopped_cvmh01 * Pseudo action: load_stopped_cvmh02 * Resource action: p-watch-ib0 stop on cvmh03 * Resource action: vm-compute-test migrate_to on cvmh03 * Resource action: p-watch-ib0 stop on cvmh04 * Pseudo action: c-watch-ib0_stopped_0 * Resource action: vm-compute-test migrate_from on cvmh01 * Resource action: vm-swbuildsl6 migrate_to on cvmh04 * Resource action: vm-swbuildsl6 migrate_from on cvmh02 * Resource action: vm-swbuildsl6 stop on cvmh04 * Pseudo action: load_stopped_cvmh04 * Resource action: vm-compute-test stop on cvmh03 * Pseudo action: load_stopped_cvmh03 * Pseudo action: c-p-libvirtd_stop_0 * Pseudo action: vm-compute-test_start_0 * Pseudo action: vm-swbuildsl6_start_0 * Resource action: p-libvirtd stop on cvmh03 * Resource action: vm-compute-test monitor=45000 on cvmh01 * Resource action: vm-swbuildsl6 monitor=45000 on cvmh02 * Resource action: p-libvirtd stop on cvmh04 * Pseudo action: c-p-libvirtd_stopped_0 * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stop_0 * Pseudo action: c-fs-libvirt-VM-xcm_stop_0 * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh03 * Resource action: fs-libvirt-VM-xcm stop on cvmh03 * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh04 * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stopped_0 * Resource action: fs-libvirt-VM-xcm stop on cvmh04 * Pseudo action: c-fs-libvirt-VM-xcm_stopped_0 * Pseudo action: c-fs-gpfs_stop_0 * Resource action: p-fs-gpfs stop on cvmh03 * Resource action: p-fs-gpfs stop on cvmh04 * Pseudo action: c-fs-gpfs_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node cvmh03: standby Node cvmh04: standby Online: [ cvmh01 cvmh02 ] fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02 fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01 fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02 Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-p-libvirtd [p-libvirtd] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh] 
Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-watch-ib0 [p-watch-ib0] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] Clone Set: c-fs-gpfs [p-fs-gpfs] Started: [ cvmh01 cvmh02 ] Stopped: [ cvmh03 cvmh04 ] vm-compute-test (ocf::ccni:xcatVirtualDomain): Started cvmh01 vm-swbuildsl6 (ocf::ccni:xcatVirtualDomain): Started cvmh02 diff --git a/pengine/test10/migrate-fencing.summary b/pengine/test10/migrate-fencing.summary index 7468fe235f..cfd4eb8f9f 100644 --- a/pengine/test10/migrate-fencing.summary +++ b/pengine/test10/migrate-fencing.summary @@ -1,108 +1,108 @@ Current cluster status: Node pcmk-4: UNCLEAN (online) Online: [ pcmk-1 pcmk-2 pcmk-3 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-4 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-4 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4 migrator (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-4 ] Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: * Fence (reboot) pcmk-4 'termination was requested' - * Stop FencingChild:0 (pcmk-4) + * Stop FencingChild:0 (pcmk-4) due to node availability * Move r192.168.101.181 (Started pcmk-4 -> pcmk-1) * Move r192.168.101.182 (Started pcmk-4 -> pcmk-1) * Move r192.168.101.183 (Started pcmk-4 -> pcmk-1) * Move rsc_pcmk-4 (Started pcmk-4 -> pcmk-2) * Move lsb-dummy (Started pcmk-4 -> pcmk-1) * Migrate migrator (Started pcmk-1 -> pcmk-3) - * Stop ping-1:0 (pcmk-4) + * Stop ping-1:0 (pcmk-4) due to node availability * Demote stateful-1:0 (Master -> Stopped pcmk-4) * Promote stateful-1:1 (Slave -> Master pcmk-1) Executing cluster transition: * Resource action: stateful-1:3 monitor=15000 on pcmk-3 * Resource action: stateful-1:2 monitor=15000 on pcmk-2 * Fencing pcmk-4 (reboot) * Pseudo action: Fencing_stop_0 * Pseudo action: rsc_pcmk-4_stop_0 * Pseudo action: lsb-dummy_stop_0 * Pseudo action: Connectivity_stop_0 * Pseudo action: stonith_complete * Pseudo action: FencingChild:0_stop_0 * Pseudo action: Fencing_stopped_0 * Pseudo action: group-1_stop_0 * Pseudo action: r192.168.101.183_stop_0 * Resource action: rsc_pcmk-4 start on pcmk-2 * Resource action: migrator migrate_to on pcmk-1 * Pseudo action: ping-1:0_stop_0 * Pseudo action: Connectivity_stopped_0 * Pseudo action: r192.168.101.182_stop_0 * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2 * Resource action: migrator migrate_from on pcmk-3 * Resource action: migrator stop on pcmk-1 * Pseudo action: r192.168.101.181_stop_0 * Pseudo action: migrator_start_0 * Pseudo action: group-1_stopped_0 * Resource action: migrator monitor=10000 on pcmk-3 * Pseudo action: master-1_demote_0 * Pseudo action: stateful-1:0_demote_0 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Pseudo action: stateful-1:0_stop_0 * Pseudo action: master-1_stopped_0 * Pseudo action: all_stopped * Pseudo action: master-1_promote_0 * Resource action: stateful-1:1 promote on pcmk-1 * Pseudo action: master-1_promoted_0 * Pseudo action: group-1_start_0 * Resource action: r192.168.101.181 start on pcmk-1 * Resource action: 
r192.168.101.182 start on pcmk-1 * Resource action: r192.168.101.183 start on pcmk-1 * Resource action: stateful-1:1 monitor=16000 on pcmk-1 * Pseudo action: group-1_running_0 * Resource action: r192.168.101.181 monitor=5000 on pcmk-1 * Resource action: r192.168.101.182 monitor=5000 on pcmk-1 * Resource action: r192.168.101.183 monitor=5000 on pcmk-1 * Resource action: lsb-dummy start on pcmk-1 * Resource action: lsb-dummy monitor=5000 on pcmk-1 Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 ] OFFLINE: [ pcmk-4 ] Clone Set: Fencing [FencingChild] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Resource Group: group-1 r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-1 r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 migrator (ocf::pacemaker:Dummy): Started pcmk-3 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 pcmk-3 ] Stopped: [ pcmk-4 ] diff --git a/pengine/test10/migrate-shutdown.summary b/pengine/test10/migrate-shutdown.summary index 630d58da3c..b9aa5b2d88 100644 --- a/pengine/test10/migrate-shutdown.summary +++ b/pengine/test10/migrate-shutdown.summary @@ -1,95 +1,95 @@ Current cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Fencing (stonith:fence_xvm): Started pcmk-1 Resource Group: group-1 r192.168.122.105 (ocf::heartbeat:IPaddr): Started pcmk-2 r192.168.122.106 (ocf::heartbeat:IPaddr): Started pcmk-2 r192.168.122.107 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 migrator (ocf::pacemaker:Dummy): Started pcmk-1 Clone Set: Connectivity [ping-1] Started: [ pcmk-1 pcmk-2 pcmk-4 ] Stopped: [ pcmk-3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ pcmk-2 ] Slaves: [ pcmk-1 pcmk-4 ] Stopped: [ pcmk-3 ] Transition Summary: * Shutdown pcmk-4 * Shutdown pcmk-3 * Shutdown pcmk-2 * Shutdown pcmk-1 * Stop Fencing (pcmk-1) - * Stop r192.168.122.105 (pcmk-2) - * Stop r192.168.122.106 (pcmk-2) - * Stop r192.168.122.107 (pcmk-2) + * Stop r192.168.122.105 (pcmk-2) due to node availability + * Stop r192.168.122.106 (pcmk-2) due to node availability + * Stop r192.168.122.107 (pcmk-2) due to node availability * Stop rsc_pcmk-1 (pcmk-1) * Stop rsc_pcmk-2 (pcmk-2) * Stop rsc_pcmk-4 (pcmk-4) * Stop lsb-dummy (pcmk-2) * Stop migrator (pcmk-1) - * Stop ping-1:0 (pcmk-1) - * Stop ping-1:1 (pcmk-2) - * Stop ping-1:2 (pcmk-4) - * Stop stateful-1:0 (pcmk-1) + * Stop ping-1:0 (pcmk-1) due to node availability + * Stop ping-1:1 (pcmk-2) due to node availability + * Stop ping-1:2 (pcmk-4) due to node availability + * Stop stateful-1:0 (pcmk-1) due to node availability * Demote stateful-1:1 (Master -> Stopped pcmk-2) - * Stop stateful-1:2 (pcmk-4) + * Stop stateful-1:2 (pcmk-4) due to node availability Executing cluster transition: * Resource action: Fencing stop on pcmk-1 * Resource action: rsc_pcmk-1 stop on pcmk-1 * Resource action: rsc_pcmk-2 stop on pcmk-2 * Resource action: 
rsc_pcmk-4 stop on pcmk-4 * Resource action: lsb-dummy stop on pcmk-2 * Resource action: migrator stop on pcmk-1 * Resource action: migrator stop on pcmk-3 * Pseudo action: Connectivity_stop_0 * Cluster action: do_shutdown on pcmk-3 * Pseudo action: group-1_stop_0 * Resource action: r192.168.122.107 stop on pcmk-2 * Resource action: ping-1:0 stop on pcmk-1 * Resource action: ping-1:1 stop on pcmk-2 * Resource action: ping-1:3 stop on pcmk-4 * Pseudo action: Connectivity_stopped_0 * Resource action: r192.168.122.106 stop on pcmk-2 * Resource action: r192.168.122.105 stop on pcmk-2 * Pseudo action: group-1_stopped_0 * Pseudo action: master-1_demote_0 * Resource action: stateful-1:0 demote on pcmk-2 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Resource action: stateful-1:2 stop on pcmk-1 * Resource action: stateful-1:0 stop on pcmk-2 * Resource action: stateful-1:3 stop on pcmk-4 * Pseudo action: master-1_stopped_0 * Cluster action: do_shutdown on pcmk-4 * Cluster action: do_shutdown on pcmk-2 * Cluster action: do_shutdown on pcmk-1 * Pseudo action: all_stopped Revised cluster status: Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Fencing (stonith:fence_xvm): Stopped Resource Group: group-1 r192.168.122.105 (ocf::heartbeat:IPaddr): Stopped r192.168.122.106 (ocf::heartbeat:IPaddr): Stopped r192.168.122.107 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-1 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-2 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped rsc_pcmk-4 (ocf::heartbeat:IPaddr): Stopped lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped migrator (ocf::pacemaker:Dummy): Stopped Clone Set: Connectivity [ping-1] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Master/Slave Set: master-1 [stateful-1] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] diff --git a/pengine/test10/migrate-stop-complex.summary b/pengine/test10/migrate-stop-complex.summary index e8f148c3e7..04dce1a4fb 100644 --- a/pengine/test10/migrate-stop-complex.summary +++ b/pengine/test10/migrate-stop-complex.summary @@ -1,48 +1,48 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-02 domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 dom0-02 ] Transition Summary: * Move top (Started dom0-02 -> dom0-01) * Migrate domU-test01 (Started dom0-02 -> dom0-01) - * Stop dom0-iscsi1-cnx1:1 (dom0-02) - * Stop bottom:1 (dom0-02) + * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability + * Stop bottom:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: top stop on dom0-02 * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Pseudo action: clone-bottom_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Resource action: bottom:0 stop on dom0-02 * Pseudo action: clone-bottom_stopped_0 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 * Resource action: top start on dom0-01 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] 
Stopped: [ dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-stop-start-complex.summary b/pengine/test10/migrate-stop-start-complex.summary index 162f73b441..5408bb6117 100644 --- a/pengine/test10/migrate-stop-start-complex.summary +++ b/pengine/test10/migrate-stop-start-complex.summary @@ -1,49 +1,49 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-02 ] Stopped: [ dom0-01 ] Transition Summary: * Migrate domU-test01 (Started dom0-02 -> dom0-01) - * Stop dom0-iscsi1-cnx1:1 (dom0-02) + * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability * Move bottom:0 (Started dom0-02 -> dom0-01) Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 * Pseudo action: clone-bottom_stop_0 * Resource action: bottom:0 stop on dom0-02 * Pseudo action: clone-bottom_stopped_0 * Pseudo action: clone-bottom_start_0 * Pseudo action: all_stopped * Resource action: bottom:0 start on dom0-01 * Pseudo action: clone-bottom_running_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] top (ocf::heartbeat:Dummy): Started dom0-01 domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] Clone Set: clone-bottom [bottom] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/migrate-stop.summary b/pengine/test10/migrate-stop.summary index 9a98330c7a..a9d5771ff3 100644 --- a/pengine/test10/migrate-stop.summary +++ b/pengine/test10/migrate-stop.summary @@ -1,34 +1,34 @@ Current cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-02 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 dom0-02 ] Transition Summary: * Migrate domU-test01 (Started dom0-02 -> dom0-01) - * Stop dom0-iscsi1-cnx1:1 (dom0-02) + * Stop dom0-iscsi1-cnx1:1 (dom0-02) due to node availability Executing cluster transition: * Resource action: domU-test01 migrate_to on dom0-02 * Pseudo action: clone-dom0-iscsi1_stop_0 * Resource action: domU-test01 migrate_from on dom0-01 * Resource action: domU-test01 stop on dom0-02 * Pseudo action: dom0-iscsi1:1_stop_0 * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 * Pseudo action: all_stopped * Pseudo action: domU-test01_start_0 * Pseudo action: dom0-iscsi1:1_stopped_0 * Pseudo action: clone-dom0-iscsi1_stopped_0 Revised cluster status: Node dom0-02: standby Online: [ dom0-01 ] domU-test01 (ocf::heartbeat:Xen): Started dom0-01 Clone Set: clone-dom0-iscsi1 [dom0-iscsi1] Started: [ dom0-01 ] Stopped: [ dom0-02 ] diff --git a/pengine/test10/monitor-recovery.summary b/pengine/test10/monitor-recovery.summary index cae6d07814..d8a13fedbb 100644 --- a/pengine/test10/monitor-recovery.summary +++ b/pengine/test10/monitor-recovery.summary @@ -1,31 +1,31 @@ Current cluster status: Online: [ CSE-1 ] OFFLINE: [ CSE-2 ] Resource Group: svc-cse ip_19 (ocf::heartbeat:IPaddr2): 
Stopped ip_11 (ocf::heartbeat:IPaddr2): Stopped Clone Set: cl_tomcat [d_tomcat] Started: [ CSE-1 ] Stopped: [ CSE-2 ] Transition Summary: - * Stop d_tomcat:0 (CSE-1) + * Stop d_tomcat:0 (CSE-1) due to node availability Executing cluster transition: * Pseudo action: cl_tomcat_stop_0 * Resource action: d_tomcat stop on CSE-1 * Pseudo action: cl_tomcat_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ CSE-1 ] OFFLINE: [ CSE-2 ] Resource Group: svc-cse ip_19 (ocf::heartbeat:IPaddr2): Stopped ip_11 (ocf::heartbeat:IPaddr2): Stopped Clone Set: cl_tomcat [d_tomcat] Stopped: [ CSE-1 CSE-2 ] diff --git a/pengine/test10/notify-0.summary b/pengine/test10/notify-0.summary index 2d95b261c9..69112dab64 100644 --- a/pengine/test10/notify-0.summary +++ b/pengine/test10/notify-0.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:1 (node1) - * Stop child_rsc2:0 (node1) + * Stop child_rsc2:0 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc2:1 monitor on node1 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc1:1 start on node1 * Pseudo action: rsc1_running_0 * Resource action: child_rsc2:0 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Stopped child_rsc2:1 (heartbeat:apache): Stopped diff --git a/pengine/test10/notify-1.summary b/pengine/test10/notify-1.summary index d670be73be..0a734a68e4 100644 --- a/pengine/test10/notify-1.summary +++ b/pengine/test10/notify-1.summary @@ -1,50 +1,50 @@ Current cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:1 (node1) - * Stop child_rsc2:0 (node1) + * Stop child_rsc2:0 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Resource action: child_rsc2:1 monitor on node1 * Pseudo action: rsc2_pre_notify_stop_0 * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc2:0 notify on node1 * Pseudo action: rsc2_confirmed-pre_notify_stop_0 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc1:1 start on node1 * Pseudo action: rsc1_running_0 * Resource action: child_rsc2:0 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_post_notify_running_0 * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc2_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped 
Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Stopped child_rsc2:1 (heartbeat:apache): Stopped diff --git a/pengine/test10/notify-2.summary b/pengine/test10/notify-2.summary index d670be73be..0a734a68e4 100644 --- a/pengine/test10/notify-2.summary +++ b/pengine/test10/notify-2.summary @@ -1,50 +1,50 @@ Current cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Stopped Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:1 (node1) - * Stop child_rsc2:0 (node1) + * Stop child_rsc2:0 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Resource action: child_rsc2:1 monitor on node1 * Pseudo action: rsc2_pre_notify_stop_0 * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc2:0 notify on node1 * Pseudo action: rsc2_confirmed-pre_notify_stop_0 * Pseudo action: rsc2_stop_0 * Resource action: child_rsc1:1 start on node1 * Pseudo action: rsc1_running_0 * Resource action: child_rsc2:0 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_post_notify_running_0 * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc2_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Stopped child_rsc2:1 (heartbeat:apache): Stopped diff --git a/pengine/test10/notify-3.summary b/pengine/test10/notify-3.summary index efc3a07874..03a3d4e3ce 100644 --- a/pengine/test10/notify-3.summary +++ b/pengine/test10/notify-3.summary @@ -1,61 +1,61 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node2 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Started node1 child_rsc2:1 (heartbeat:apache): Stopped Transition Summary: * Move child_rsc1:1 (Started node2 -> node1) - * Stop child_rsc2:0 (node1) + * Stop child_rsc2:0 (node1) due to node availability Executing cluster transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_pre_notify_stop_0 * Resource action: child_rsc2:0 monitor on node2 * Resource action: child_rsc2:1 monitor on node2 * Resource action: child_rsc2:1 monitor on node1 * Pseudo action: rsc2_pre_notify_stop_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_stop_0 * Pseudo action: rsc1_stop_0 * Resource action: child_rsc2:0 notify on node1 * Pseudo action: rsc2_confirmed-pre_notify_stop_0 * Pseudo action: rsc2_stop_0 * 
Resource action: child_rsc1:1 stop on node2 * Pseudo action: rsc1_stopped_0 * Resource action: child_rsc2:0 stop on node1 * Pseudo action: rsc2_stopped_0 * Pseudo action: rsc1_post_notify_stopped_0 * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_stopped_0 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc2_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Resource action: child_rsc1:0 notify on node1 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:1 start on node1 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node1 * Pseudo action: rsc1_confirmed-post_notify_running_0 Revised cluster status: Online: [ node1 node2 ] Clone Set: rsc1 [child_rsc1] (unique) child_rsc1:0 (heartbeat:apache): Started node1 child_rsc1:1 (heartbeat:apache): Started node1 Clone Set: rsc2 [child_rsc2] (unique) child_rsc2:0 (heartbeat:apache): Stopped child_rsc2:1 (heartbeat:apache): Stopped diff --git a/pengine/test10/novell-252693.summary b/pengine/test10/novell-252693.summary index 23f0632d43..0682119d01 100644 --- a/pengine/test10/novell-252693.summary +++ b/pengine/test10/novell-252693.summary @@ -1,90 +1,90 @@ Current cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node1 node2 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node1 node2 ] Clone Set: evmscloneset [evmsclone] Started: [ node1 node2 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node1 node2 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node1 node2 ] sles10 (ocf::heartbeat:Xen): Started node1 Transition Summary: * Shutdown node1 - * Stop stonithclone:1 (node1) - * Stop evmsdclone:1 (node1) - * Stop evmsclone:1 (node1) - * Stop imagestoreclone:1 (node1) - * Stop configstoreclone:1 (node1) + * Stop stonithclone:1 (node1) due to node availability + * Stop evmsdclone:1 (node1) due to node availability + * Stop evmsclone:1 (node1) due to node availability + * Stop imagestoreclone:1 (node1) due to node availability + * Stop configstoreclone:1 (node1) due to node availability * Migrate sles10 (Started node1 -> node2) Executing cluster transition: * Pseudo action: stonithcloneset_stop_0 * Pseudo action: evmscloneset_pre_notify_stop_0 * Pseudo action: imagestorecloneset_pre_notify_stop_0 * Pseudo action: configstorecloneset_pre_notify_stop_0 * Resource action: sles10 migrate_to on node1 * Resource action: stonithclone:1 stop on node1 * Pseudo action: stonithcloneset_stopped_0 * Resource action: evmsclone:0 notify on node2 * Resource action: evmsclone:1 notify on node1 * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 * Resource action: imagestoreclone:0 notify on node2 * Resource action: imagestoreclone:0 notify on node1 * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: imagestorecloneset_stop_0 * Resource action: configstoreclone:0 notify on node2 * Resource action: configstoreclone:0 notify on node1 * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 * Pseudo action: configstorecloneset_stop_0 * Resource action: sles10 migrate_from on node2 * Resource action: sles10 stop on node1 * Resource action: imagestoreclone:0 stop on node1 * Pseudo action: imagestorecloneset_stopped_0 * Resource action: configstoreclone:0 stop on node1 * Pseudo 
action: configstorecloneset_stopped_0 * Pseudo action: sles10_start_0 * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Pseudo action: configstorecloneset_post_notify_stopped_0 * Resource action: sles10 monitor=10000 on node2 * Resource action: imagestoreclone:0 notify on node2 * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 * Resource action: configstoreclone:0 notify on node2 * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmscloneset_stop_0 * Resource action: evmsclone:1 stop on node1 * Pseudo action: evmscloneset_stopped_0 * Pseudo action: evmscloneset_post_notify_stopped_0 * Resource action: evmsclone:0 notify on node2 * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 * Pseudo action: evmsdcloneset_stop_0 * Resource action: evmsdclone:1 stop on node1 * Pseudo action: evmsdcloneset_stopped_0 * Cluster action: do_shutdown on node1 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] Clone Set: stonithcloneset [stonithclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmsdcloneset [evmsdclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: evmscloneset [evmsclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: imagestorecloneset [imagestoreclone] Started: [ node2 ] Stopped: [ node1 ] Clone Set: configstorecloneset [configstoreclone] Started: [ node2 ] Stopped: [ node1 ] sles10 (ocf::heartbeat:Xen): Started node2 diff --git a/pengine/test10/one-or-more-1.summary b/pengine/test10/one-or-more-1.summary index f26919c5f8..af31a4656f 100644 --- a/pengine/test10/one-or-more-1.summary +++ b/pengine/test10/one-or-more-1.summary @@ -1,31 +1,31 @@ 1 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped ( disabled ) B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped D (ocf::pacemaker:Dummy): Stopped Transition Summary: - * Start B (fc16-builder - blocked) - * Start C (fc16-builder - blocked) - * Start D (fc16-builder - blocked) + * Start B (fc16-builder - blocked) due to unrunnable A start + * Start C (fc16-builder - blocked) due to unrunnable A start + * Start D (fc16-builder - blocked) due to unrunnable one-or-more:require-all-set-1 Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped ( disabled ) B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped D (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/one-or-more-3.summary b/pengine/test10/one-or-more-3.summary index f0ee084109..abf4081c74 100644 --- a/pengine/test10/one-or-more-3.summary +++ b/pengine/test10/one-or-more-3.summary @@ -1,31 +1,31 @@ 2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Stopped ( disabled ) C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped Transition Summary: * Start A (fc16-builder) - * Start D (fc16-builder - blocked) + * Start D (fc16-builder - blocked) due to unrunnable one-or-more:require-all-set-1 Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource 
action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder * Resource action: A start on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Stopped ( disabled ) C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/one-or-more-6.summary b/pengine/test10/one-or-more-6.summary index 18cab2e13a..665ac521da 100644 --- a/pengine/test10/one-or-more-6.summary +++ b/pengine/test10/one-or-more-6.summary @@ -1,25 +1,25 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) C (ocf::pacemaker:Dummy): Started fc16-builder Transition Summary: - * Stop B (fc16-builder) + * Stop B (fc16-builder) due to node availability Executing cluster transition: * Resource action: B stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Stopped ( disabled ) C (ocf::pacemaker:Dummy): Started fc16-builder diff --git a/pengine/test10/one-or-more-7.summary b/pengine/test10/one-or-more-7.summary index 4322db36c2..89b3416102 100644 --- a/pengine/test10/one-or-more-7.summary +++ b/pengine/test10/one-or-more-7.summary @@ -1,25 +1,25 @@ 1 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop C (fc16-builder) + * Stop C (fc16-builder) due to node availability Executing cluster transition: * Resource action: C stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Started fc16-builder B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/order-mandatory.summary b/pengine/test10/order-mandatory.summary index a1d2b4be54..021d8fe1f7 100644 --- a/pengine/test10/order-mandatory.summary +++ b/pengine/test10/order-mandatory.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) - * Stop rsc4 (Started node1) + * Restart rsc2 (Started node1) due to required rsc1 start + * Stop rsc4 (Started node1) due to unrunnable rsc3 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Stopped diff --git a/pengine/test10/order-required.summary b/pengine/test10/order-required.summary index a1d2b4be54..021d8fe1f7 100644 --- a/pengine/test10/order-required.summary +++ 
b/pengine/test10/order-required.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) - * Stop rsc4 (Started node1) + * Restart rsc2 (Started node1) due to required rsc1 start + * Stop rsc4 (Started node1) due to unrunnable rsc3 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc4 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Stopped diff --git a/pengine/test10/order-wrong-kind.summary b/pengine/test10/order-wrong-kind.summary index 8525b52100..c498cafd9b 100644 --- a/pengine/test10/order-wrong-kind.summary +++ b/pengine/test10/order-wrong-kind.summary @@ -1,28 +1,28 @@ Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues. Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 Transition Summary: * Start rsc1 (node1) - * Restart rsc2 (Started node1) + * Restart rsc2 (Started node1) due to required rsc1 start Executing cluster transition: * Resource action: rsc1 start on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped rsc4 (heartbeat:apache): Started node1 diff --git a/pengine/test10/order7.summary b/pengine/test10/order7.summary index 74c4b606d7..6648bff2ea 100644 --- a/pengine/test10/order7.summary +++ b/pengine/test10/order7.summary @@ -1,36 +1,36 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Stopped rsc3 (heartbeat:apache): Stopped rscA (heartbeat:apache): FAILED node1 ( blocked ) rscB (heartbeat:apache): Stopped rscC (heartbeat:apache): Stopped Transition Summary: * Start rsc2 (node1) * Start rsc3 (node1) * Start rscB (node1) - * Start rscC (node1 - blocked) + * Start rscC (node1 - blocked) due to unrunnable rscA start Executing cluster transition: * Resource action: rsc2 monitor on node1 * Resource action: rsc3 monitor on node1 * Resource action: rscB monitor on node1 * Resource action: rscC monitor on node1 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Resource action: rscB start on node1 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Started node1 rscA (heartbeat:apache): FAILED node1 ( blocked ) rscB (heartbeat:apache): Started node1 rscC (heartbeat:apache): Stopped diff --git a/pengine/test10/order_constraint_stops_master.summary b/pengine/test10/order_constraint_stops_master.summary index f1125a12a5..d3d8891395 100644 --- a/pengine/test10/order_constraint_stops_master.summary +++ b/pengine/test10/order_constraint_stops_master.summary @@ -1,42 +1,42 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 
fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Masters: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder2 ( disabled ) Transition Summary: - * Stop NATIVE_RSC_A:0 (Master fc16-builder) - * Stop NATIVE_RSC_B (fc16-builder2) + * Stop NATIVE_RSC_A:0 (Master fc16-builder) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_B (fc16-builder2) due to node availability Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder2 * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/order_constraint_stops_slave.summary b/pengine/test10/order_constraint_stops_slave.summary index 843d3b61a2..896c9c3174 100644 --- a/pengine/test10/order_constraint_stops_slave.summary +++ b/pengine/test10/order_constraint_stops_slave.summary @@ -1,34 +1,34 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Slaves: [ fc16-builder ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Started fc16-builder ( disabled ) Transition Summary: - * Stop NATIVE_RSC_A:0 (Slave fc16-builder) - * Stop NATIVE_RSC_B (fc16-builder) + * Stop NATIVE_RSC_A:0 (Slave fc16-builder) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_B (fc16-builder) due to node availability Executing cluster transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Pseudo action: all_stopped Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A] Stopped: [ fc16-builder fc16-builder2 ] NATIVE_RSC_B (ocf::pacemaker:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/ordered-set-basic-startup.summary b/pengine/test10/ordered-set-basic-startup.summary index 182dd08616..cfa8f8b3de 100644 --- a/pengine/test10/ordered-set-basic-startup.summary +++ b/pengine/test10/ordered-set-basic-startup.summary @@ -1,39 +1,39 @@ 2 of 6 resources DISABLED 
and 0 BLOCKED from being started due to failures Current cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Stopped C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped ( disabled ) E (ocf::pacemaker:Dummy): Stopped F (ocf::pacemaker:Dummy): Stopped Transition Summary: - * Start A (fc16-builder - blocked) + * Start A (fc16-builder - blocked) due to unrunnable C start * Start B (fc16-builder) - * Start E (fc16-builder - blocked) - * Start F (fc16-builder - blocked) + * Start E (fc16-builder - blocked) due to unrunnable A start + * Start F (fc16-builder - blocked) due to unrunnable D start Executing cluster transition: * Resource action: A monitor on fc16-builder * Resource action: B monitor on fc16-builder * Resource action: C monitor on fc16-builder * Resource action: D monitor on fc16-builder * Resource action: E monitor on fc16-builder * Resource action: F monitor on fc16-builder * Resource action: B start on fc16-builder Revised cluster status: Online: [ fc16-builder ] OFFLINE: [ fc16-builder2 ] A (ocf::pacemaker:Dummy): Stopped B (ocf::pacemaker:Dummy): Started fc16-builder C (ocf::pacemaker:Dummy): Stopped ( disabled ) D (ocf::pacemaker:Dummy): Stopped ( disabled ) E (ocf::pacemaker:Dummy): Stopped F (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ordered-set-natural.summary b/pengine/test10/ordered-set-natural.summary index a3ba4530c9..1888d66e31 100644 --- a/pengine/test10/ordered-set-natural.summary +++ b/pengine/test10/ordered-set-natural.summary @@ -1,52 +1,52 @@ 4 of 15 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ node1 node2 ] Resource Group: rgroup dummy1-1 (ocf::heartbeat:Dummy): Stopped dummy1-2 (ocf::heartbeat:Dummy): Stopped dummy1-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy1-4 (ocf::heartbeat:Dummy): Stopped dummy1-5 (ocf::heartbeat:Dummy): Stopped dummy2-1 (ocf::heartbeat:Dummy): Stopped dummy2-2 (ocf::heartbeat:Dummy): Stopped dummy2-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-1 (ocf::heartbeat:Dummy): Stopped dummy3-2 (ocf::heartbeat:Dummy): Stopped dummy3-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-4 (ocf::heartbeat:Dummy): Stopped dummy3-5 (ocf::heartbeat:Dummy): Stopped dummy2-4 (ocf::heartbeat:Dummy): Stopped dummy2-5 (ocf::heartbeat:Dummy): Stopped Transition Summary: - * Start dummy1-1 (node1 - blocked) - * Start dummy1-2 (node1 - blocked) - * Start dummy2-1 (node2 - blocked) - * Start dummy2-2 (node2 - blocked) - * Start dummy3-4 (node1 - blocked) - * Start dummy3-5 (node1 - blocked) + * Start dummy1-1 (node1 - blocked) due to no quorum + * Start dummy1-2 (node1 - blocked) due to no quorum + * Start dummy2-1 (node2 - blocked) due to no quorum + * Start dummy2-2 (node2 - blocked) due to no quorum + * Start dummy3-4 (node1 - blocked) due to no quorum + * Start dummy3-5 (node1 - blocked) due to no quorum Executing cluster transition: Revised cluster status: Online: [ node1 node2 ] Resource Group: rgroup dummy1-1 (ocf::heartbeat:Dummy): Stopped dummy1-2 (ocf::heartbeat:Dummy): Stopped dummy1-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy1-4 (ocf::heartbeat:Dummy): Stopped dummy1-5 (ocf::heartbeat:Dummy): Stopped dummy2-1 (ocf::heartbeat:Dummy): Stopped dummy2-2 (ocf::heartbeat:Dummy): Stopped dummy2-3 (ocf::heartbeat:Dummy): Stopped ( disabled ) dummy3-1 (ocf::heartbeat:Dummy): Stopped dummy3-2 (ocf::heartbeat:Dummy): Stopped dummy3-3 (ocf::heartbeat:Dummy): 
Stopped ( disabled ) dummy3-4 (ocf::heartbeat:Dummy): Stopped dummy3-5 (ocf::heartbeat:Dummy): Stopped dummy2-4 (ocf::heartbeat:Dummy): Stopped dummy2-5 (ocf::heartbeat:Dummy): Stopped diff --git a/pengine/test10/probe-2.summary b/pengine/test10/probe-2.summary index e8a2269ed4..05588c845c 100644 --- a/pengine/test10/probe-2.summary +++ b/pengine/test10/probe-2.summary @@ -1,162 +1,162 @@ Current cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc02 ] Slaves: [ wc01 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc02 intip_sql (ocf::heartbeat:IPaddr2): Started wc02 mysql-server (ocf::heartbeat:mysql): Started wc02 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] Slaves: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Started wc02 Transition Summary: * Promote drbd_mysql:0 (Slave -> Master wc01) * Demote drbd_mysql:1 (Master -> Stopped wc02) * Move fs_mysql (Started wc02 -> wc01) * Move intip_sql (Started wc02 -> wc01) * Move mysql-server (Started wc02 -> wc01) - * Stop drbd_www:1 (wc02) - * Stop nfs-common:1 (wc02) - * Stop mysql-proxy:1 (wc02) - * Stop fs_www:1 (wc02) - * Stop apache2:1 (wc02) + * Stop drbd_www:1 (wc02) due to node availability + * Stop nfs-common:1 (wc02) due to node availability + * Stop mysql-proxy:1 (wc02) due to node availability + * Stop fs_www:1 (wc02) due to node availability + * Stop apache2:1 (wc02) due to node availability * Restart stonith_rackpdu:0 (Started wc01) - * Stop stonith_rackpdu:1 (wc02) + * Stop stonith_rackpdu:1 (wc02) due to node availability Executing cluster transition: * Resource action: drbd_mysql:0 cancel=10000 on wc01 * Pseudo action: ms_drbd_mysql_pre_notify_demote_0 * Pseudo action: group_mysql_stop_0 * Resource action: mysql-server stop on wc02 * Pseudo action: ms_drbd_www_pre_notify_stop_0 * Pseudo action: clone_mysql-proxy_stop_0 * Pseudo action: clone_webservice_stop_0 * Pseudo action: DoFencing_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_demote_0 * Resource action: intip_sql stop on wc02 * Resource action: drbd_www:0 notify on wc01 * Resource action: drbd_www:1 notify on wc02 * Pseudo action: ms_drbd_www_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_www_stop_0 * Pseudo action: group_mysql-proxy:1_stop_0 * Resource action: mysql-proxy:1 stop on wc02 * Pseudo action: group_webservice:1_stop_0 * Resource action: apache2:1 stop on wc02 * Resource action: stonith_rackpdu:0 stop on wc01 * Resource action: stonith_rackpdu:1 stop on wc02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Resource action: fs_mysql stop on wc02 * Resource action: drbd_www:1 stop on wc02 * Pseudo action: ms_drbd_www_stopped_0 * Pseudo action: 
group_mysql-proxy:1_stopped_0 * Pseudo action: clone_mysql-proxy_stopped_0 * Resource action: fs_www:1 stop on wc02 * Resource action: stonith_rackpdu:0 start on wc01 * Pseudo action: DoFencing_running_0 * Pseudo action: group_mysql_stopped_0 * Pseudo action: ms_drbd_www_post_notify_stopped_0 * Pseudo action: group_webservice:1_stopped_0 * Pseudo action: clone_webservice_stopped_0 * Resource action: stonith_rackpdu:0 monitor=5000 on wc01 * Pseudo action: ms_drbd_mysql_demote_0 * Resource action: drbd_www:0 notify on wc01 * Pseudo action: ms_drbd_www_confirmed-post_notify_stopped_0 * Pseudo action: clone_nfs-common_stop_0 * Resource action: drbd_mysql:1 demote on wc02 * Pseudo action: ms_drbd_mysql_demoted_0 * Pseudo action: group_nfs-common:1_stop_0 * Resource action: nfs-common:1 stop on wc02 * Pseudo action: ms_drbd_mysql_post_notify_demoted_0 * Pseudo action: group_nfs-common:1_stopped_0 * Pseudo action: clone_nfs-common_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_mysql_pre_notify_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_mysql_stop_0 * Resource action: drbd_mysql:1 stop on wc02 * Pseudo action: ms_drbd_mysql_stopped_0 * Pseudo action: ms_drbd_mysql_post_notify_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_stopped_0 * Pseudo action: all_stopped * Pseudo action: ms_drbd_mysql_pre_notify_promote_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_mysql_promote_0 * Resource action: drbd_mysql:0 promote on wc01 * Pseudo action: ms_drbd_mysql_promoted_0 * Pseudo action: ms_drbd_mysql_post_notify_promoted_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_promoted_0 * Pseudo action: group_mysql_start_0 * Resource action: fs_mysql start on wc01 * Resource action: intip_sql start on wc01 * Resource action: mysql-server start on wc01 * Resource action: drbd_mysql:0 monitor=5000 on wc01 * Pseudo action: group_mysql_running_0 * Resource action: fs_mysql monitor=30000 on wc01 * Resource action: intip_sql monitor=30000 on wc01 * Resource action: mysql-server monitor=30000 on wc01 Revised cluster status: Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby Online: [ wc01 ] Resource Group: group_www_data fs_www_data (ocf::heartbeat:Filesystem): Started wc01 nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 intip_nfs (ocf::heartbeat:IPaddr2): Started wc01 Master/Slave Set: ms_drbd_mysql [drbd_mysql] Masters: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_mysql fs_mysql (ocf::heartbeat:Filesystem): Started wc01 intip_sql (ocf::heartbeat:IPaddr2): Started wc01 mysql-server (ocf::heartbeat:mysql): Started wc01 Master/Slave Set: ms_drbd_www [drbd_www] Masters: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_nfs-common [group_nfs-common] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_mysql-proxy [group_mysql-proxy] Started: [ wc01 ] Stopped: [ wc02 ] Clone Set: clone_webservice [group_webservice] Started: [ wc01 ] Stopped: [ wc02 ] Resource Group: group_ftpd extip_ftp (ocf::heartbeat:IPaddr2): Started wc01 pure-ftpd (ocf::heartbeat:Pure-FTPd): Started wc01 Clone Set: DoFencing [stonith_rackpdu] (unique) 
stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 stonith_rackpdu:1 (stonith:external/rackpdu): Stopped diff --git a/pengine/test10/quorum-2.summary b/pengine/test10/quorum-2.summary index 9854fda7d3..1dce96cebf 100644 --- a/pengine/test10/quorum-2.summary +++ b/pengine/test10/quorum-2.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: * Move rsc2 (Started node1 -> node2) - * Start rsc3 (node1 - blocked) + * Start rsc3 (node1 - blocked) due to quorum freeze Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Resource action: rsc2 start on node2 Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node2 rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/quorum-3.summary b/pengine/test10/quorum-3.summary index 2ff7561fd8..e8e4bf3662 100644 --- a/pengine/test10/quorum-3.summary +++ b/pengine/test10/quorum-3.summary @@ -1,29 +1,29 @@ Current cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Started node1 rsc2 (heartbeat:apache): Started node1 rsc3 (heartbeat:apache): Stopped Transition Summary: - * Stop rsc1 (Started node1) - * Stop rsc2 (Started node1) - * Start rsc3 (node1 - blocked) + * Stop rsc1 (Started node1) due to no quorum + * Stop rsc2 (Started node1) due to no quorum + * Start rsc3 (node1 - blocked) due to no quorum Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-node-10.summary b/pengine/test10/rec-node-10.summary index 6daec5cdb0..503dd0d13e 100644 --- a/pengine/test10/rec-node-10.summary +++ b/pengine/test10/rec-node-10.summary @@ -1,27 +1,27 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) Transition Summary: - * Start stonith-1 (node2 - blocked) - * Stop rsc1 (Started node1 - blocked) - * Stop rsc2 (Started node1 - blocked) + * Start stonith-1 (node2 - blocked) due to no quorum + * Stop rsc1 (Started node1 - blocked) due to no quorum + * Stop rsc2 (Started node1 - blocked) due to no quorum Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 Revised cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) diff --git a/pengine/test10/rec-node-13.summary b/pengine/test10/rec-node-13.summary index 9873757ebf..de2fa28a26 100644 --- a/pengine/test10/rec-node-13.summary +++ b/pengine/test10/rec-node-13.summary @@ -1,80 +1,80 @@ Current cluster status: Node c001n04 
(9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online) Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 Transition Summary: * Fence (reboot) c001n04 'ocf_msdummy:6 failed there' - * Stop ocf_msdummy:6 (c001n04) + * Stop ocf_msdummy:6 (c001n04) due to node availability Executing cluster transition: * Fencing c001n04 (reboot) * Pseudo action: master_rsc_1_stop_0 * Pseudo action: stonith_complete * Pseudo action: ocf_msdummy:6_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ c001n02 c001n06 c001n07 ] OFFLINE: [ c001n03 c001n04 c001n05 ] Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n06 c001n07 ] Stopped: [ c001n03 c001n04 c001n05 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 ocf_msdummy:5 
(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 diff --git a/pengine/test10/rec-node-8.summary b/pengine/test10/rec-node-8.summary index 4cd6724cee..1a7232e605 100644 --- a/pengine/test10/rec-node-8.summary +++ b/pengine/test10/rec-node-8.summary @@ -1,31 +1,31 @@ Current cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) rsc3 (heartbeat:apache): Stopped Transition Summary: - * Start stonith-1 (node2 - blocked) + * Start stonith-1 (node2 - blocked) due to quorum freeze * Stop rsc1 (Started node1 - blocked) * Stop rsc2 (Started node1 - blocked) - * Start rsc3 (node2 - blocked) + * Start rsc3 (node2 - blocked) due to quorum freeze Executing cluster transition: * Resource action: stonith-1 monitor on node2 * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 * Resource action: rsc3 monitor on node2 Revised cluster status: Node node1 (uuid1): UNCLEAN (offline) Online: [ node2 ] stonith-1 (stonith:dummy): Stopped rsc1 (heartbeat:apache): Started node1 (UNCLEAN) rsc2 (heartbeat:apache): Started node1 (UNCLEAN) rsc3 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-node-9.summary b/pengine/test10/rec-node-9.summary index 09ca805852..7f6d8aea7e 100644 --- a/pengine/test10/rec-node-9.summary +++ b/pengine/test10/rec-node-9.summary @@ -1,23 +1,23 @@ Current cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped Transition Summary: - * Start rsc1 (node2 - blocked) - * Start rsc2 (node2 - blocked) + * Start rsc1 (node2 - blocked) due to no quorum + * Start rsc2 (node2 - blocked) due to no quorum Executing cluster transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc2 monitor on node2 Revised cluster status: Online: [ node2 ] OFFLINE: [ node1 ] rsc1 (heartbeat:apache): Stopped rsc2 (heartbeat:apache): Stopped diff --git a/pengine/test10/rec-rsc-9.summary b/pengine/test10/rec-rsc-9.summary index 1c06087a2e..02a2fe5f43 100644 --- a/pengine/test10/rec-rsc-9.summary +++ b/pengine/test10/rec-rsc-9.summary @@ -1,41 +1,41 @@ Current cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 Resource Group: foo rsc2 (heartbeat:apache): Started node1 Resource Group: bar rsc3 (heartbeat:apache): FAILED node1 Transition Summary: - * Restart rsc1 (Started node1) - * Restart rsc2 (Started node1) + * Restart rsc1 (Started node1) due to required bar running + * Restart rsc2 (Started node1) due to required bar running * Recover rsc3 (Started node1) Executing cluster transition: * Resource action: rsc1 stop on node1 * Pseudo action: foo_stop_0 * Resource action: rsc2 stop on node1 * Pseudo action: foo_stopped_0 * Pseudo action: bar_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: all_stopped * Pseudo action: bar_stopped_0 * Pseudo action: bar_start_0 * Resource action: rsc3 start on node1 * Pseudo action: bar_running_0 * Resource action: rsc1 
start on node1 * Pseudo action: foo_start_0 * Resource action: rsc2 start on node1 * Pseudo action: foo_running_0 Revised cluster status: Online: [ node1 ] rsc1 (heartbeat:apache): Started node1 Resource Group: foo rsc2 (heartbeat:apache): Started node1 Resource Group: bar rsc3 (heartbeat:apache): Started node1 diff --git a/pengine/test10/remote-disable.summary b/pengine/test10/remote-disable.summary index 69b4b62ede..2ec1748985 100644 --- a/pengine/test10/remote-disable.summary +++ b/pengine/test10/remote-disable.summary @@ -1,33 +1,33 @@ 2 of 6 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOnline: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Started 18builder ( disabled ) FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Started remote1 FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 Transition Summary: - * Stop remote1 (18builder) + * Stop remote1 (18builder) due to node availability * Stop FAKE2 (remote1) Executing cluster transition: * Resource action: FAKE2 stop on remote1 * Resource action: remote1 stop on 18builder * Pseudo action: all_stopped Revised cluster status: Online: [ 18builder 18node1 18node2 ] RemoteOFFLINE: [ remote1 ] shooter (stonith:fence_xvm): Started 18node1 remote1 (ocf::pacemaker:remote): Stopped ( disabled ) FAKE1 (ocf::heartbeat:Dummy): Started 18node2 FAKE2 (ocf::heartbeat:Dummy): Stopped FAKE3 (ocf::heartbeat:Dummy): Started 18builder FAKE4 (ocf::heartbeat:Dummy): Started 18node1 diff --git a/pengine/test10/remote-orphaned.summary b/pengine/test10/remote-orphaned.summary index 84762b218a..2b9add7618 100644 --- a/pengine/test10/remote-orphaned.summary +++ b/pengine/test10/remote-orphaned.summary @@ -1,68 +1,68 @@ Current cluster status: Online: [ 18node1 18node3 ] OFFLINE: [ 18node2 ] RemoteOnline: [ remote1 ] Fencing (stonith:fence_xvm): Started 18node3 FencingPass (stonith:fence_dummy): Started 18node1 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started remote1 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node3 remote1 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node3 ] Stopped: [ 18node2 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 remote1 (ocf::pacemaker:remote): ORPHANED Started 18node1 Transition Summary: * Move rsc_18node2 (Started remote1 -> 18node1) - * Stop ping-1:2 (remote1) - * Stop remote1 (18node1) + * Stop ping-1:2 (remote1) due to node availability + * Stop remote1 (18node1) due to node availability Executing cluster transition: * Resource action: rsc_18node2 stop on remote1 * Pseudo action: Connectivity_stop_0 * Resource action: rsc_18node2 start on 18node1 * Resource action: ping-1 stop on remote1 * Pseudo action: Connectivity_stopped_0 * Resource action: remote1 stop on 18node1 * Resource action: remote1 delete on 18node3 * Resource action: remote1 delete on 18node1 * Pseudo action: all_stopped * Resource action: rsc_18node2 monitor=5000 on 18node1 Revised cluster status: 
Online: [ 18node1 18node3 ] OFFLINE: [ 18node2 ] RemoteOFFLINE: [ remote1 ] Fencing (stonith:fence_xvm): Started 18node3 FencingPass (stonith:fence_dummy): Started 18node1 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node3 ] Stopped: [ 18node2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node3 ] Stopped: [ 18node2 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary index 35f3d58e9c..a8dac38be0 100644 --- a/pengine/test10/remote-recover-all.summary +++ b/pengine/test10/remote-recover-all.summary @@ -1,154 +1,154 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) galera-2 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Stop messaging-1 (controller-1) 
+ * Stop messaging-1 (controller-1) due to node availability * Move galera-0 (Started controller-1 -> controller-2) - * Stop galera-2 (controller-1) - * Stop rabbitmq:2 (messaging-1) + * Stop galera-2 (controller-1) due to node availability + * Stop rabbitmq:2 (messaging-1) due to node availability * Demote galera:1 (Master -> Stopped galera-2) - * Stop redis:0 (controller-1) + * Stop redis:0 (controller-1) due to node availability * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) - * Stop haproxy:0 (controller-1) + * Stop haproxy:0 (controller-1) due to node availability * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: galera-master_demote_0 * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Pseudo action: galera_demote_0 * Pseudo action: galera-master_demoted_0 * Pseudo action: galera-master_stop_0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Pseudo action: galera_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: galera-master_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: galera-0 monitor=20000 on controller-2 * Pseudo action: galera-2_stop_0 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: messaging-1_stop_0 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: 
stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-connection.summary b/pengine/test10/remote-recover-connection.summary index e0a9d0e503..7b5b5fc378 100644 --- a/pengine/test10/remote-recover-connection.summary +++ b/pengine/test10/remote-recover-connection.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 
galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Move messaging-1 (Started controller-1 -> controller-2) * Move galera-0 (Started controller-1 -> controller-2) * Move galera-2 (Started controller-1 -> controller-2) - * Stop redis:0 (controller-1) + * Stop redis:0 (controller-1) due to node availability * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) - * Stop haproxy:0 (controller-1) + * Stop haproxy:0 (controller-1) due to node availability * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Pseudo action: stonith_complete * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 monitor=20000 on 
controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary index 19b5181559..4383e8123f 100644 --- a/pengine/test10/remote-recover-no-resources.summary +++ b/pengine/test10/remote-recover-no-resources.summary @@ -1,145 +1,145 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 
(ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Stop messaging-1 (controller-1) + * Stop messaging-1 (controller-1) due to node availability * Move galera-0 (Started controller-1 -> controller-2) * Stop galera-2 (controller-1) - * Stop rabbitmq:2 (messaging-1) - * Stop redis:0 (controller-1) + * Stop rabbitmq:2 (messaging-1) due to node availability + * Stop redis:0 (controller-1) due to node availability * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) - * Stop haproxy:0 (controller-1) + * Stop haproxy:0 (controller-1) due to node availability * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: 
rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Pseudo action: messaging-1_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary index 772a699a20..3ffb5f4b3d 100644 --- a/pengine/test10/remote-recover-unknown.summary +++ b/pengine/test10/remote-recover-unknown.summary @@ -1,147 +1,147 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) galera-2 'resources are in an unknown state and the connection is unrecoverable' * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Stop messaging-1 (controller-1) + * Stop messaging-1 (controller-1) due to node availability * Move galera-0 (Started controller-1 -> controller-2) - * Stop galera-2 (controller-1) - * Stop rabbitmq:2 (messaging-1) - * Stop redis:0 (controller-1) + * Stop galera-2 (controller-1) due to node availability + * Stop rabbitmq:2 (messaging-1) due to node availability + * Stop redis:0 (controller-1) due to node availability * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) - * Stop haproxy:0 (controller-1) + * Stop haproxy:0 (controller-1) due to node availability * Restart 
stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Fencing messaging-1 (reboot) * Pseudo action: stonith_complete * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Pseudo action: messaging-1_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] RemoteOFFLINE: [ galera-2 messaging-1 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Stopped messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Stopped Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] 
Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 ] Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/remote-recovery.summary b/pengine/test10/remote-recovery.summary index e0a9d0e503..7b5b5fc378 100644 --- a/pengine/test10/remote-recovery.summary +++ b/pengine/test10/remote-recovery.summary @@ -1,140 +1,140 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: Node controller-1 (2): UNCLEAN (offline) Online: [ controller-0 controller-2 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN) Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN) Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN) Clone Set: haproxy-clone [haproxy] haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) Started: [ controller-0 controller-2 ] Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 
(UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Move messaging-1 (Started controller-1 -> controller-2) * Move galera-0 (Started controller-1 -> controller-2) * Move galera-2 (Started controller-1 -> controller-2) - * Stop redis:0 (controller-1) + * Stop redis:0 (controller-1) due to node availability * Move ip-172.17.1.14 (Started controller-1 -> controller-2) * Move ip-172.17.1.17 (Started controller-1 -> controller-2) * Move ip-172.17.4.11 (Started controller-1 -> controller-2) - * Stop haproxy:0 (controller-1) + * Stop haproxy:0 (controller-1) due to node availability * Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0) * Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0) * Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2) Executing cluster transition: * Pseudo action: redis-master_pre_notify_stop_0 * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Pseudo action: stonith_complete * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 * Pseudo action: all_stopped Using the original execution date of: 2017-05-03 13:33:24Z Revised cluster status: Online: [ controller-0 
controller-2 ] OFFLINE: [ controller-1 ] RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] messaging-0 (ocf::pacemaker:remote): Started controller-0 messaging-1 (ocf::pacemaker:remote): Started controller-2 messaging-2 (ocf::pacemaker:remote): Started controller-0 galera-0 (ocf::pacemaker:remote): Started controller-2 galera-1 (ocf::pacemaker:remote): Started controller-0 galera-2 (ocf::pacemaker:remote): Started controller-2 Clone Set: rabbitmq-clone [rabbitmq] Started: [ messaging-0 messaging-1 messaging-2 ] Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] Master/Slave Set: galera-master [galera] Masters: [ galera-0 galera-1 galera-2 ] Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] Master/Slave Set: redis-master [redis] Masters: [ controller-0 ] Slaves: [ controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0 ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2 Clone Set: haproxy-clone [haproxy] Started: [ controller-0 controller-2 ] Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/pengine/test10/rsc-sets-clone.summary b/pengine/test10/rsc-sets-clone.summary index 7ee23a26a4..70570e6803 100644 --- a/pengine/test10/rsc-sets-clone.summary +++ b/pengine/test10/rsc-sets-clone.summary @@ -1,37 +1,37 @@ Current cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node1 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone-rsc [rsc] Started: [ node1 node2 ] Transition Summary: * Move rsc1 (Started node1 -> node2) * Move rsc3 (Started node1 -> node2) - * Stop rsc:0 (node1) + * Stop rsc:0 (node1) due to node availability Executing cluster transition: * Resource action: rsc1 stop on node1 * Resource action: rsc3 stop on node1 * Pseudo action: clone-rsc_stop_0 * Resource action: rsc1 start on node2 * Resource action: rsc3 start on node2 * Resource action: rsc:0 stop on node1 * Pseudo action: clone-rsc_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node node1: standby Online: [ node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 rsc3 (ocf::pacemaker:Dummy): Started node2 Clone Set: clone-rsc [rsc] Started: [ node2 ] Stopped: [ node1 ] diff --git a/pengine/test10/stop-failure-no-quorum.summary b/pengine/test10/stop-failure-no-quorum.summary index 4e337aba19..75945b17a7 100644 --- a/pengine/test10/stop-failure-no-quorum.summary +++ b/pengine/test10/stop-failure-no-quorum.summary @@ -1,46 +1,46 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: 
clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-2 'clvm:0 failed there' - * Start dlm:0 (pcmk-1 - blocked) - * Stop clvm:0 (pcmk-2) - * Start clvm:2 (pcmk-1 - blocked) - * Start ClusterIP (pcmk-1 - blocked) - * Start Fencing (pcmk-1 - blocked) + * Start dlm:0 (pcmk-1 - blocked) due to no quorum + * Stop clvm:0 (pcmk-2) due to node availability + * Start clvm:2 (pcmk-1 - blocked) due to no quorum + * Start ClusterIP (pcmk-1 - blocked) due to no quorum + * Start Fencing (pcmk-1 - blocked) due to no quorum Executing cluster transition: * Fencing pcmk-2 (reboot) * Pseudo action: clvm-clone_stop_0 * Pseudo action: stonith_complete * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked ) Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/stop-failure-with-fencing.summary b/pengine/test10/stop-failure-with-fencing.summary index cdb39b04c7..09680c8040 100644 --- a/pengine/test10/stop-failure-with-fencing.summary +++ b/pengine/test10/stop-failure-with-fencing.summary @@ -1,45 +1,45 @@ Current cluster status: Node pcmk-2 (102): UNCLEAN (online) Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] clvm (lsb:clvmd): FAILED pcmk-2 Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-2 'clvm:0 failed there' - * Start dlm:0 (pcmk-1 - blocked) - * Stop clvm:0 (pcmk-2) - * Start clvm:1 (pcmk-1 - blocked) - * Start ClusterIP (pcmk-1 - blocked) - * Start Fencing (pcmk-1 - blocked) + * Start dlm:0 (pcmk-1 - blocked) due to no quorum + * Stop clvm:0 (pcmk-2) due to node availability + * Start clvm:1 (pcmk-1 - blocked) due to no quorum + * Start ClusterIP (pcmk-1 - blocked) due to no quorum + * Start Fencing (pcmk-1 - blocked) due to no quorum Executing cluster transition: * Resource action: Fencing monitor on pcmk-1 * Fencing pcmk-2 (reboot) * Pseudo action: clvm-clone_stop_0 * Pseudo action: stonith_complete * Pseudo action: clvm_stop_0 * Pseudo action: clvm-clone_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node pcmk-3 (103): UNCLEAN (offline) Node pcmk-4 (104): UNCLEAN (offline) Online: [ pcmk-1 ] OFFLINE: [ pcmk-2 ] Clone Set: dlm-clone [dlm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] Clone Set: clvm-clone [clvm] Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] ClusterIP (ocf::heartbeat:IPaddr2): Stopped Fencing (stonith:fence_xvm): Stopped diff --git a/pengine/test10/ticket-clone-14.summary b/pengine/test10/ticket-clone-14.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-14.summary +++ b/pengine/test10/ticket-clone-14.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * 
Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-15.summary b/pengine/test10/ticket-clone-15.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-15.summary +++ b/pengine/test10/ticket-clone-15.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-17.summary b/pengine/test10/ticket-clone-17.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-17.summary +++ b/pengine/test10/ticket-clone-17.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-18.summary b/pengine/test10/ticket-clone-18.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-18.summary +++ b/pengine/test10/ticket-clone-18.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-20.summary b/pengine/test10/ticket-clone-20.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-20.summary +++ b/pengine/test10/ticket-clone-20.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop 
rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-21.summary b/pengine/test10/ticket-clone-21.summary index ce5335f59b..6b18cfabc3 100644 --- a/pengine/test10/ticket-clone-21.summary +++ b/pengine/test10/ticket-clone-21.summary @@ -1,33 +1,33 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' * Fence (reboot) node1 'deadman ticket was lost' * Stop rsc_stonith (node1) - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Fencing node1 (reboot) * Pseudo action: rsc_stonith_stop_0 * Fencing node2 (reboot) * Pseudo action: clone1_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1:1_stop_0 * Pseudo action: rsc1:0_stop_0 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 ] rsc_stonith (stonith:null): Stopped Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-23.summary b/pengine/test10/ticket-clone-23.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-23.summary +++ b/pengine/test10/ticket-clone-23.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-3.summary b/pengine/test10/ticket-clone-3.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-3.summary +++ b/pengine/test10/ticket-clone-3.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-6.summary b/pengine/test10/ticket-clone-6.summary index c8076560d8..ceb76bf92f 100644 --- a/pengine/test10/ticket-clone-6.summary +++ 
b/pengine/test10/ticket-clone-6.summary @@ -1,26 +1,26 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: clone1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-clone-9.summary b/pengine/test10/ticket-clone-9.summary index ce5335f59b..6b18cfabc3 100644 --- a/pengine/test10/ticket-clone-9.summary +++ b/pengine/test10/ticket-clone-9.summary @@ -1,33 +1,33 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Clone Set: clone1 [rsc1] Started: [ node1 node2 ] Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' * Fence (reboot) node1 'deadman ticket was lost' * Stop rsc_stonith (node1) - * Stop rsc1:0 (node1) - * Stop rsc1:1 (node2) + * Stop rsc1:0 (node1) due to node availability + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Fencing node1 (reboot) * Pseudo action: rsc_stonith_stop_0 * Fencing node2 (reboot) * Pseudo action: clone1_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1:1_stop_0 * Pseudo action: rsc1:0_stop_0 * Pseudo action: clone1_stopped_0 * Pseudo action: all_stopped Revised cluster status: OFFLINE: [ node1 node2 ] rsc_stonith (stonith:null): Stopped Clone Set: clone1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-group-14.summary b/pengine/test10/ticket-group-14.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-14.summary +++ b/pengine/test10/ticket-group-14.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-15.summary b/pengine/test10/ticket-group-15.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-15.summary +++ b/pengine/test10/ticket-group-15.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped 
* Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-17.summary b/pengine/test10/ticket-group-17.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-17.summary +++ b/pengine/test10/ticket-group-17.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-18.summary b/pengine/test10/ticket-group-18.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-18.summary +++ b/pengine/test10/ticket-group-18.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-20.summary b/pengine/test10/ticket-group-20.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-20.summary +++ b/pengine/test10/ticket-group-20.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-21.summary b/pengine/test10/ticket-group-21.summary index 385e44f804..e397a427c6 100644 --- a/pengine/test10/ticket-group-21.summary +++ b/pengine/test10/ticket-group-21.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started 
node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-23.summary b/pengine/test10/ticket-group-23.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-23.summary +++ b/pengine/test10/ticket-group-23.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-3.summary b/pengine/test10/ticket-group-3.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-3.summary +++ b/pengine/test10/ticket-group-3.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-6.summary b/pengine/test10/ticket-group-6.summary index c0aaf74ddc..824bb707df 100644 --- a/pengine/test10/ticket-group-6.summary +++ b/pengine/test10/ticket-group-6.summary @@ -1,28 +1,28 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 * Resource action: rsc2 stop on node2 * Resource action: rsc1 stop on node2 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: 
group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-group-9.summary b/pengine/test10/ticket-group-9.summary index 385e44f804..e397a427c6 100644 --- a/pengine/test10/ticket-group-9.summary +++ b/pengine/test10/ticket-group-9.summary @@ -1,32 +1,32 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started node2 rsc2 (ocf::pacemaker:Dummy): Started node2 Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' - * Stop rsc1 (node2) - * Stop rsc2 (node2) + * Stop rsc1 (node2) due to node availability + * Stop rsc2 (node2) due to node availability Executing cluster transition: * Fencing node2 (reboot) * Pseudo action: group1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: stonith_complete * Pseudo action: rsc1_stop_0 * Pseudo action: all_stopped * Pseudo action: group1_stopped_0 Revised cluster status: Online: [ node1 ] OFFLINE: [ node2 ] rsc_stonith (stonith:null): Started node1 Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/ticket-master-14.summary b/pengine/test10/ticket-master-14.summary index 9f7b89d239..9a42d7882a 100644 --- a/pengine/test10/ticket-master-14.summary +++ b/pengine/test10/ticket-master-14.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-master-15.summary b/pengine/test10/ticket-master-15.summary index 9f7b89d239..9a42d7882a 100644 --- a/pengine/test10/ticket-master-15.summary +++ b/pengine/test10/ticket-master-15.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Demote rsc1:0 (Master -> Stopped node1) - * Stop rsc1:1 (node2) + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-master-3.summary b/pengine/test10/ticket-master-3.summary index 9f7b89d239..9a42d7882a 100644 --- a/pengine/test10/ticket-master-3.summary +++ b/pengine/test10/ticket-master-3.summary @@ -1,30 +1,30 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Demote rsc1:0 (Master -> 
Stopped node1) - * Stop rsc1:1 (node2) + * Stop rsc1:1 (node2) due to node availability Executing cluster transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 * Pseudo action: all_stopped Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 Master/Slave Set: ms1 [rsc1] Stopped: [ node1 node2 ] diff --git a/pengine/test10/ticket-rsc-sets-10.summary b/pengine/test10/ticket-rsc-sets-10.summary index 66f807a48c..8ef03e0fb5 100644 --- a/pengine/test10/ticket-rsc-sets-10.summary +++ b/pengine/test10/ticket-rsc-sets-10.summary @@ -1,51 +1,51 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone4 [rsc4] Started: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Stop rsc1 (node2) - * Stop rsc2 (node1) - * Stop rsc3 (node1) - * Stop rsc4:0 (node1) - * Stop rsc4:1 (node2) + * Stop rsc2 (node1) due to node availability + * Stop rsc3 (node1) due to node availability + * Stop rsc4:0 (node1) due to node availability + * Stop rsc4:1 (node2) due to node availability * Demote rsc5:0 (Master -> Slave node1) Executing cluster transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: all_stopped * Pseudo action: group2_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Stopped rsc3 (ocf::pacemaker:Dummy): Stopped Clone Set: clone4 [rsc4] Stopped: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Slaves: [ node1 node2 ] diff --git a/pengine/test10/ticket-rsc-sets-12.summary b/pengine/test10/ticket-rsc-sets-12.summary index 6801c64c2e..616a2a242e 100644 --- a/pengine/test10/ticket-rsc-sets-12.summary +++ b/pengine/test10/ticket-rsc-sets-12.summary @@ -1,40 +1,40 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone4 [rsc4] Stopped: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Slaves: [ node1 node2 ] Transition Summary: * Stop rsc1 (node2) - * Stop rsc2 (node1) - * Stop rsc3 (node1) + * Stop rsc2 (node1) due to node availability + * Stop rsc3 (node1) due to node availability Executing cluster transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: all_stopped * Pseudo action: group2_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Stopped rsc3 (ocf::pacemaker:Dummy): Stopped 
 Clone Set: clone4 [rsc4]
 Stopped: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Slaves: [ node1 node2 ]
diff --git a/pengine/test10/ticket-rsc-sets-13.summary b/pengine/test10/ticket-rsc-sets-13.summary
index 66f807a48c..8ef03e0fb5 100644
--- a/pengine/test10/ticket-rsc-sets-13.summary
+++ b/pengine/test10/ticket-rsc-sets-13.summary
@@ -1,51 +1,51 @@
 Current cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Started node2
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Started node1
 rsc3 (ocf::pacemaker:Dummy): Started node1
 Clone Set: clone4 [rsc4]
 Started: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Masters: [ node1 ]
 Slaves: [ node2 ]
 Transition Summary:
 * Stop rsc1 (node2)
- * Stop rsc2 (node1)
- * Stop rsc3 (node1)
- * Stop rsc4:0 (node1)
- * Stop rsc4:1 (node2)
+ * Stop rsc2 (node1) due to node availability
+ * Stop rsc3 (node1) due to node availability
+ * Stop rsc4:0 (node1) due to node availability
+ * Stop rsc4:1 (node2) due to node availability
 * Demote rsc5:0 (Master -> Slave node1)
 Executing cluster transition:
 * Resource action: rsc1 stop on node2
 * Pseudo action: group2_stop_0
 * Resource action: rsc3 stop on node1
 * Pseudo action: clone4_stop_0
 * Pseudo action: ms5_demote_0
 * Resource action: rsc2 stop on node1
 * Resource action: rsc4:1 stop on node1
 * Resource action: rsc4:0 stop on node2
 * Pseudo action: clone4_stopped_0
 * Resource action: rsc5:1 demote on node1
 * Pseudo action: ms5_demoted_0
 * Pseudo action: all_stopped
 * Pseudo action: group2_stopped_0
 Revised cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Stopped
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Stopped
 rsc3 (ocf::pacemaker:Dummy): Stopped
 Clone Set: clone4 [rsc4]
 Stopped: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Slaves: [ node1 node2 ]
diff --git a/pengine/test10/ticket-rsc-sets-14.summary b/pengine/test10/ticket-rsc-sets-14.summary
index 66f807a48c..8ef03e0fb5 100644
--- a/pengine/test10/ticket-rsc-sets-14.summary
+++ b/pengine/test10/ticket-rsc-sets-14.summary
@@ -1,51 +1,51 @@
 Current cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Started node2
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Started node1
 rsc3 (ocf::pacemaker:Dummy): Started node1
 Clone Set: clone4 [rsc4]
 Started: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Masters: [ node1 ]
 Slaves: [ node2 ]
 Transition Summary:
 * Stop rsc1 (node2)
- * Stop rsc2 (node1)
- * Stop rsc3 (node1)
- * Stop rsc4:0 (node1)
- * Stop rsc4:1 (node2)
+ * Stop rsc2 (node1) due to node availability
+ * Stop rsc3 (node1) due to node availability
+ * Stop rsc4:0 (node1) due to node availability
+ * Stop rsc4:1 (node2) due to node availability
 * Demote rsc5:0 (Master -> Slave node1)
 Executing cluster transition:
 * Resource action: rsc1 stop on node2
 * Pseudo action: group2_stop_0
 * Resource action: rsc3 stop on node1
 * Pseudo action: clone4_stop_0
 * Pseudo action: ms5_demote_0
 * Resource action: rsc2 stop on node1
 * Resource action: rsc4:1 stop on node1
 * Resource action: rsc4:0 stop on node2
 * Pseudo action: clone4_stopped_0
 * Resource action: rsc5:1 demote on node1
 * Pseudo action: ms5_demoted_0
 * Pseudo action: all_stopped
 * Pseudo action: group2_stopped_0
 Revised cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Stopped
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Stopped
 rsc3 (ocf::pacemaker:Dummy): Stopped
 Clone Set: clone4 [rsc4]
 Stopped: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Slaves: [ node1 node2 ]
diff --git a/pengine/test10/ticket-rsc-sets-3.summary b/pengine/test10/ticket-rsc-sets-3.summary
index 66f807a48c..8ef03e0fb5 100644
--- a/pengine/test10/ticket-rsc-sets-3.summary
+++ b/pengine/test10/ticket-rsc-sets-3.summary
@@ -1,51 +1,51 @@
 Current cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Started node2
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Started node1
 rsc3 (ocf::pacemaker:Dummy): Started node1
 Clone Set: clone4 [rsc4]
 Started: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Masters: [ node1 ]
 Slaves: [ node2 ]
 Transition Summary:
 * Stop rsc1 (node2)
- * Stop rsc2 (node1)
- * Stop rsc3 (node1)
- * Stop rsc4:0 (node1)
- * Stop rsc4:1 (node2)
+ * Stop rsc2 (node1) due to node availability
+ * Stop rsc3 (node1) due to node availability
+ * Stop rsc4:0 (node1) due to node availability
+ * Stop rsc4:1 (node2) due to node availability
 * Demote rsc5:0 (Master -> Slave node1)
 Executing cluster transition:
 * Resource action: rsc1 stop on node2
 * Pseudo action: group2_stop_0
 * Resource action: rsc3 stop on node1
 * Pseudo action: clone4_stop_0
 * Pseudo action: ms5_demote_0
 * Resource action: rsc2 stop on node1
 * Resource action: rsc4:1 stop on node1
 * Resource action: rsc4:0 stop on node2
 * Pseudo action: clone4_stopped_0
 * Resource action: rsc5:1 demote on node1
 * Pseudo action: ms5_demoted_0
 * Pseudo action: all_stopped
 * Pseudo action: group2_stopped_0
 Revised cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Stopped
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Stopped
 rsc3 (ocf::pacemaker:Dummy): Stopped
 Clone Set: clone4 [rsc4]
 Stopped: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Slaves: [ node1 node2 ]
diff --git a/pengine/test10/ticket-rsc-sets-7.summary b/pengine/test10/ticket-rsc-sets-7.summary
index 66f807a48c..8ef03e0fb5 100644
--- a/pengine/test10/ticket-rsc-sets-7.summary
+++ b/pengine/test10/ticket-rsc-sets-7.summary
@@ -1,51 +1,51 @@
 Current cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Started node2
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Started node1
 rsc3 (ocf::pacemaker:Dummy): Started node1
 Clone Set: clone4 [rsc4]
 Started: [ node1 node2 ]
 Master/Slave Set: ms5 [rsc5]
 Masters: [ node1 ]
 Slaves: [ node2 ]
 Transition Summary:
 * Stop rsc1 (node2)
- * Stop rsc2 (node1)
- * Stop rsc3 (node1)
- * Stop rsc4:0 (node1)
- * Stop rsc4:1 (node2)
+ * Stop rsc2 (node1) due to node availability
+ * Stop rsc3 (node1) due to node availability
+ * Stop rsc4:0 (node1) due to node availability
+ * Stop rsc4:1 (node2) due to node availability
 * Demote rsc5:0 (Master -> Slave node1)
 Executing cluster transition:
 * Resource action: rsc1 stop on node2
 * Pseudo action: group2_stop_0
 * Resource action: rsc3 stop on node1
 * Pseudo action: clone4_stop_0
 * Pseudo action: ms5_demote_0
 * Resource action: rsc2 stop on node1
 * Resource action: rsc4:1 stop on node1
 * Resource action: rsc4:0 stop on node2
 * Pseudo action: clone4_stopped_0
 * Resource action: rsc5:1 demote on node1
 * Pseudo action: ms5_demoted_0
 * Pseudo action: all_stopped
 * Pseudo action: group2_stopped_0
 Revised cluster status:
 Online: [ node1 node2 ]
 rsc_stonith (stonith:null): Started node1
 rsc1 (ocf::pacemaker:Dummy): Stopped
 Resource Group: group2
 rsc2 (ocf::pacemaker:Dummy): Stopped
rsc3 (ocf::pacemaker:Dummy): Stopped Clone Set: clone4 [rsc4] Stopped: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Slaves: [ node1 node2 ] diff --git a/pengine/test10/ticket-rsc-sets-9.summary b/pengine/test10/ticket-rsc-sets-9.summary index 66f807a48c..8ef03e0fb5 100644 --- a/pengine/test10/ticket-rsc-sets-9.summary +++ b/pengine/test10/ticket-rsc-sets-9.summary @@ -1,51 +1,51 @@ Current cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Started node2 Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone4 [rsc4] Started: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Masters: [ node1 ] Slaves: [ node2 ] Transition Summary: * Stop rsc1 (node2) - * Stop rsc2 (node1) - * Stop rsc3 (node1) - * Stop rsc4:0 (node1) - * Stop rsc4:1 (node2) + * Stop rsc2 (node1) due to node availability + * Stop rsc3 (node1) due to node availability + * Stop rsc4:0 (node1) due to node availability + * Stop rsc4:1 (node2) due to node availability * Demote rsc5:0 (Master -> Slave node1) Executing cluster transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: all_stopped * Pseudo action: group2_stopped_0 Revised cluster status: Online: [ node1 node2 ] rsc_stonith (stonith:null): Started node1 rsc1 (ocf::pacemaker:Dummy): Stopped Resource Group: group2 rsc2 (ocf::pacemaker:Dummy): Stopped rsc3 (ocf::pacemaker:Dummy): Stopped Clone Set: clone4 [rsc4] Stopped: [ node1 node2 ] Master/Slave Set: ms5 [rsc5] Slaves: [ node1 node2 ] diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary index d77622876f..f728e39f60 100644 --- a/pengine/test10/unfence-definition.summary +++ b/pengine/test10/unfence-definition.summary @@ -1,65 +1,65 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: * Fence (reboot) virt-4 'node is unclean' * Fence (on) virt-3 'Required by dlm:2' * Fence (on) virt-1 'Device definition changed' * Restart fencing (Started virt-1) - * Restart dlm:0 (Started virt-1) + * Restart dlm:0 (Started virt-1) due to required stonith * Start dlm:2 (virt-3) - * Restart clvmd:0 (Started virt-1) + * Restart clvmd:0 (Started virt-1) due to required stonith * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: fencing monitor on virt-3 * Resource action: fencing stop on virt-1 * Resource action: clvmd monitor on virt-2 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing delete on virt-1 * Resource action: dlm monitor on virt-3 * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * 
Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary index e71eafcf32..e8b15ecb58 100644 --- a/pengine/test10/unfence-parameters.summary +++ b/pengine/test10/unfence-parameters.summary @@ -1,69 +1,69 @@ Current cluster status: Node virt-4 (4): UNCLEAN (offline) Online: [ virt-1 virt-2 virt-3 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 ] Stopped: [ virt-3 virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 ] Stopped: [ virt-2 virt-3 virt-4 ] Transition Summary: * Fence (reboot) virt-4 'node is unclean' * Fence (on) virt-3 'Device parameters changed (reload)' * Fence (on) virt-2 'Device parameters changed (reload)' * Fence (on) virt-1 'Device parameters changed (reload)' * Restart fencing (Started virt-1) - * Restart dlm:0 (Started virt-1) - * Restart dlm:1 (Started virt-2) + * Restart dlm:0 (Started virt-1) due to required stonith + * Restart dlm:1 (Started virt-2) due to required stonith * Start dlm:2 (virt-3) - * Restart clvmd:0 (Started virt-1) + * Restart clvmd:0 (Started virt-1) due to required stonith * Start clvmd:1 (virt-2) * Start clvmd:2 (virt-3) Executing cluster transition: * Resource action: fencing monitor on virt-3 * Pseudo action: clvmd-clone_stop_0 * Fencing virt-4 (reboot) * Pseudo action: stonith_complete * Fencing virt-3 (on) * Resource action: fencing stop on virt-1 * Resource action: dlm monitor on virt-3 * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-2 * Fencing virt-2 (on) * Resource action: dlm stop on virt-1 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: dlm-clone_start_0 * Resource action: clvmd monitor on virt-2 * Fencing virt-1 (on) * Pseudo action: all_stopped * Resource action: fencing start on virt-1 * Resource action: dlm start on virt-1 * Resource action: dlm start on virt-2 * Resource action: dlm start on virt-3 * Pseudo action: dlm-clone_running_0 * Pseudo action: clvmd-clone_start_0 * Resource action: clvmd start on virt-1 * Resource action: clvmd start on virt-2 * Resource action: clvmd start on virt-3 * Pseudo action: clvmd-clone_running_0 Revised cluster status: Online: [ virt-1 virt-2 virt-3 ] OFFLINE: [ virt-4 ] fencing (stonith:fence_scsi): Started virt-1 Clone Set: dlm-clone [dlm] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] Clone Set: clvmd-clone [clvmd] Started: [ virt-1 virt-2 virt-3 ] Stopped: [ virt-4 ] diff --git a/pengine/test10/unmanaged-block-restart.summary b/pengine/test10/unmanaged-block-restart.summary index 0b70481845..01dffeb1d0 100644 --- a/pengine/test10/unmanaged-block-restart.summary +++ b/pengine/test10/unmanaged-block-restart.summary @@ -1,30 +1,30 @@ Current cluster status: Online: 
[ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Stopped rsc2 (ocf::pacemaker:Dummy): Started yingying.site rsc3 (ocf::pacemaker:Dummy): Started yingying.site rsc4 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: * Start rsc1 (yingying.site) - * Stop rsc2 (Started yingying.site - blocked) - * Stop rsc3 (Started yingying.site - blocked) + * Stop rsc2 (Started yingying.site - blocked) due to required rsc1 start + * Stop rsc3 (Started yingying.site - blocked) due to required rsc2 start Executing cluster transition: * Pseudo action: group1_stop_0 * Pseudo action: group1_start_0 * Resource action: rsc1 start on yingying.site * Resource action: rsc1 monitor=10000 on yingying.site Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site rsc2 (ocf::pacemaker:Dummy): Started yingying.site rsc3 (ocf::pacemaker:Dummy): Started yingying.site rsc4 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-1.summary b/pengine/test10/unmanaged-stop-1.summary index 978ddc1681..94e0908467 100644 --- a/pengine/test10/unmanaged-stop-1.summary +++ b/pengine/test10/unmanaged-stop-1.summary @@ -1,19 +1,19 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) + * Stop rsc1 (yingying.site - blocked) due to node availability Executing cluster transition: Revised cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-2.summary b/pengine/test10/unmanaged-stop-2.summary index 978ddc1681..94e0908467 100644 --- a/pengine/test10/unmanaged-stop-2.summary +++ b/pengine/test10/unmanaged-stop-2.summary @@ -1,19 +1,19 @@ 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) + * Stop rsc1 (yingying.site - blocked) due to node availability Executing cluster transition: Revised cluster status: Online: [ yingying.site ] rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( blocked ) diff --git a/pengine/test10/unmanaged-stop-3.summary b/pengine/test10/unmanaged-stop-3.summary index 74829a1746..c72d4514d3 100644 --- a/pengine/test10/unmanaged-stop-3.summary +++ b/pengine/test10/unmanaged-stop-3.summary @@ -1,22 +1,22 @@ 4 of 2 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) + * Stop rsc1 (yingying.site - blocked) unrunnable rsc2 stop due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 
(ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) diff --git a/pengine/test10/unmanaged-stop-4.summary b/pengine/test10/unmanaged-stop-4.summary index 457e41f34a..2704e98c89 100644 --- a/pengine/test10/unmanaged-stop-4.summary +++ b/pengine/test10/unmanaged-stop-4.summary @@ -1,24 +1,24 @@ 6 of 3 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) rsc3 (ocf::heartbeat:Dummy): Stopped ( disabled ) Transition Summary: - * Stop rsc1 (yingying.site - blocked) + * Stop rsc1 (yingying.site - blocked) unrunnable rsc2 stop due to node availability Executing cluster transition: * Pseudo action: group1_stop_0 Revised cluster status: Online: [ yingying.site ] Resource Group: group1 rsc1 (ocf::pacemaker:Dummy): Started yingying.site ( disabled ) rsc2 (ocf::pacemaker:Dummy): FAILED yingying.site ( disabled, blocked ) rsc3 (ocf::heartbeat:Dummy): Stopped ( disabled ) diff --git a/pengine/test10/unrunnable-1.summary b/pengine/test10/unrunnable-1.summary index 92f40310de..8c27e45f10 100644 --- a/pengine/test10/unrunnable-1.summary +++ b/pengine/test10/unrunnable-1.summary @@ -1,65 +1,65 @@ Current cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.182 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n03 child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped Transition Summary: - * Start DcIPaddr (c001n03 - blocked) - * Start child_192.168.100.181 (c001n03 - blocked) - * Start child_192.168.100.182 (c001n03 - blocked) - * Start child_192.168.100.183 (c001n03 - blocked) - * Start rsc_c001n08 (c001n03 - blocked) - * Start rsc_c001n02 (c001n03 - blocked) - * Start rsc_c001n03 (c001n03 - blocked) - * Start rsc_c001n01 (c001n03 - blocked) - * Stop child_DoFencing:1 (c001n02 - blocked) + * Start DcIPaddr (c001n03 - blocked) due to no quorum + * Start child_192.168.100.181 (c001n03 - blocked) due to no quorum + * Start child_192.168.100.182 (c001n03 - blocked) due to no quorum + * Start child_192.168.100.183 (c001n03 - blocked) due to no quorum + * Start rsc_c001n08 (c001n03 - blocked) due to no quorum + * Start rsc_c001n02 (c001n03 - blocked) due to no quorum + * Start rsc_c001n03 (c001n03 - blocked) due to no quorum + * Start rsc_c001n01 (c001n03 - blocked) due to no quorum + * Stop child_DoFencing:1 (c001n02 - blocked) due to node availability Executing cluster transition: * Resource action: DcIPaddr monitor on c001n03 * Resource action: child_192.168.100.181 monitor on c001n03 * Resource action: child_192.168.100.182 monitor on c001n03 * Resource action: child_192.168.100.183 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n03 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n03 
* Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Pseudo action: DoFencing_stop_0 * Pseudo action: DoFencing_stopped_0 Revised cluster status: Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline) Online: [ c001n03 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 child_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.182 (ocf::heartbeat:IPaddr): Stopped child_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started c001n03 child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN) child_DoFencing:2 (stonith:ssh): Stopped child_DoFencing:3 (stonith:ssh): Stopped diff --git a/pengine/test10/unrunnable-2.summary b/pengine/test10/unrunnable-2.summary index 0c0c765451..b67f7cb421 100644 --- a/pengine/test10/unrunnable-2.summary +++ b/pengine/test10/unrunnable-2.summary @@ -1,175 +1,175 @@ 6 of 117 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 Clone Set: haproxy-clone [haproxy] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: galera-master [galera] Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: memcached-clone [memcached] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: rabbitmq-clone [rabbitmq] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-core-clone [openstack-core] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: redis-master [redis] Masters: [ overcloud-controller-1 ] Slaves: [ overcloud-controller-0 overcloud-controller-2 ] ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 Clone Set: mongod-clone [mongod] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: openstack-heat-engine-clone [openstack-heat-engine] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-listener-clone 
[openstack-aodh-listener] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-clone [openstack-heat-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-api-clone [openstack-glance-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-api-clone [openstack-nova-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-api-clone [openstack-sahara-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-registry-clone [openstack-glance-registry] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-api-clone [openstack-cinder-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: delay-clone [delay] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-server-clone [neutron-server] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: httpd-clone [httpd] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Transition Summary: - * Start openstack-cinder-volume 
(overcloud-controller-2 - blocked) + * Start openstack-cinder-volume (overcloud-controller-2 - blocked) due to unrunnable openstack-cinder-scheduler-clone running Executing cluster transition: Revised cluster status: Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 Clone Set: haproxy-clone [haproxy] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: galera-master [galera] Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: memcached-clone [memcached] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: rabbitmq-clone [rabbitmq] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-core-clone [openstack-core] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Master/Slave Set: redis-master [redis] Masters: [ overcloud-controller-1 ] Slaves: [ overcloud-controller-0 overcloud-controller-2 ] ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 Clone Set: mongod-clone [mongod] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-l3-agent-clone [neutron-l3-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped Clone Set: openstack-heat-engine-clone [openstack-heat-engine] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-clone [openstack-heat-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-api-clone [openstack-glance-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-api-clone [openstack-nova-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 
overcloud-controller-2 ] Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-api-clone [openstack-sahara-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-glance-registry-clone [openstack-glance-registry] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-cinder-api-clone [openstack-cinder-api] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: delay-clone [delay] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: neutron-server-clone [neutron-server] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: httpd-clone [httpd] Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] diff --git a/pengine/test10/utilization-order2.summary b/pengine/test10/utilization-order2.summary index 7871579871..db65a164f6 100644 --- a/pengine/test10/utilization-order2.summary +++ b/pengine/test10/utilization-order2.summary @@ -1,38 +1,38 @@ Current cluster status: Online: [ node1 node2 ] rsc4 (ocf::pacemaker:Dummy): Stopped rsc3 (ocf::pacemaker:Dummy): Started node1 Clone Set: clone-rsc2 [rsc2] Started: [ node1 node2 ] rsc1 (ocf::pacemaker:Dummy): Started node2 Transition Summary: * Start rsc4 (node1) * Move rsc3 (Started node1 -> node2) - * Stop rsc2:0 (node1) + * Stop rsc2:0 (node1) due to node availability * Stop rsc1 (node2) Executing cluster transition: * Resource action: rsc3 stop on node1 * Pseudo action: clone-rsc2_stop_0 * Resource action: rsc1 stop on node2 * Pseudo action: load_stopped_node2 * Resource action: rsc3 start on node2 * Resource action: rsc2:1 stop on node1 * Pseudo action: clone-rsc2_stopped_0 * Pseudo action: load_stopped_node1 * Pseudo action: all_stopped * Resource action: rsc4 start on node1 Revised cluster status: Online: [ node1 node2 ] rsc4 (ocf::pacemaker:Dummy): Started node1 rsc3 (ocf::pacemaker:Dummy): Started node2 Clone 
Set: clone-rsc2 [rsc2] Started: [ node2 ] Stopped: [ node1 ] rsc1 (ocf::pacemaker:Dummy): Stopped diff --git a/pengine/test10/utilization-order4.summary b/pengine/test10/utilization-order4.summary index efb59582c8..09a8c31fe8 100644 --- a/pengine/test10/utilization-order4.summary +++ b/pengine/test10/utilization-order4.summary @@ -1,61 +1,61 @@ 2 of 13 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Node deglxen002: standby Online: [ deglxen001 ] degllx62-vm (ocf::heartbeat:Xen): Started deglxen002 degllx63-vm (ocf::heartbeat:Xen): Stopped ( disabled ) degllx61-vm (ocf::heartbeat:Xen): Started deglxen001 degllx64-vm (ocf::heartbeat:Xen): Stopped ( disabled ) stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 deglxen002 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 deglxen002 ] Transition Summary: * Migrate degllx62-vm (Started deglxen002 -> deglxen001) * Stop degllx61-vm (deglxen001) - * Stop nfs-xen_config:1 (deglxen002) - * Stop nfs-xen_swapfiles:1 (deglxen002) - * Stop nfs-xen_images:1 (deglxen002) - * Stop prim-ping:1 (deglxen002) + * Stop nfs-xen_config:1 (deglxen002) due to node availability + * Stop nfs-xen_swapfiles:1 (deglxen002) due to node availability + * Stop nfs-xen_images:1 (deglxen002) due to node availability + * Stop prim-ping:1 (deglxen002) due to node availability Executing cluster transition: * Resource action: degllx61-vm stop on deglxen001 * Pseudo action: load_stopped_deglxen001 * Resource action: degllx62-vm migrate_to on deglxen002 * Resource action: degllx62-vm migrate_from on deglxen001 * Resource action: degllx62-vm stop on deglxen002 * Pseudo action: clone-nfs_stop_0 * Pseudo action: load_stopped_deglxen002 * Pseudo action: degllx62-vm_start_0 * Pseudo action: grp-nfs:1_stop_0 * Resource action: nfs-xen_images:1 stop on deglxen002 * Resource action: degllx62-vm monitor=30000 on deglxen001 * Resource action: nfs-xen_swapfiles:1 stop on deglxen002 * Resource action: nfs-xen_config:1 stop on deglxen002 * Pseudo action: grp-nfs:1_stopped_0 * Pseudo action: clone-nfs_stopped_0 * Pseudo action: clone-ping_stop_0 * Resource action: prim-ping:0 stop on deglxen002 * Pseudo action: clone-ping_stopped_0 * Pseudo action: all_stopped Revised cluster status: Node deglxen002: standby Online: [ deglxen001 ] degllx62-vm (ocf::heartbeat:Xen): Started deglxen001 degllx63-vm (ocf::heartbeat:Xen): Stopped ( disabled ) degllx61-vm (ocf::heartbeat:Xen): Stopped deglxen002 degllx64-vm (ocf::heartbeat:Xen): Stopped ( disabled ) stonith_sbd (stonith:external/sbd): Started deglxen001 Clone Set: clone-nfs [grp-nfs] Started: [ deglxen001 ] Stopped: [ deglxen002 ] Clone Set: clone-ping [prim-ping] Started: [ deglxen001 ] Stopped: [ deglxen002 ] diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary index 14b1ee20fd..1d065520e7 100644 --- a/pengine/test10/whitebox-fail1.summary +++ b/pengine/test10/whitebox-fail1.summary @@ -1,56 +1,56 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): FAILED lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: * Fence (reboot) 
lxc1 (resource: container1) 'guest is unclean' * Recover container1 (Started 18node2) * Recover M:4 (Started lxc1) * Recover B (Started lxc1) - * Restart lxc1 (Started 18node2) + * Restart lxc1 (Started 18node2) due to required container1 start Executing cluster transition: * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Pseudo action: stonith_complete * Resource action: container1 start on 18node2 * Pseudo action: M-clone_stop_0 * Pseudo action: B_stop_0 * Resource action: lxc1 start on 18node2 * Resource action: lxc1 monitor=30000 on 18node2 * Pseudo action: M_stop_0 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: B start on lxc1 * Pseudo action: all_stopped * Resource action: M start on lxc1 * Pseudo action: M-clone_running_0 * Resource action: B monitor=10000 on lxc1 * Resource action: M monitor=10000 on lxc1 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary index 9dab410f6c..bea75a20e5 100644 --- a/pengine/test10/whitebox-imply-stop-on-fence.summary +++ b/pengine/test10/whitebox-imply-stop-on-fence.summary @@ -1,96 +1,96 @@ Current cluster status: Node kiff-01 (1): UNCLEAN (offline) Online: [ kiff-02 ] Containers: [ lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN) Clone Set: dlm-clone [dlm] dlm (ocf::pacemaker:controld): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] clvmd (ocf::heartbeat:clvm): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] shared0 (ocf::heartbeat:Filesystem): Started kiff-01 (UNCLEAN) Started: [ kiff-02 ] Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN) R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN) R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): FAILED lxc-01_kiff-01 Transition Summary: * Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean' * Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean' * Fence (reboot) kiff-01 'peer is no longer part of the cluster' * Move fence-kiff-02 (Started kiff-01 -> kiff-02) - * Stop dlm:0 (kiff-01) - * Stop clvmd:0 (kiff-01) - * Stop shared0:0 (kiff-01) + * Stop dlm:0 (kiff-01) due to node availability + * Stop clvmd:0 (kiff-01) due to node availability + * Stop shared0:0 (kiff-01) due to node availability * Recover R-lxc-01_kiff-01 (Started kiff-01 -> kiff-02) * Move R-lxc-02_kiff-01 (Started kiff-01 -> kiff-02) * 
Recover vm-fs (Started lxc-01_kiff-01) * Move lxc-01_kiff-01 (Started kiff-01 -> kiff-02) * Move lxc-02_kiff-01 (Started kiff-01 -> kiff-02) Executing cluster transition: * Pseudo action: fence-kiff-02_stop_0 * Fencing kiff-01 (reboot) * Pseudo action: lxc-01_kiff-01_stop_0 * Pseudo action: lxc-02_kiff-01_stop_0 * Pseudo action: R-lxc-01_kiff-01_stop_0 * Pseudo action: R-lxc-02_kiff-01_stop_0 * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01 * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01 * Pseudo action: stonith_complete * Pseudo action: shared0-clone_stop_0 * Resource action: R-lxc-01_kiff-01 start on kiff-02 * Resource action: R-lxc-02_kiff-01 start on kiff-02 * Pseudo action: vm-fs_stop_0 * Resource action: lxc-01_kiff-01 start on kiff-02 * Resource action: lxc-02_kiff-01 start on kiff-02 * Pseudo action: shared0_stop_0 * Pseudo action: shared0-clone_stopped_0 * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02 * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02 * Resource action: vm-fs start on lxc-01_kiff-01 * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 * Pseudo action: clvmd-clone_stop_0 * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 * Pseudo action: clvmd_stop_0 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Pseudo action: dlm_stop_0 * Pseudo action: dlm-clone_stopped_0 * Pseudo action: all_stopped * Resource action: fence-kiff-02 start on kiff-02 * Resource action: fence-kiff-02 monitor=60000 on kiff-02 Revised cluster status: Online: [ kiff-02 ] OFFLINE: [ kiff-01 ] Containers: [ lxc-01_kiff-01:R-lxc-01_kiff-01 lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-01:R-lxc-02_kiff-01 lxc-02_kiff-02:R-lxc-02_kiff-02 ] fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02 Clone Set: dlm-clone [dlm] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: clvmd-clone [clvmd] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] Clone Set: shared0-clone [shared0] Started: [ kiff-02 ] Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02 vm-fs (ocf::heartbeat:Filesystem): Started lxc-01_kiff-01 diff --git a/pengine/test10/whitebox-move.summary b/pengine/test10/whitebox-move.summary index 3422ac7890..dbf0780532 100644 --- a/pengine/test10/whitebox-move.summary +++ b/pengine/test10/whitebox-move.summary @@ -1,47 +1,47 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started lxc1 Transition Summary: * Move container1 (Started 18node1 -> 18node2) - * Restart M:3 (Started lxc1) - * Restart A (Started lxc1) + * Restart M:3 (Started lxc1) due to required container1 start + * Restart A (Started lxc1) due to required container1 start * Move lxc1 (Started 18node1 -> 18node2) Executing cluster 
transition: * Pseudo action: M-clone_stop_0 * Resource action: A stop on lxc1 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Pseudo action: M-clone_start_0 * Resource action: lxc1 stop on 18node1 * Resource action: container1 stop on 18node1 * Pseudo action: all_stopped * Resource action: container1 start on 18node2 * Resource action: lxc1 start on 18node2 * Resource action: M start on lxc1 * Resource action: M monitor=10000 on lxc1 * Pseudo action: M-clone_running_0 * Resource action: A start on lxc1 * Resource action: A monitor=10000 on lxc1 * Resource action: lxc1 monitor=30000 on 18node2 Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started lxc1 diff --git a/pengine/test10/whitebox-ms-ordering-move.summary b/pengine/test10/whitebox-ms-ordering-move.summary index d5f133b62e..b48d988759 100644 --- a/pengine/test10/whitebox-ms-ordering-move.summary +++ b/pengine/test10/whitebox-ms-ordering-move.summary @@ -1,94 +1,94 @@ Current cluster status: Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-3 FencingPass (stonith:fence_dummy): Started rhel7-4 FencingFail (stonith:fence_dummy): Started rhel7-5 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Started rhel7-2 rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Started rhel7-3 rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Started rhel7-4 rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Started rhel7-5 migrator (ocf::pacemaker:Dummy): Started rhel7-4 Clone Set: Connectivity [ping-1] Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Stopped: [ lxc1 lxc2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ rhel7-3 ] Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] Resource Group: group-1 r192.168.122.207 (ocf::heartbeat:IPaddr2): Started rhel7-3 petulant (service:DummySD): Started rhel7-3 r192.168.122.208 (ocf::heartbeat:IPaddr2): Started rhel7-3 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-1 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-1 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] Transition Summary: * Move container1 (Started rhel7-1 -> rhel7-2) - * Restart lxc-ms:0 (Master lxc1) + * Restart lxc-ms:0 (Master lxc1) due to required container1 start * Move lxc1 (Started rhel7-1 -> rhel7-2) Executing cluster transition: * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 monitor on rhel7-5 * Resource action: lxc1 monitor on rhel7-4 * Resource action: lxc1 monitor on rhel7-3 * Resource action: lxc1 monitor on rhel7-2 * Resource action: lxc2 monitor on rhel7-5 * Resource action: lxc2 monitor on rhel7-4 * Resource action: lxc2 monitor on rhel7-3 * Resource action: lxc2 monitor on rhel7-2 * Resource action: lxc-ms demote on lxc1 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc-ms stop on lxc1 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc1 stop on rhel7-1 * Resource action: container1 stop on rhel7-1 * Pseudo action: all_stopped * Resource action: container1 start on 
rhel7-2 * Resource action: lxc1 start on rhel7-2 * Resource action: lxc-ms start on lxc1 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc1 monitor=30000 on rhel7-2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised cluster status: Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started rhel7-3 FencingPass (stonith:fence_dummy): Started rhel7-4 FencingFail (stonith:fence_dummy): Started rhel7-5 rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Started rhel7-2 rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Started rhel7-3 rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Started rhel7-4 rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Started rhel7-5 migrator (ocf::pacemaker:Dummy): Started rhel7-4 Clone Set: Connectivity [ping-1] Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Stopped: [ lxc1 lxc2 ] Master/Slave Set: master-1 [stateful-1] Masters: [ rhel7-3 ] Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] Resource Group: group-1 r192.168.122.207 (ocf::heartbeat:IPaddr2): Started rhel7-3 petulant (service:DummySD): Started rhel7-3 r192.168.122.208 (ocf::heartbeat:IPaddr2): Started rhel7-3 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3 container1 (ocf::heartbeat:VirtualDomain): Started rhel7-2 container2 (ocf::heartbeat:VirtualDomain): Started rhel7-1 Master/Slave Set: lxc-ms-master [lxc-ms] Masters: [ lxc1 ] Slaves: [ lxc2 ] diff --git a/pengine/test10/whitebox-orphan-ms.summary b/pengine/test10/whitebox-orphan-ms.summary index 30c6a3b65c..3efa6bd245 100644 --- a/pengine/test10/whitebox-orphan-ms.summary +++ b/pengine/test10/whitebox-orphan-ms.summary @@ -1,85 +1,85 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] Fencing (stonith:fence_xvm): Started 18node2 FencingPass (stonith:fence_dummy): Started 18node3 FencingFail (stonith:fence_dummy): Started 18node3 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node2 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node2 18node3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node2 18node3 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 container2 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node1 lxc1 (ocf::pacemaker:remote): ORPHANED Started 18node1 lxc-ms (ocf::pacemaker:Stateful): ORPHANED Master [ lxc1 lxc2 ] lxc2 (ocf::pacemaker:remote): ORPHANED Started 18node1 container1 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node1 Transition Summary: * Move FencingFail (Started 18node3 -> 18node1) - * Stop container2 (18node1) - * Stop lxc1 (18node1) + * Stop container2 (18node1) due to node availability + * Stop lxc1 (18node1) due to node availability * Demote lxc-ms (Master -> Stopped lxc1) - * Stop lxc2 (18node1) - * Stop container1 (18node1) + * Stop lxc2 (18node1) due to node availability + * Stop container1 (18node1) due to node availability Executing cluster transition: * Resource action: FencingFail stop on 18node3 * Resource action: lxc-ms demote on 
lxc2 * Resource action: lxc-ms demote on lxc1 * Resource action: FencingFail start on 18node1 * Resource action: lxc-ms stop on lxc2 * Resource action: lxc-ms stop on lxc1 * Resource action: lxc-ms delete on 18node3 * Resource action: lxc-ms delete on 18node2 * Resource action: lxc-ms delete on 18node1 * Resource action: lxc2 stop on 18node1 * Resource action: lxc2 delete on 18node3 * Resource action: lxc2 delete on 18node2 * Resource action: lxc2 delete on 18node1 * Resource action: container2 stop on 18node1 * Resource action: container2 delete on 18node3 * Resource action: container2 delete on 18node2 * Resource action: container2 delete on 18node1 * Resource action: lxc1 stop on 18node1 * Resource action: lxc1 delete on 18node3 * Resource action: lxc1 delete on 18node2 * Resource action: lxc1 delete on 18node1 * Resource action: container1 stop on 18node1 * Resource action: container1 delete on 18node3 * Resource action: container1 delete on 18node2 * Resource action: container1 delete on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Fencing (stonith:fence_xvm): Started 18node2 FencingPass (stonith:fence_dummy): Started 18node3 FencingFail (stonith:fence_dummy): Started 18node1 rsc_18node1 (ocf::heartbeat:IPaddr2): Started 18node1 rsc_18node2 (ocf::heartbeat:IPaddr2): Started 18node2 rsc_18node3 (ocf::heartbeat:IPaddr2): Started 18node3 migrator (ocf::pacemaker:Dummy): Started 18node1 Clone Set: Connectivity [ping-1] Started: [ 18node1 18node2 18node3 ] Master/Slave Set: master-1 [stateful-1] Masters: [ 18node1 ] Slaves: [ 18node2 18node3 ] Resource Group: group-1 r192.168.122.87 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.88 (ocf::heartbeat:IPaddr2): Started 18node1 r192.168.122.89 (ocf::heartbeat:IPaddr2): Started 18node1 lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 diff --git a/pengine/test10/whitebox-orphaned.summary b/pengine/test10/whitebox-orphaned.summary index 7be845326a..52b54aa5a3 100644 --- a/pengine/test10/whitebox-orphaned.summary +++ b/pengine/test10/whitebox-orphaned.summary @@ -1,55 +1,55 @@ Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] M (ocf::pacemaker:Dummy): ORPHANED Started lxc1 Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 container1 (ocf::heartbeat:VirtualDomain): ORPHANED Started 18node2 lxc1 (ocf::pacemaker:remote): ORPHANED Started 18node2 Transition Summary: - * Stop M:4 (lxc1) + * Stop M:4 (lxc1) due to node availability * Move B (Started lxc1 -> lxc2) - * Stop container1 (18node2) - * Stop lxc1 (18node2) + * Stop container1 (18node2) due to node availability + * Stop lxc1 (18node2) due to node availability Executing cluster transition: * Pseudo action: M-clone_stop_0 * Resource action: B stop on lxc1 * Cluster action: clear_failcount for container1 on 18node2 * Cluster action: clear_failcount for lxc1 on 18node2 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Resource action: B start on lxc2 * Resource action: lxc1 stop on 18node2 * Resource action: lxc1 delete on 18node3 * Resource action: lxc1 delete on 18node2 * Resource action: lxc1 delete on 18node1 * Resource action: B monitor=10000 on lxc2 * 
Resource action: container1 stop on 18node2 * Resource action: container1 delete on 18node3 * Resource action: container1 delete on 18node2 * Resource action: container1 delete on 18node1 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc2 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 diff --git a/pengine/test10/whitebox-stop.summary b/pengine/test10/whitebox-stop.summary index b6a2954ffe..89094dacae 100644 --- a/pengine/test10/whitebox-stop.summary +++ b/pengine/test10/whitebox-stop.summary @@ -1,48 +1,48 @@ 1 of 14 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc1:container1 lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Started 18node2 ( disabled ) container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc1 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1 Transition Summary: - * Stop container1 (18node2) - * Stop M:4 (lxc1) + * Stop container1 (18node2) due to node availability + * Stop M:4 (lxc1) due to node availability * Move B (Started lxc1 -> lxc2) - * Stop lxc1 (18node2) + * Stop lxc1 (18node2) due to node availability Executing cluster transition: * Pseudo action: M-clone_stop_0 * Resource action: B stop on lxc1 * Resource action: M stop on lxc1 * Pseudo action: M-clone_stopped_0 * Resource action: B start on lxc2 * Resource action: lxc1 stop on 18node2 * Resource action: container1 stop on 18node2 * Resource action: B monitor=10000 on lxc2 * Pseudo action: all_stopped Revised cluster status: Online: [ 18node1 18node2 18node3 ] Containers: [ lxc2:container2 ] container1 (ocf::heartbeat:VirtualDomain): Stopped ( disabled ) container2 (ocf::heartbeat:VirtualDomain): Started 18node2 shoot1 (stonith:fence_xvm): Started 18node3 Clone Set: M-clone [M] Started: [ 18node1 18node2 18node3 lxc2 ] Stopped: [ lxc1 ] A (ocf::pacemaker:Dummy): Started 18node1 B (ocf::pacemaker:Dummy): Started lxc2 C (ocf::pacemaker:Dummy): Started lxc2 D (ocf::pacemaker:Dummy): Started 18node1