diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 8caf522ce9..377100ca37 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,3492 +1,3498 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <unpack.h>
CRM_TRACE_INIT_DATA(pe_status);
#define set_config_flag(data_set, option, flag) do { \
const char *tmp = pe_pref(data_set->config_hash, option); \
if(tmp) { \
if(crm_is_true(tmp)) { \
set_bit(data_set->flags, flag); \
} else { \
clear_bit(data_set->flags, flag); \
} \
} \
} while(0)
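/* Illustrative usage (these mirror calls made later in unpack_config()):
 *
 *   set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
 *   set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
 *
 * If the option is absent from config_hash, the flag is left untouched.
 */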
gboolean unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, xmlNode ** last_failure,
enum action_fail_response *failed, pe_working_set_t * data_set);
static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node);
static gboolean
is_dangling_container_remote_node(node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
* from both the config and the status section. */
if (is_remote_node(node) &&
node->details->remote_rsc &&
node->details->remote_rsc->container == NULL &&
is_set(node->details->remote_rsc->flags, pe_rsc_orphan_container_filler)) {
return TRUE;
}
return FALSE;
}
+
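/* Schedule the closest thing to fencing that each node type supports:
 *  - guest nodes: mark the container resource failed so it gets recovered
 *  - dangling connections: mark the leftover connection resource failed
 *  - baremetal remote nodes: flag the connection for a reset and mark the node unclean
 *  - cluster nodes: mark unclean so real fencing is scheduled (if possible)
 */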
void
pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (is_container_remote_node(node)) {
resource_t *rsc = node->details->remote_rsc->container;
if (is_set(rsc->flags, pe_rsc_failed) == FALSE) {
if (!is_set(rsc->flags, pe_rsc_managed)) {
- crm_notice("Not fencing guest node %s because the container is "
- "unmanaged, otherwise we would do so recovering %s "
- "%s", node->details->uname, rsc->id, reason);
+ crm_notice("Not fencing node %s due to '%s': container %s is"
+ " unmanaged"
+ "%s", node->details->uname, reason, rsc->id);
} else {
- crm_warn("Guest node %s will be fenced (by recovering %s) %s",
- node->details->uname, rsc->id, reason);
+ crm_warn("Remote node %s will be fenced due to '%s' by recovering %s",
+ node->details->uname, reason, rsc->id);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
set_bit(rsc->flags, pe_rsc_failed);
}
}
+
} else if (is_dangling_container_remote_node(node)) {
- crm_info("Cleaning up dangling connection resource for guest node %s %s"
+ crm_info("Cleaning up dangling connection resource for guest node %s due to '%s'"
" (fencing is already done, guest resource no longer exists)",
node->details->uname, reason);
set_bit(node->details->remote_rsc->flags, pe_rsc_failed);
} else if (is_baremetal_remote_node(node)) {
resource_t *rsc = node->details->remote_rsc;
if (rsc && (!is_set(rsc->flags, pe_rsc_managed))) {
- crm_notice("Not fencing node %s because connection is unmanaged, "
- "otherwise would %s", node->details->uname, reason);
+ crm_notice("Not fencing node %s due to '%s': connection is unmanaged",
+ node->details->uname, reason);
} else if(node->details->remote_requires_reset == FALSE) {
node->details->remote_requires_reset = TRUE;
if (pe_can_fence(data_set, node)) {
- crm_warn("Node %s will be fenced %s", node->details->uname, reason);
+ crm_warn("Remote node %s will be fenced due to %s", node->details->uname, reason);
} else {
- crm_warn("Node %s is unclean %s", node->details->uname, reason);
+ crm_warn("Remote node %s is unclean due to %s", node->details->uname, reason);
}
}
node->details->unclean = TRUE;
- } else if (node->details->unclean == FALSE) {
+ } else if (node->details->unclean) {
if (pe_can_fence(data_set, node)) {
- crm_warn("Node %s will be fenced %s", node->details->uname, reason);
+ crm_trace("Node %s would also be fenced due to '%s'", node->details->uname, reason);
} else {
- crm_warn("Node %s is unclean %s", node->details->uname, reason);
+ crm_trace("Node %s is also unclean due to '%s'", node->details->uname, reason);
}
+
+ } else if (pe_can_fence(data_set, node)) {
+ crm_warn("Node %s will be fenced due to %s", node->details->uname, reason);
node->details->unclean = TRUE;
} else {
- crm_trace("Node %s would also be fenced '%s'", node->details->uname, reason);
+ crm_warn("Node %s is unclean due to %s", node->details->uname, reason);
+ node->details->unclean = TRUE;
}
}
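/* Cluster options are read from nvpairs in the CIB's crm_config section,
 * e.g. (sketch):
 *
 *   <crm_config>
 *     <cluster_property_set id="cib-bootstrap-options">
 *       <nvpair id="opt-stonith" name="stonith-enabled" value="true"/>
 *       <nvpair id="opt-quorum" name="no-quorum-policy" value="stop"/>
 *     </cluster_property_set>
 *   </crm_config>
 */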
gboolean
unpack_config(xmlNode * config, pe_working_set_t * data_set)
{
const char *value = NULL;
GHashTable *config_hash =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
xmlXPathObjectPtr xpathObj = NULL;
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
xpathObj = xpath_search(data_set->input, "//nvpair[@name='provides' and @value='unfencing']");
if(xpathObj && numXpathResults(xpathObj) > 0) {
set_bit(data_set->flags, pe_flag_enable_unfencing);
}
freeXpathObject(xpathObj);
}
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
xpathObj = xpath_search(data_set->input, "//nvpair[@name='requires' and @value='unfencing']");
if(xpathObj && numXpathResults(xpathObj) > 0) {
set_bit(data_set->flags, pe_flag_enable_unfencing);
}
freeXpathObject(xpathObj);
}
#ifdef REDHAT_COMPAT_6
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
xpathObj = xpath_search(data_set->input, "//primitive[@type='fence_scsi']");
if(xpathObj && numXpathResults(xpathObj) > 0) {
set_bit(data_set->flags, pe_flag_enable_unfencing);
}
freeXpathObject(xpathObj);
}
#endif
data_set->config_hash = config_hash;
unpack_instance_attributes(data_set->input, config, XML_CIB_TAG_PROPSET, NULL, config_hash,
CIB_OPTIONS_FIRST, FALSE, data_set->now);
verify_pe_options(data_set->config_hash);
set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
if(is_not_set(data_set->flags, pe_flag_startup_probes)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_notice("Watchdog will be used via SBD if fencing is required");
set_bit(data_set->flags, pe_flag_have_stonith_resource);
}
value = pe_pref(data_set->config_hash, "stonith-timeout");
data_set->stonith_timeout = crm_get_msec(value);
crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
crm_debug("STONITH of failed nodes is %s",
is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
crm_trace("STONITH will %s nodes", data_set->stonith_action);
set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
crm_debug("Concurrent fencing is %s",
is_set(data_set->flags, pe_flag_concurrent_fencing) ? "enabled" : "disabled");
set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
crm_debug("Stop all active resources: %s",
is_set(data_set->flags, pe_flag_stop_everything) ? "true" : "false");
set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
value = pe_pref(data_set->config_hash, "default-resource-stickiness");
data_set->default_resource_stickiness = char2score(value);
crm_debug("Default stickiness: %d", data_set->default_resource_stickiness);
value = pe_pref(data_set->config_hash, "no-quorum-policy");
if (safe_str_eq(value, "ignore")) {
data_set->no_quorum_policy = no_quorum_ignore;
} else if (safe_str_eq(value, "freeze")) {
data_set->no_quorum_policy = no_quorum_freeze;
} else if (safe_str_eq(value, "suicide")) {
gboolean do_panic = FALSE;
crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic);
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
crm_config_err
("Setting no-quorum-policy=suicide makes no sense if stonith-enabled=false");
}
if (do_panic && is_set(data_set->flags, pe_flag_stonith_enabled)) {
data_set->no_quorum_policy = no_quorum_suicide;
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && do_panic == FALSE) {
crm_notice("Resetting no-quorum-policy to 'stop': The cluster has never had quorum");
data_set->no_quorum_policy = no_quorum_stop;
}
} else {
data_set->no_quorum_policy = no_quorum_stop;
}
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
crm_debug("On loss of CCM Quorum: Freeze resources");
break;
case no_quorum_stop:
crm_debug("On loss of CCM Quorum: Stop ALL resources");
break;
case no_quorum_suicide:
crm_notice("On loss of CCM Quorum: Fence all remaining nodes");
break;
case no_quorum_ignore:
crm_notice("On loss of CCM Quorum: Ignore");
break;
}
set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
crm_trace("Orphan resources are %s",
is_set(data_set->flags, pe_flag_stop_rsc_orphans) ? "stopped" : "ignored");
set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
crm_trace("Orphan resource actions are %s",
is_set(data_set->flags, pe_flag_stop_action_orphans) ? "stopped" : "ignored");
set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop);
crm_trace("Stopped resources are removed from the status section: %s",
is_set(data_set->flags, pe_flag_remove_after_stop) ? "true" : "false");
set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
crm_trace("Maintenance mode: %s",
is_set(data_set->flags, pe_flag_maintenance_mode) ? "true" : "false");
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
clear_bit(data_set->flags, pe_flag_is_managed_default);
} else {
set_config_flag(data_set, "is-managed-default", pe_flag_is_managed_default);
}
crm_trace("By default resources are %smanaged",
is_set(data_set->flags, pe_flag_is_managed_default) ? "" : "not ");
set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
crm_trace("Start failures are %s",
is_set(data_set->flags,
pe_flag_start_failure_fatal) ? "always fatal" : "handled by failcount");
node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red"));
node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green"));
node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow"));
crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s",
pe_pref(data_set->config_hash, "node-health-red"),
pe_pref(data_set->config_hash, "node-health-yellow"),
pe_pref(data_set->config_hash, "node-health-green"));
data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
crm_trace("Placement strategy: %s", data_set->placement_strategy);
return TRUE;
}
static void
destroy_digest_cache(gpointer ptr)
{
op_digest_cache_t *data = ptr;
free_xml(data->params_all);
free_xml(data->params_secure);
free_xml(data->params_restart);
free(data->digest_all_calc);
free(data->digest_restart_calc);
free(data->digest_secure_calc);
free(data);
}
node_t *
pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set)
{
node_t *new_node = NULL;
if (pe_find_node(data_set->nodes, uname) != NULL) {
crm_config_warn("Detected multiple node entries with uname=%s"
" - this is rarely intended", uname);
}
new_node = calloc(1, sizeof(node_t));
if (new_node == NULL) {
return NULL;
}
new_node->weight = char2score(score);
new_node->fixed = FALSE;
new_node->details = calloc(1, sizeof(struct node_shared_s));
if (new_node->details == NULL) {
free(new_node);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->details->id = id;
new_node->details->uname = uname;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
new_node->details->type = node_ping;
if (safe_str_eq(type, "remote")) {
new_node->details->type = node_remote;
set_bit(data_set->flags, pe_flag_have_remote_nodes);
} else if (type == NULL || safe_str_eq(type, "member")
|| safe_str_eq(type, NORMALNODE)) {
new_node->details->type = node_member;
}
new_node->details->attrs = g_hash_table_new_full(crm_str_hash, g_str_equal,
g_hash_destroy_str,
g_hash_destroy_str);
if (is_remote_node(new_node)) {
g_hash_table_insert(new_node->details->attrs, strdup("#kind"), strdup("remote"));
} else {
g_hash_table_insert(new_node->details->attrs, strdup("#kind"), strdup("cluster"));
}
new_node->details->utilization =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
g_hash_destroy_str);
new_node->details->digest_cache =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
destroy_digest_cache);
data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname);
return new_node;
}
bool
remote_id_conflict(const char *remote_name, pe_working_set_t *data)
{
bool match = FALSE;
#if 1
match = (pe_find_resource(data->resources, remote_name) != NULL);
#else
if (data->name_check == NULL) {
data->name_check = g_hash_table_new(crm_str_hash, g_str_equal);
for (xml_rsc = __xml_first_child(parent); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
const char *id = ID(xml_rsc);
/* avoid heap allocation here, because this hashtable will not outlive the XML strings it points at */
g_hash_table_insert(data->name_check, (char *) id, (char *) id);
}
}
if (g_hash_table_lookup(data->name_check, remote_name)) {
match = TRUE;
}
#endif
if (match) {
crm_err("Invalid remote-node name, a resource called '%s' already exists.", remote_name);
return TRUE;
}
return match;
}
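/* If a resource carries a "remote-node" meta attribute (plus optional
 * "remote-addr", "remote-port" and "remote-connect-timeout"), expand it into
 * an implicit connection resource in the XML, roughly (sketch):
 *
 *   <primitive id="<remote-node>" class="ocf" provider="pacemaker" type="remote">
 *     <meta_attributes>      container + internal-resource markers
 *     <operations>           monitor (30s) and start (timeout = connect timeout, 60s default)
 *     <instance_attributes>  addr/port, if remote-addr / remote-port were given
 *   </primitive>
 */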
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
{
xmlNode *xml_rsc = NULL;
xmlNode *xml_tmp = NULL;
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = ID(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
const char *remote_allow_migrate=NULL;
const char *container_managed = NULL;
for (attr_set = __xml_first_child(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) {
if (safe_str_neq((const char *)attr_set->name, XML_TAG_META_SETS)) {
continue;
}
for (attr = __xml_first_child(attr_set); attr != NULL; attr = __xml_next_element(attr)) {
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
if (safe_str_eq(name, XML_RSC_ATTR_REMOTE_NODE)) {
remote_name = value;
} else if (safe_str_eq(name, "remote-addr")) {
remote_server = value;
} else if (safe_str_eq(name, "remote-port")) {
remote_port = value;
} else if (safe_str_eq(name, "remote-connect-timeout")) {
connect_timeout = value;
} else if (safe_str_eq(name, "remote-allow-migrate")) {
remote_allow_migrate=value;
} else if (safe_str_eq(name, XML_RSC_ATTR_MANAGED)) {
container_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (remote_id_conflict(remote_name, data)) {
return NULL;
}
xml_rsc = create_xml_node(parent, XML_CIB_TAG_RESOURCE);
crm_xml_add(xml_rsc, XML_ATTR_ID, remote_name);
crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, "pacemaker");
crm_xml_add(xml_rsc, XML_ATTR_TYPE, "remote");
xml_tmp = create_xml_node(xml_rsc, XML_TAG_META_SETS);
crm_xml_set_id(xml_tmp, "%s_%s", remote_name, XML_TAG_META_SETS);
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "meta-attributes-container");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_RSC_ATTR_CONTAINER);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, container_id);
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "meta-attributes-internal");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_RSC_ATTR_INTERNAL_RSC);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, "true");
if (remote_allow_migrate) {
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "meta-attributes-migrate");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_ALLOW_MIGRATE);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, remote_allow_migrate);
}
if (container_managed) {
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "meta-attributes-managed");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_RSC_ATTR_MANAGED);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, container_managed);
}
xml_tmp = create_xml_node(xml_rsc, "operations");
attr = create_xml_node(xml_tmp, XML_ATTR_OP);
crm_xml_set_id(attr, "%s_%s", remote_name, "monitor-interval-30s");
crm_xml_add(attr, XML_ATTR_TIMEOUT, "30s");
crm_xml_add(attr, XML_LRM_ATTR_INTERVAL, "30s");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, "monitor");
if (connect_timeout) {
attr = create_xml_node(xml_tmp, XML_ATTR_OP);
crm_xml_set_id(attr, "%s_%s", remote_name, "start-interval-0");
crm_xml_add(attr, XML_ATTR_TIMEOUT, connect_timeout);
crm_xml_add(attr, XML_LRM_ATTR_INTERVAL, "0");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, "start");
}
if (remote_port || remote_server) {
xml_tmp = create_xml_node(xml_rsc, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_tmp, "%s_%s", remote_name, XML_TAG_ATTR_SETS);
if (remote_server) {
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "instance-attributes-addr");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, "addr");
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, remote_server);
}
if (remote_port) {
attr = create_xml_node(xml_tmp, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(attr, "%s_%s", remote_name, "instance-attributes-port");
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, "port");
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, remote_port);
}
}
return remote_name;
}
static void
handle_startup_fencing(pe_working_set_t *data_set, node_t *new_node)
{
static const char *blind_faith = NULL;
static gboolean unseen_are_unclean = TRUE;
static gboolean need_warning = TRUE;
if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
blind_faith = pe_pref(data_set->config_hash, "startup-fencing");
if (crm_is_true(blind_faith) == FALSE) {
unseen_are_unclean = FALSE;
if (need_warning) {
crm_warn("Blind faith: not fencing unseen nodes");
/* Warn once per run, not per node and transition */
need_warning = FALSE;
}
}
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE
|| unseen_are_unclean == FALSE) {
/* blind faith... */
new_node->details->unclean = FALSE;
} else {
/* all nodes are unclean until we've seen their
* status entry
*/
new_node->details->unclean = TRUE;
}
/* We need to be able to determine if a node's status section
* exists or not separate from whether the node is unclean. */
new_node->details->unseen = TRUE;
}
gboolean
unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
const char *score = NULL;
for (xml_obj = __xml_first_child(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) {
new_node = NULL;
id = crm_element_value(xml_obj, XML_ATTR_ID);
uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
type = crm_element_value(xml_obj, XML_ATTR_TYPE);
score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
crm_config_err("Must specify id tag in <node>");
continue;
}
new_node = pe_create_node(id, uname, type, score, data_set);
if (new_node == NULL) {
return FALSE;
}
/* if(data_set->have_quorum == FALSE */
/* && data_set->no_quorum_policy == no_quorum_stop) { */
/* /\* start shutting resources down *\/ */
/* new_node->weight = -INFINITY; */
/* } */
handle_startup_fencing(data_set, new_node);
add_node_attrs(xml_obj, new_node, FALSE, data_set);
unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_UTILIZATION, NULL,
new_node->details->utilization, NULL, FALSE, data_set->now);
crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
}
}
if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
crm_info("Creating a fake local node");
pe_create_node(data_set->localhost, data_set->localhost, NULL, 0,
data_set);
}
return TRUE;
}
static void
setup_container(resource_t * rsc, pe_working_set_t * data_set)
{
const char *container_id = NULL;
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
setup_container(child_rsc, data_set);
}
return;
}
container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
if (container_id && safe_str_neq(container_id, rsc->id)) {
resource_t *container = pe_find_resource(data_set->resources, container_id);
if (container) {
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
} else {
pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
/* generate remote nodes from resource config before unpacking resources */
for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
const char *new_node_id = NULL;
/* first check if this is a bare metal remote node. Bare metal remote nodes
* are defined as a resource primitive only. */
if (xml_contains_remote_node(xml_obj)) {
new_node_id = ID(xml_obj);
/* The "pe_find_node" check is here to make sure we don't iterate over
* an expanded node that has already been added to the node list. */
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found baremetal remote node %s in container resource %s", new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
continue;
}
/* Now check for guest remote nodes.
* guest remote nodes are defined within a resource primitive.
* Example1: a vm resource might be configured as a remote node.
* Example2: a vm resource might be configured within a group to be a remote node.
* Note: right now we only support guest remote nodes as a standalone primitive
* or a primitive within a group. No cloned primitives can be a guest remote node
* right now */
if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, TRUE)) {
/* expands a metadata defined remote resource into the xml config
* as an actual rsc primitive to be unpacked later. */
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found guest remote node %s in container resource %s", new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
continue;
} else if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, TRUE)) {
xmlNode *xml_obj2 = NULL;
/* search through a group to see if any of the primitive contain a remote node. */
for (xml_obj2 = __xml_first_child(xml_obj); xml_obj2 != NULL; xml_obj2 = __xml_next_element(xml_obj2)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found guest remote node %s in container resource %s which is in group %s", new_node_id, ID(xml_obj2), ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the PE calculations.
*/
static void
link_rsc2remotenode(pe_working_set_t *data_set, resource_t *new_rsc)
{
node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
if (is_set(data_set->flags, pe_flag_quick_location)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
print_resource(LOG_DEBUG_3, "Linking remote-node connection resource, ", new_rsc, FALSE);
remote_node = pe_find_node(data_set->nodes, new_rsc->id);
CRM_CHECK(remote_node != NULL, return;);
remote_node->details->remote_rsc = new_rsc;
/* If this is a baremetal remote-node (no container resource
* associated with it) then we need to handle startup fencing the same way
* as cluster nodes. */
if (new_rsc->container == NULL) {
handle_startup_fencing(data_set, remote_node);
} else {
/* At this point we know if the remote node is a container or baremetal
* remote node, update the #kind attribute if a container is involved */
g_hash_table_replace(remote_node->details->attrs, strdup("#kind"), strdup("container"));
}
}
static void
destroy_tag(gpointer data)
{
tag_t *tag = data;
if (tag) {
free(tag->id);
g_list_free_full(tag->refs, free);
free(tag);
}
}
gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
GListPtr gIter = NULL;
data_set->template_rsc_sets =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
destroy_tag);
for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
resource_t *new_rsc = NULL;
if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) {
const char *template_id = ID(xml_obj);
if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets,
template_id, NULL, NULL) == FALSE) {
/* Record the template's ID so we at least know that it exists. */
g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL);
}
continue;
}
crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj));
if (common_unpack(xml_obj, &new_rsc, NULL, data_set)) {
data_set->resources = g_list_append(data_set->resources, new_rsc);
print_resource(LOG_DEBUG_3, "Added ", new_rsc, FALSE);
} else {
crm_config_err("Failed unpacking %s %s",
crm_element_name(xml_obj), crm_element_value(xml_obj, XML_ATTR_ID));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
}
}
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
setup_container(rsc, data_set);
link_rsc2remotenode(data_set, rsc);
}
data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);
if (is_set(data_set->flags, pe_flag_quick_location)) {
/* Ignore */
} else if (is_set(data_set->flags, pe_flag_stonith_enabled)
&& is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
crm_config_err("Resource start-up disabled since no STONITH resources have been defined");
crm_config_err("Either configure some or disable STONITH with the stonith-enabled option");
crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
gboolean
unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
{
xmlNode *xml_tag = NULL;
data_set->tags =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_tag);
for (xml_tag = __xml_first_child(xml_tags); xml_tag != NULL; xml_tag = __xml_next_element(xml_tag)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = ID(xml_tag);
if (crm_str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, TRUE) == FALSE) {
continue;
}
if (tag_id == NULL) {
crm_config_err("Failed unpacking %s: %s should be specified",
crm_element_name(xml_tag), XML_ATTR_ID);
continue;
}
for (xml_obj_ref = __xml_first_child(xml_tag); xml_obj_ref != NULL; xml_obj_ref = __xml_next_element(xml_obj_ref)) {
const char *obj_ref = ID(xml_obj_ref);
if (crm_str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, TRUE) == FALSE) {
continue;
}
if (obj_ref == NULL) {
crm_config_err("Failed unpacking %s for tag %s: %s should be specified",
crm_element_name(xml_obj_ref), tag_id, XML_ATTR_ID);
continue;
}
if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
}
return TRUE;
}
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
{
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
ticket_t *ticket = NULL;
ticket_id = ID(xml_ticket);
if (ticket_id == NULL || strlen(ticket_id) == 0) {
return FALSE;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, data_set);
if (ticket == NULL) {
return FALSE;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_ticket, prop_name);
if (crm_str_eq(prop_name, XML_ATTR_ID, TRUE)) {
continue;
}
g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
}
granted = g_hash_table_lookup(ticket->state, "granted");
if (granted && crm_is_true(granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, "last-granted");
if (last_granted) {
ticket->last_granted = crm_parse_int(last_granted, 0);
}
standby = g_hash_table_lookup(ticket->state, "standby");
if (standby && crm_is_true(standby)) {
ticket->standby = TRUE;
if (ticket->granted) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
ticket->standby = FALSE;
}
crm_trace("Done with ticket state for %s", ticket_id);
return TRUE;
}
static gboolean
unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
for (xml_obj = __xml_first_child(xml_tickets); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE) == FALSE) {
continue;
}
unpack_ticket_state(xml_obj, data_set);
}
return TRUE;
}
/* Compatibility with the deprecated ticket state section:
* "/cib/status/tickets/instance_attributes" */
static void
get_ticket_state_legacy(gpointer key, gpointer value, gpointer user_data)
{
const char *long_key = key;
char *state_key = NULL;
const char *granted_prefix = "granted-ticket-";
const char *last_granted_prefix = "last-granted-";
static int granted_prefix_strlen = 0;
static int last_granted_prefix_strlen = 0;
const char *ticket_id = NULL;
const char *is_granted = NULL;
const char *last_granted = NULL;
const char *sep = NULL;
ticket_t *ticket = NULL;
pe_working_set_t *data_set = user_data;
if (granted_prefix_strlen == 0) {
granted_prefix_strlen = strlen(granted_prefix);
}
if (last_granted_prefix_strlen == 0) {
last_granted_prefix_strlen = strlen(last_granted_prefix);
}
if (strstr(long_key, granted_prefix) == long_key) {
ticket_id = long_key + granted_prefix_strlen;
if (strlen(ticket_id)) {
state_key = strdup("granted");
is_granted = value;
}
} else if (strstr(long_key, last_granted_prefix) == long_key) {
ticket_id = long_key + last_granted_prefix_strlen;
if (strlen(ticket_id)) {
state_key = strdup("last-granted");
last_granted = value;
}
} else if ((sep = strrchr(long_key, '-'))) {
ticket_id = sep + 1;
state_key = strndup(long_key, strlen(long_key) - strlen(sep));
}
if (ticket_id == NULL || strlen(ticket_id) == 0) {
free(state_key);
return;
}
if (state_key == NULL || strlen(state_key) == 0) {
free(state_key);
return;
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, data_set);
if (ticket == NULL) {
free(state_key);
return;
}
}
g_hash_table_replace(ticket->state, state_key, strdup(value));
if (is_granted) {
if (crm_is_true(is_granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
} else if (last_granted) {
ticket->last_granted = crm_parse_int(last_granted, 0);
}
}
static void
unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t * data_set)
{
const char *resource_discovery_enabled = NULL;
xmlNode *attrs = NULL;
resource_t *rsc = NULL;
const char *shutdown = NULL;
if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
return;
}
if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
return;
}
crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname);
this_node->details->remote_maintenance =
crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0");
rsc = this_node->details->remote_rsc;
if (this_node->details->remote_requires_reset == FALSE) {
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
}
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
add_node_attrs(attrs, this_node, TRUE, data_set);
shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN);
if (shutdown != NULL && safe_str_neq("0", shutdown)) {
crm_info("Node %s is shutting down", this_node->details->uname);
this_node->details->shutdown = TRUE;
if (rsc) {
rsc->next_role = RSC_ROLE_STOPPED;
}
}
if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
crm_info("Node %s is in standby-mode", this_node->details->uname);
this_node->details->standby = TRUE;
}
if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance")) ||
(rsc && !is_set(rsc->flags, pe_rsc_managed))) {
crm_info("Node %s is in maintenance-mode", this_node->details->uname);
this_node->details->maintenance = TRUE;
}
resource_discovery_enabled = g_hash_table_lookup(this_node->details->attrs, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
if (is_baremetal_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_warn("ignoring %s attribute on baremetal remote node %s, disabling resource discovery requires stonith to be enabled.",
XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
} else {
/* If we're here, this is either a baremetal node with fencing enabled,
* or a container node, where fencing doesn't matter: container nodes are
* 'fenced' by recovering the container resource regardless of whether
* fencing is enabled. */
crm_info("Node %s has resource discovery disabled", this_node->details->uname);
this_node->details->rsc_discovery_enabled = FALSE;
}
}
}
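/* Process lrm history for every node whose state we can trust, returning
 * TRUE if anything new was unpacked. unpack_status() calls this in a loop
 * until it returns FALSE, then once more (with fence set when stonith is
 * enabled) to catch any nodes not yet processed.
 */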
static bool
unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set)
{
bool changed = false;
xmlNode *lrm_rsc = NULL;
for (xmlNode *state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
const char *id = NULL;
const char *uname = NULL;
node_t *this_node = NULL;
bool process = FALSE;
if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
continue;
}
id = crm_element_value(state, XML_ATTR_ID);
uname = crm_element_value(state, XML_ATTR_UNAME);
this_node = pe_find_node_any(data_set->nodes, id, uname);
if (this_node == NULL) {
crm_info("Node %s is unknown", id);
continue;
} else if (this_node->details->unpacked) {
crm_info("Node %s is already processed", id);
continue;
} else if (is_remote_node(this_node) == FALSE && is_set(data_set->flags, pe_flag_stonith_enabled)) {
// A redundant test, but preserves the order for regression tests
process = TRUE;
} else if (is_remote_node(this_node)) {
resource_t *rsc = this_node->details->remote_rsc;
if (fence || (rsc && rsc->role == RSC_ROLE_STARTED)) {
determine_remote_online_status(data_set, this_node);
unpack_handle_remote_attrs(this_node, state, data_set);
process = TRUE;
}
} else if (this_node->details->online) {
process = TRUE;
} else if (fence) {
process = TRUE;
}
if(process) {
crm_trace("Processing lrm resource entries on %shealthy%s node: %s",
fence?"un":"", is_remote_node(this_node)?" remote":"",
this_node->details->uname);
changed = TRUE;
this_node->details->unpacked = TRUE;
lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
unpack_lrm_resources(this_node, lrm_rsc, data_set);
}
}
return changed;
}
/* remove nodes that are down, stopping */
/* create +ve rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
unpack_status(xmlNode * status, pe_working_set_t * data_set)
{
const char *id = NULL;
const char *uname = NULL;
xmlNode *state = NULL;
node_t *this_node = NULL;
crm_trace("Beginning unpack");
if (data_set->tickets == NULL) {
data_set->tickets =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
}
for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) {
xmlNode *xml_tickets = state;
GHashTable *state_hash = NULL;
/* Compatibility with the deprecated ticket state section:
* Unpack the attributes in the deprecated "/cib/status/tickets/instance_attributes" if it exists. */
state_hash =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
g_hash_destroy_str);
unpack_instance_attributes(data_set->input, xml_tickets, XML_TAG_ATTR_SETS, NULL,
state_hash, NULL, TRUE, data_set->now);
g_hash_table_foreach(state_hash, get_ticket_state_legacy, data_set);
if (state_hash) {
g_hash_table_destroy(state_hash);
}
/* Unpack the new "/cib/status/tickets/ticket_state"s */
unpack_tickets_state(xml_tickets, data_set);
}
if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) {
xmlNode *attrs = NULL;
const char *resource_discovery_enabled = NULL;
id = crm_element_value(state, XML_ATTR_ID);
uname = crm_element_value(state, XML_ATTR_UNAME);
this_node = pe_find_node_any(data_set->nodes, id, uname);
if (uname == NULL) {
/* error */
continue;
} else if (this_node == NULL) {
crm_config_warn("Node %s in status section no longer exists", uname);
continue;
} else if (is_remote_node(this_node)) {
/* online state for remote nodes is determined by the
* rsc state after all the unpacking is done. we do however
* need to mark whether or not the node has been fenced as this plays
* a role during unpacking cluster node resource state */
this_node->details->remote_was_fenced =
crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0");
continue;
}
crm_trace("Processing node id=%s, uname=%s", id, uname);
/* Mark the node as provisionally clean
* - at least we have seen it in the current cluster's lifetime
*/
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
add_node_attrs(attrs, this_node, TRUE, data_set);
if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
crm_info("Node %s is in standby-mode", this_node->details->uname);
this_node->details->standby = TRUE;
}
if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance"))) {
crm_info("Node %s is in maintenance-mode", this_node->details->uname);
this_node->details->maintenance = TRUE;
}
resource_discovery_enabled = g_hash_table_lookup(this_node->details->attrs, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes",
XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
}
crm_trace("determining node state");
determine_online_status(state, this_node, data_set);
if (this_node->details->online && data_set->no_quorum_policy == no_quorum_suicide) {
/* Everything else should flow from this automatically
* At least until the PE becomes able to migrate off healthy resources
*/
- pe_fence_node(data_set, this_node, "because the cluster does not have quorum");
+ pe_fence_node(data_set, this_node, "cluster does not have quorum");
}
}
}
while(unpack_node_loop(status, FALSE, data_set)) {
crm_trace("Start another loop");
}
// Now catch any nodes we didn't see
unpack_node_loop(status, is_set(data_set->flags, pe_flag_stonith_enabled), data_set);
for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *this_node = gIter->data;
if (this_node == NULL) {
continue;
} else if(is_remote_node(this_node) == FALSE) {
continue;
} else if(this_node->details->unpacked) {
continue;
}
determine_remote_online_status(data_set, this_node);
}
return TRUE;
}
static gboolean
determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
node_t * this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
if (!crm_is_true(in_cluster)) {
crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster));
} else if (safe_str_eq(is_peer, ONLINESTATUS)) {
if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) {
online = TRUE;
} else {
crm_debug("Node is not ready to run resources: %s", join);
}
} else if (this_node->details->expected_up == FALSE) {
crm_trace("CRMd is down: in_cluster=%s", crm_str(in_cluster));
crm_trace("\tis_peer=%s, join=%s, expected=%s",
crm_str(is_peer), crm_str(join), crm_str(exp_state));
} else {
/* mark it unclean */
- pe_fence_node(data_set, this_node, "because node is unexpectedly down");
+ pe_fence_node(data_set, this_node, "peer is unexpectedly down");
crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s",
crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state));
}
return online;
}
static gboolean
determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
node_t * this_node)
{
gboolean online = FALSE;
gboolean do_terminate = FALSE;
const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
const char *terminate = g_hash_table_lookup(this_node->details->attrs, "terminate");
/*
- XML_NODE_IN_CLUSTER ::= true|false
- XML_NODE_IS_PEER ::= true|false|online|offline
- XML_NODE_JOIN_STATE ::= member|down|pending|banned
- XML_NODE_EXPECTED ::= member|down
*/
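/* For reference, a node_state entry looks something like (sketch):
 *   <node_state id="1" uname="node1" in_ccm="true" crmd="online"
 *               join="member" expected="member"/>
 */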
if (crm_is_true(terminate)) {
do_terminate = TRUE;
} else if (terminate != NULL && strlen(terminate) > 0) {
/* could be a time() value */
char t = terminate[0];
if (t != '0' && isdigit(t)) {
do_terminate = TRUE;
}
}
crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d",
this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
crm_str(join), crm_str(exp_state), do_terminate);
online = crm_is_true(in_cluster);
if (safe_str_eq(is_peer, ONLINESTATUS)) {
is_peer = XML_BOOLEAN_YES;
}
if (exp_state == NULL) {
exp_state = CRMD_JOINSTATE_DOWN;
}
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", this_node->details->uname);
/* Slightly different criteria since we can't shut down a dead peer */
online = crm_is_true(is_peer);
} else if (in_cluster == NULL) {
- pe_fence_node(data_set, this_node, "because the peer has not been seen by the cluster");
+ pe_fence_node(data_set, this_node, "peer has not been seen by the cluster");
} else if (safe_str_eq(join, CRMD_JOINSTATE_NACK)) {
- pe_fence_node(data_set, this_node, "because it failed the pacemaker membership criteria");
+ pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria");
} else if (do_terminate == FALSE && safe_str_eq(exp_state, CRMD_JOINSTATE_DOWN)) {
if (crm_is_true(in_cluster) || crm_is_true(is_peer)) {
crm_info("- Node %s is not ready to run resources", this_node->details->uname);
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up", this_node->details->uname);
}
} else if (do_terminate && safe_str_eq(join, CRMD_JOINSTATE_DOWN)
&& crm_is_true(in_cluster) == FALSE && crm_is_true(is_peer) == FALSE) {
crm_info("Node %s was just shot", this_node->details->uname);
online = FALSE;
} else if (crm_is_true(in_cluster) == FALSE) {
- pe_fence_node(data_set, this_node, "because the node is no longer part of the cluster");
+ pe_fence_node(data_set, this_node, "peer is no longer part of the cluster");
} else if (crm_is_true(is_peer) == FALSE) {
- pe_fence_node(data_set, this_node, "because our peer process is no longer available");
+ pe_fence_node(data_set, this_node, "peer process is no longer available");
/* Everything is running at this point, now check join state */
} else if (do_terminate) {
- pe_fence_node(data_set, this_node, "because termination was requested");
+ pe_fence_node(data_set, this_node, "termination was requested");
} else if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) {
crm_info("Node %s is active", this_node->details->uname);
} else if (safe_str_eq(join, CRMD_JOINSTATE_PENDING)
|| safe_str_eq(join, CRMD_JOINSTATE_DOWN)) {
crm_info("Node %s is not ready to run resources", this_node->details->uname);
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
- pe_fence_node(data_set, this_node, "because the peer was in an unknown state");
+ pe_fence_node(data_set, this_node, "peer was in an unknown state");
crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d",
this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown);
}
return online;
}
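/* A remote node's online status is derived from its connection resource:
 * started -> online; transitioning start->stop -> shutting down; connection
 * (or container) failed, stopped, or hosted on an unclean node -> offline,
 * possibly also requiring a connection reset.
 */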
static gboolean
determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node)
{
resource_t *rsc = this_node->details->remote_rsc;
resource_t *container = NULL;
pe_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
* be NULL. Consider it an offline remote node in that case.
*/
if (rsc == NULL) {
this_node->details->online = FALSE;
goto remote_online_done;
}
container = rsc->container;
if (container && (g_list_length(rsc->running_on) == 1)) {
host = rsc->running_on->data;
}
/* If the resource is currently started, mark it online. */
if (rsc->role == RSC_ROLE_STARTED) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if(container && is_set(container->flags, pe_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
} else if(is_set(rsc->flags, pe_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
} else if (rsc->role == RSC_ROLE_STOPPED
|| (container && container->role == RSC_ROLE_STOPPED)) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = FALSE;
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
}
remote_online_done:
crm_trace("Remote node %s online=%s",
this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
return this_node->details->online;
}
gboolean
determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set_t * data_set)
{
gboolean online = FALSE;
const char *shutdown = NULL;
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
if (this_node == NULL) {
crm_config_err("No node to check");
return online;
}
this_node->details->shutdown = FALSE;
this_node->details->expected_up = FALSE;
shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN);
if (shutdown != NULL && safe_str_neq("0", shutdown)) {
this_node->details->shutdown = TRUE;
} else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) {
this_node->details->expected_up = TRUE;
}
if (this_node->details->type == node_ping) {
this_node->details->unclean = FALSE;
online = FALSE; /* As far as resource management is concerned,
* the node is safely offline.
* Anyone caught abusing this logic will be shot
*/
} else if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
online = determine_online_status_no_fencing(data_set, node_state, this_node);
} else {
online = determine_online_status_fencing(data_set, node_state, this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->fixed = TRUE;
this_node->weight = -INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->fixed = TRUE;
this_node->weight = -INFINITY;
}
if (this_node->details->type == node_ping) {
crm_info("Node %s is not a pacemaker node", this_node->details->uname);
} else if (this_node->details->unclean) {
pe_proc_warn("Node %s is unclean", this_node->details->uname);
} else if (this_node->details->online) {
crm_info("Node %s is %s", this_node->details->uname,
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
this_node->details->standby ? "standby" :
this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("Node %s is offline", this_node->details->uname);
}
return online;
}
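/* Strip a clone instance suffix, e.g. (sketch) "myrsc:1" -> "myrsc";
 * a name with no ":<n>" suffix is returned as a plain copy.
 */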
char *
clone_strip(const char *last_rsc_id)
{
int lpc = 0;
char *zero = NULL;
CRM_CHECK(last_rsc_id != NULL, return NULL);
lpc = strlen(last_rsc_id);
while (--lpc > 0) {
switch (last_rsc_id[lpc]) {
case 0:
crm_err("Empty string: %s", last_rsc_id);
return NULL;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
zero = calloc(1, lpc + 1);
memcpy(zero, last_rsc_id, lpc);
zero[lpc] = 0;
return zero;
default:
goto done;
}
}
done:
zero = strdup(last_rsc_id);
return zero;
}
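/* Normalize a resource name to its :0 clone instance, e.g. (sketch)
 * "myrsc:3" -> "myrsc:0" and "myrsc" -> "myrsc:0".
 */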
char *
clone_zero(const char *last_rsc_id)
{
int lpc = 0;
char *zero = NULL;
CRM_CHECK(last_rsc_id != NULL, return NULL);
if (last_rsc_id != NULL) {
lpc = strlen(last_rsc_id);
}
while (--lpc > 0) {
switch (last_rsc_id[lpc]) {
case 0:
return NULL;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
zero = calloc(1, lpc + 3);
memcpy(zero, last_rsc_id, lpc);
zero[lpc] = ':';
zero[lpc + 1] = '0';
zero[lpc + 2] = 0;
return zero;
default:
goto done;
}
}
done:
lpc = strlen(last_rsc_id);
zero = calloc(1, lpc + 3);
memcpy(zero, last_rsc_id, lpc);
zero[lpc] = ':';
zero[lpc + 1] = '0';
zero[lpc + 2] = 0;
crm_trace("%s -> %s", last_rsc_id, zero);
return zero;
}
static resource_t *
create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
resource_t *rsc = NULL;
xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
copy_in_properties(xml_rsc, rsc_entry);
crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pe_find_node(data_set->nodes, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
}
link_rsc2remotenode(data_set, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
set_bit(rsc->flags, pe_rsc_orphan_container_filler);
}
set_bit(rsc->flags, pe_rsc_orphan);
data_set->resources = g_list_append(data_set->resources, rsc);
return rsc;
}
extern resource_t *create_child_clone(resource_t * rsc, int sub_id, pe_working_set_t * data_set);
static resource_t *
find_anonymous_clone(pe_working_set_t * data_set, node_t * node, resource_t * parent,
const char *rsc_id)
{
GListPtr rIter = NULL;
resource_t *rsc = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(parent != NULL);
CRM_ASSERT(pe_rsc_is_clone(parent));
CRM_ASSERT(is_not_set(parent->flags, pe_rsc_unique));
/* Find an instance active (or partially active for grouped clones) on the specified node */
pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GListPtr nIter = NULL;
GListPtr locations = NULL;
resource_t *child = rIter->data;
child->fns->location(child, &locations, TRUE);
if (locations == NULL) {
pe_rsc_trace(child, "Resource %s, skip inactive", child->id);
continue;
}
for (nIter = locations; nIter && rsc == NULL; nIter = nIter->next) {
node_t *childnode = nIter->data;
if (childnode->details == node->details) {
/* ->find_rsc() because we might be a cloned group */
rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
if(rsc) {
pe_rsc_trace(rsc, "Resource %s, active", rsc->id);
}
}
/* Keep this block, it means we'll do the right thing if
* anyone toggles the unique flag to 'off'
*/
if (rsc && rsc->running_on) {
crm_notice("/Anonymous/ clone %s is already running on %s",
parent->id, node->details->uname);
skip_inactive = TRUE;
rsc = NULL;
}
}
g_list_free(locations);
}
/* Find an inactive instance */
if (skip_inactive == FALSE) {
pe_rsc_trace(parent, "Looking for %s anywhere", rsc_id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GListPtr locations = NULL;
resource_t *child = rIter->data;
if (is_set(child->flags, pe_rsc_block)) {
pe_rsc_trace(child, "Skip: blocked in stopped state");
continue;
}
child->fns->location(child, &locations, TRUE);
if (locations == NULL) {
/* ->find_rsc() because we might be a cloned group */
rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
pe_rsc_trace(parent, "Resource %s, empty slot", rsc->id);
}
g_list_free(locations);
}
}
if (rsc == NULL) {
/* Create an extra orphan */
resource_t *top = create_child_clone(parent, -1, data_set);
/* ->find_rsc() because we might be a cloned group */
rsc = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
CRM_ASSERT(rsc != NULL);
pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id,
node->details->uname);
}
if (safe_str_neq(rsc_id, rsc->id)) {
pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, node->details->uname, rsc->id,
is_set(rsc->flags, pe_rsc_orphan) ? " (ORPHAN)" : "");
}
return rsc;
}
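/*!
 * \internal
 * \brief Find the resource an lrm_resource history entry belongs to
 *
 * Handles anonymous clone instance names and container children; returns
 * NULL if the configured resource is no longer a primitive, so the obsolete
 * history entry is skipped.
 */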
static resource_t *
unpack_find_resource(pe_working_set_t * data_set, node_t * node, const char *rsc_id,
xmlNode * rsc_entry)
{
resource_t *rsc = NULL;
resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(data_set->resources, rsc_id);
/* no match */
if (rsc == NULL) {
/* Even when clone-max=0, we still create a single :0 orphan to match against */
char *tmp = clone_zero(rsc_id);
resource_t *clone0 = pe_find_resource(data_set->resources, tmp);
if (clone0 && is_not_set(clone0->flags, pe_rsc_unique)) {
rsc = clone0;
} else {
crm_trace("%s is not known as %s either", rsc_id, tmp);
}
parent = uber_parent(clone0);
free(tmp);
crm_trace("%s not found: %s", rsc_id, parent ? parent->id : "orphan");
} else if (rsc->variant > pe_native) {
crm_trace("%s is no longer a primitive resource, the lrm_resource entry is obsolete",
rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if(parent && parent->parent) {
rsc = find_container_child(rsc_id, rsc, node);
} else if (pe_rsc_is_clone(parent)) {
if (is_not_set(parent->flags, pe_rsc_unique)) {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(data_set, node, parent, base);
CRM_ASSERT(rsc != NULL);
free(base);
}
if (rsc && safe_str_neq(rsc_id, rsc->id)) {
free(rsc->clone_name);
rsc->clone_name = strdup(rsc_id);
}
}
return rsc;
}
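/*!
 * \internal
 * \brief Create a placeholder for a history entry with no configuration
 *
 * If stopping orphans (stop-orphan-resources) is disabled, the orphan is left
 * unmanaged; otherwise it is banned from running anywhere.
 */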
static resource_t *
process_orphan_resource(xmlNode * rsc_entry, node_t * node, pe_working_set_t * data_set)
{
resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname);
rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
CRM_CHECK(rsc != NULL, return NULL);
if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
clear_bit(rsc->flags, pe_rsc_managed);
} else {
print_resource(LOG_DEBUG_3, "Added orphan", rsc, FALSE);
resource_location(rsc, NULL, -INFINITY, "__orphan_dont_run__", data_set);
}
return rsc;
}
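/*!
 * \internal
 * \brief Apply the consequences of a resource's unpacked state
 *
 * Records where the resource is known, schedules fencing when a managed
 * resource appears active on an offline node, and translates the current
 * on-fail policy into node and resource changes.
 */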
static void
process_rsc_state(resource_t * rsc, node_t * node,
enum action_fail_response on_fail,
xmlNode * migrate_op, pe_working_set_t * data_set)
{
node_t *tmpnode = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail));
/* process current state */
if (rsc->role != RSC_ROLE_UNKNOWN) {
resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
node_t *n = node_copy(node);
pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name,
n->details->uname);
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
if (is_set(iter->flags, pe_rsc_unique)) {
break;
}
iter = iter->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if (rsc->role > RSC_ROLE_STOPPED
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
&& is_set(rsc->flags, pe_rsc_managed)) {
char *reason = NULL;
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by stonithd). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (is_container_remote_node(node)) {
set_bit(rsc->flags, pe_rsc_failed);
should_fence = TRUE;
} else if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
if (is_baremetal_remote_node(node) && node->details->remote_rsc && is_not_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
/* setting unseen = true means that fencing of the remote node will
* only occur if the connection resource is not going to start somewhere.
* This allows connection resources on a failed cluster-node to move to
* another node without requiring the baremetal remote nodes to be fenced
* as well. */
node->details->unseen = TRUE;
- reason = crm_strdup_printf("because %s is active there. Fencing will be revoked if remote-node connection can be re-established on another cluster-node.", rsc->id);
+ reason = crm_strdup_printf("%s is active there. Fencing will be revoked if remote-node connection can be re-established on another cluster-node.", rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
- reason = crm_strdup_printf("because %s is thought to be active there", rsc->id);
+ reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(data_set, node, reason);
}
free(reason);
}
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = action_fail_ignore;
}
switch (on_fail) {
case action_fail_ignore:
/* nothing to do */
break;
case action_fail_fence:
/* treat it as if it is still running
* but also mark the node as unclean
*/
- pe_fence_node(data_set, node, "because of resource failure(s)");
+ pe_fence_node(data_set, node, "resource failure(s)");
break;
case action_fail_standby:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
case action_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
clear_bit(rsc->flags, pe_rsc_managed);
set_bit(rsc->flags, pe_rsc_block);
break;
case action_fail_migrate:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
break;
case action_fail_stop:
rsc->next_role = RSC_ROLE_STOPPED;
break;
case action_fail_recover:
if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
set_bit(rsc->flags, pe_rsc_failed);
stop_action(rsc, node, FALSE);
}
break;
case action_fail_restart_container:
set_bit(rsc->flags, pe_rsc_failed);
if (rsc->container) {
stop_action(rsc->container, node, FALSE);
} else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
stop_action(rsc, node, FALSE);
}
break;
case action_fail_reset_remote:
set_bit(rsc->flags, pe_rsc_failed);
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pe_find_node(data_set->nodes, rsc->id);
}
if (tmpnode &&
is_baremetal_remote_node(tmpnode) &&
tmpnode->details->remote_was_fenced == 0) {
/* The connection resource for the baremetal remote-node failed in a way
* that should result in fencing the remote-node. */
- pe_fence_node(data_set, tmpnode, "because of connection failure(s)");
+ pe_fence_node(data_set, tmpnode, "connection failure(s)");
}
}
/* require the stop action regardless of whether fencing is occurring or not. */
if (rsc->role > RSC_ROLE_STOPPED) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_interval) {
rsc->next_role = RSC_ROLE_STOPPED;
}
break;
}
/* ensure a remote-node connection failure forces an unclean remote-node
* to be fenced. By setting unseen = FALSE, the remote-node failure will
* result in a fencing operation regardless of whether we're going to attempt to
* reconnect to the remote-node in this transition or not. */
if (is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
tmpnode = pe_find_node(data_set->nodes, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
if (is_set(rsc->flags, pe_rsc_orphan)) {
if (is_set(rsc->flags, pe_rsc_managed)) {
crm_config_warn("Detected active orphan %s running on %s",
rsc->id, node->details->uname);
} else {
crm_config_warn("Cluster configured not to stop active orphans."
" %s must be stopped manually on %s",
rsc->id, node->details->uname);
}
}
native_add_running(rsc, node, data_set);
if (on_fail != action_fail_ignore) {
set_bit(rsc->flags, pe_rsc_failed);
}
} else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
/* Only do this for older status sections that included instance numbers.
* Otherwise, stopped instances will appear as orphans.
*/
pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
free(rsc->clone_name);
rsc->clone_name = NULL;
} else {
char *key = stop_key(rsc);
GListPtr possible_matches = find_actions(rsc->actions, key, node);
GListPtr gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
action_t *stop = (action_t *) gIter->data;
stop->flags |= pe_action_optional;
}
g_list_free(possible_matches);
free(key);
}
}
/* create active recurring operations as optional */
static void
process_recurring(node_t * node, resource_t * rsc,
int start_index, int stop_index,
GListPtr sorted_op_list, pe_working_set_t * data_set)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GListPtr gIter = sorted_op_list;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
int interval = 0;
char *key = NULL;
const char *id = ID(rsc_op);
const char *interval_s = NULL;
counter++;
if (node->details->online == FALSE) {
pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname);
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname);
continue;
} else if (counter < start_index) {
pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter);
continue;
}
interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL);
interval = crm_parse_int(interval_s, "0");
if (interval == 0) {
pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname);
continue;
}
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
if (safe_str_eq(status, "-1")) {
pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname);
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
/* create the action */
key = generate_op_key(rsc->id, task, interval);
pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname);
custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
}
}
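/*!
 * \internal
 * \brief Find the most recent start and stop in a sorted operation list
 *
 * A successful promote, demote or monitor can imply a start when no explicit
 * start or migrate_from is present.
 */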
void
calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_master_start = -1;
const char *task = NULL;
const char *status = NULL;
GListPtr gIter = sorted_op_list;
*stop_index = -1;
*start_index = -1;
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
if (safe_str_eq(task, CRMD_ACTION_STOP)
&& safe_str_eq(status, "0")) {
*stop_index = counter;
} else if (safe_str_eq(task, CRMD_ACTION_START) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index) && safe_str_eq(task, CRMD_ACTION_STATUS)) {
const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
if (safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) {
implied_monitor_start = counter;
}
} else if (safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
implied_master_start = counter;
}
}
if (*start_index == -1) {
if (implied_master_start != -1) {
*start_index = implied_master_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
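/*!
 * \internal
 * \brief Unpack one lrm_resource entry from the status section
 *
 * Finds (or creates) the resource, replays its operation history sorted by
 * call ID, recreates active recurring operations, and reconciles the
 * calculated next role with any configured target-role.
 */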
static resource_t *
unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
const char *task = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
resource_t *rsc = NULL;
GListPtr op_list = NULL;
GListPtr sorted_op_list = NULL;
xmlNode *migrate_op = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum action_fail_response on_fail = action_fail_ignore;
enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
crm_trace("[%s] Processing %s on %s",
crm_element_name(rsc_entry), rsc_id, node->details->uname);
/* extract operations */
op_list = NULL;
sorted_op_list = NULL;
for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
/* find the resource */
rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry);
if (rsc == NULL) {
rsc = process_orphan_resource(rsc_entry, node, data_set);
}
CRM_ASSERT(rsc != NULL);
/* process operations */
saved_role = rsc->role;
on_fail = action_fail_ignore;
rsc->role = RSC_ROLE_UNKNOWN;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
migrate_op = rsc_op;
}
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail, migrate_op, data_set);
if (get_target_role(rsc, &req_role)) {
if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s"
" with requested next role %s",
rsc->id, role2text(rsc->next_role), role2text(req_role));
rsc->next_role = req_role;
} else if (req_role > rsc->next_role) {
pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, role2text(rsc->next_role), role2text(req_role));
}
}
if (saved_role > rsc->role) {
rsc->role = saved_role;
}
return rsc;
}
static void
handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
{
xmlNode *rsc_entry = NULL;
for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
resource_t *rsc;
resource_t *container;
const char *rsc_id;
const char *container_id;
if (safe_str_neq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE)) {
continue;
}
container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER);
rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
if (container_id == NULL || rsc_id == NULL) {
continue;
}
container = pe_find_resource(data_set->resources, container_id);
if (container == NULL) {
continue;
}
rsc = pe_find_resource(data_set->resources, rsc_id);
if (rsc == NULL ||
is_set(rsc->flags, pe_rsc_orphan_container_filler) == FALSE ||
rsc->container != NULL) {
continue;
}
pe_rsc_trace(rsc, "Mapped orphaned rsc %s's container to %s", rsc->id, container_id);
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
}
}
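/*!
 * \internal
 * \brief Unpack all lrm_resource entries for one node
 *
 * Also recovers containers that are unexpectedly running while their
 * remote-node connection is not started, and maps orphaned container fillers
 * back to their containers once all resources on the node are known.
 */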
gboolean
unpack_lrm_resources(node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
{
xmlNode *rsc_entry = NULL;
gboolean found_orphaned_container_filler = FALSE;
GListPtr unexpected_containers = NULL;
GListPtr gIter = NULL;
resource_t *remote = NULL;
CRM_CHECK(node != NULL, return FALSE);
crm_trace("Unpacking resources on %s", node->details->uname);
for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
resource_t *rsc;
rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set);
if (!rsc) {
continue;
}
if (is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
found_orphaned_container_filler = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unexpectedly_running)) {
remote = rsc_contains_remote_node(data_set, rsc);
if (remote) {
unexpected_containers = g_list_append(unexpected_containers, remote);
}
}
}
}
/* If a container resource is unexpectedly up... and the remote-node
* connection resource for that container is not up, the entire container
* must be recovered. */
for (gIter = unexpected_containers; gIter != NULL; gIter = gIter->next) {
remote = (resource_t *) gIter->data;
if (remote->role != RSC_ROLE_STARTED) {
crm_warn("Recovering container resource %s. Resource is unexpectedly running and involves a remote-node.", remote->container->id);
set_bit(remote->container->flags, pe_rsc_failed);
}
}
/* now that all the resource state has been unpacked for this node
* we have to go back and map any orphaned container fillers to their
* container resource */
if (found_orphaned_container_filler) {
handle_orphaned_container_fillers(lrm_rsc_list, data_set);
}
g_list_free(unexpected_containers);
return TRUE;
}
static void
set_active(resource_t * rsc)
{
resource_t *top = uber_parent(rsc);
if (top && top->variant == pe_master) {
rsc->role = RSC_ROLE_SLAVE;
} else {
rsc->role = RSC_ROLE_STARTED;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
node_t *node = value;
int *score = user_data;
node->weight = *score;
}
#define STATUS_PATH_MAX 1024
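/*!
 * \internal
 * \brief Find a resource operation in a node's status section via XPath
 *
 * For migrate_to/migrate_from operations, the entry can additionally be
 * matched against the migration target or source.
 */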
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
pe_working_set_t * data_set)
{
int offset = 0;
char xpath[STATUS_PATH_MAX];
offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node);
offset +=
snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']",
resource);
/* Need to check against transition_magic too? */
if (source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) {
offset +=
snprintf(xpath + offset, STATUS_PATH_MAX - offset,
"/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op,
source);
} else if (source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) {
offset +=
snprintf(xpath + offset, STATUS_PATH_MAX - offset,
"/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op,
source);
} else {
offset +=
snprintf(xpath + offset, STATUS_PATH_MAX - offset,
"/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op);
}
CRM_LOG_ASSERT(offset > 0);
return get_xpath_object(xpath, data_set->input, LOG_DEBUG);
}
static void
unpack_rsc_migration(resource_t *rsc, node_t *node, xmlNode *xml_op, pe_working_set_t * data_set)
{
/*
* The normal sequence is (now): migrate_to(Src) -> migrate_from(Tgt) -> stop(Src)
*
* So if a migrate_to is followed by a stop, then we don't need to care what
* happened on the target node
*
* Without the stop, we need to look for a successful migrate_from.
* This would also imply we're no longer running on the source
*
* Without the stop, and without a migrate_from op we make sure the resource
* gets stopped on both source and target (assuming the target is up)
*
*/
int stop_id = 0;
int task_id = 0;
xmlNode *stop_op =
find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->id, NULL, data_set);
if (stop_op) {
crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
}
crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);
if (stop_op == NULL || stop_id < task_id) {
int from_rc = 0, from_status = 0;
const char *migrate_source =
crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *migrate_target =
crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
node_t *target = pe_find_node(data_set->nodes, migrate_target);
node_t *source = pe_find_node(data_set->nodes, migrate_source);
xmlNode *migrate_from =
find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source,
data_set);
rsc->role = RSC_ROLE_STARTED; /* can be master? */
if (migrate_from) {
crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d",
ID(migrate_from), migrate_target, from_status, from_rc);
}
if (migrate_from && from_rc == PCMK_OCF_OK
&& from_status == PCMK_LRM_OP_DONE) {
pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op),
migrate_source);
/* all good
* just need to arrange for the stop action to get sent
* but _without_ affecting the target somehow
*/
rsc->role = RSC_ROLE_STOPPED;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
} else if (migrate_from) { /* Failed */
if (target && target->details->online) {
pe_rsc_trace(rsc, "Marking active on %s %p %d", migrate_target, target,
target->details->online);
native_add_running(rsc, target, data_set);
}
} else { /* Pending or complete but erased */
if (target && target->details->online) {
pe_rsc_trace(rsc, "Marking active on %s %p %d", migrate_target, target,
target->details->online);
native_add_running(rsc, target, data_set);
if (source && source->details->online) {
/* If we make it here we have a partial migration. The migrate_to
* has completed but the migrate_from on the target has not. Hold on
* to the target and source on the resource. Later on if we detect that
* the resource is still going to run on that target, we may continue
* the migration */
rsc->partial_migration_target = target;
rsc->partial_migration_source = source;
}
} else {
/* Consider it failed here - forces a restart, prevents migration */
set_bit(rsc->flags, pe_rsc_failed);
clear_bit(rsc->flags, pe_rsc_allow_migrate);
}
}
}
}
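/*!
 * \internal
 * \brief Recompute a resource's location after a failed migration operation
 *
 * Marks the resource active on whichever end of the migration the operation
 * history shows it could still be running on.
 */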
static void
unpack_rsc_migration_failure(resource_t *rsc, node_t *node, xmlNode *xml_op, pe_working_set_t * data_set)
{
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
CRM_ASSERT(rsc);
if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
int stop_id = 0;
int migrate_id = 0;
const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
xmlNode *stop_op =
find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_source, NULL, data_set);
xmlNode *migrate_op =
find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, migrate_source, migrate_target,
data_set);
if (stop_op) {
crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
}
if (migrate_op) {
crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id);
}
/* Get our state right */
rsc->role = RSC_ROLE_STARTED; /* can be master? */
if (stop_op == NULL || stop_id < migrate_id) {
node_t *source = pe_find_node(data_set->nodes, migrate_source);
if (source && source->details->online) {
native_add_running(rsc, source, data_set);
}
}
} else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) {
int stop_id = 0;
int migrate_id = 0;
const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
xmlNode *stop_op =
find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_target, NULL, data_set);
xmlNode *migrate_op =
find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source,
data_set);
if (stop_op) {
crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
}
if (migrate_op) {
crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id);
}
/* Get our state right */
rsc->role = RSC_ROLE_STARTED; /* can be master? */
if (stop_op == NULL || stop_id < migrate_id) {
node_t *target = pe_find_node(data_set->nodes, migrate_target);
pe_rsc_trace(rsc, "Stop: %p %d, Migrated: %p %d", stop_op, stop_id, migrate_op,
migrate_id);
if (target && target->details->online) {
native_add_running(rsc, target, data_set);
}
} else if (migrate_op == NULL) {
/* Make sure it gets cleaned up, the stop may pre-date the migrate_from */
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
}
}
}
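/*!
 * \internal
 * \brief Add an operation to the failed-operations list
 *
 * Offline nodes and duplicate entries (same operation key and node) are
 * skipped.
 */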
static void
record_failed_op(xmlNode *op, node_t* node, pe_working_set_t * data_set)
{
xmlNode *xIter = NULL;
const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY);
if (node->details->online == FALSE) {
return;
}
for (xIter = data_set->failed->children; xIter; xIter = xIter->next) {
const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY);
const char *uname = crm_element_value(xIter, XML_ATTR_UNAME);
if(safe_str_eq(op_key, key) && safe_str_eq(uname, node->details->uname)) {
crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname);
return;
}
}
crm_trace("Adding entry %s on %s", op_key, node->details->uname);
crm_xml_add(op, XML_ATTR_UNAME, node->details->uname);
add_node_copy(data_set->failed, op);
}
static const char *get_op_key(xmlNode *xml_op)
{
const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
if(key == NULL) {
key = ID(xml_op);
}
return key;
}
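/*!
 * \internal
 * \brief Process a failed operation from the history
 *
 * Records the failure, escalates the on-fail handling as needed, and adjusts
 * the resource's role, next role and allowed nodes according to the
 * operation's on-fail policy.
 */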
static void
unpack_rsc_op_failure(resource_t * rsc, node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure,
enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
int interval = 0;
bool is_probe = FALSE;
action_t *action = NULL;
const char *key = get_op_key(xml_op);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
CRM_ASSERT(rsc);
*last_failure = xml_op;
crm_element_value_int(xml_op, XML_LRM_ATTR_INTERVAL, &interval);
if(interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) {
is_probe = TRUE;
pe_rsc_trace(rsc, "is a probe: %s", key);
}
if (rc != PCMK_OCF_NOT_INSTALLED || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
crm_warn("Processing failed op %s for %s on %s: %s (%d)",
task, rsc->id, node->details->uname, services_ocf_exitcode_str(rc),
rc);
record_failed_op(xml_op, node, data_set);
} else {
crm_trace("Processing failed op %s for %s on %s: %s (%d)",
task, rsc->id, node->details->uname, services_ocf_exitcode_str(rc),
rc);
}
action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) ||
(action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) ||
(action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) ||
(*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) {
pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail),
fail2text(action->on_fail), action->uuid, key);
*on_fail = action->on_fail;
}
if (safe_str_eq(task, CRMD_ACTION_STOP)) {
resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set);
} else if (safe_str_eq(task, CRMD_ACTION_MIGRATE) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
unpack_rsc_migration_failure(rsc, node, xml_op, data_set);
} else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
rsc->role = RSC_ROLE_MASTER;
} else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
/*
* Staying in role=master ends up putting the PE/TE into a loop.
* Setting role=slave is not dangerous because no master will be
* promoted until the failed resource has been fully stopped.
*/
if (action->on_fail == action_fail_block) {
rsc->role = RSC_ROLE_MASTER;
rsc->next_role = RSC_ROLE_STOPPED;
} else if(rc == PCMK_OCF_NOT_RUNNING) {
rsc->role = RSC_ROLE_STOPPED;
} else {
crm_warn("Forcing %s to stop after a failed demote action", rsc->id);
rsc->role = RSC_ROLE_SLAVE;
rsc->next_role = RSC_ROLE_STOPPED;
}
} else if (compare_version("2.0", op_version) > 0 && safe_str_eq(task, CRMD_ACTION_START)) {
crm_warn("Compatibility handling for failed op %s on %s", key, node->details->uname);
resource_location(rsc, node, -INFINITY, "__legacy_start__", data_set);
}
if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) {
/* leave stopped */
pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id);
rsc->role = RSC_ROLE_STOPPED;
} else if (rsc->role < RSC_ROLE_STARTED) {
pe_rsc_trace(rsc, "Setting %s active", rsc->id);
set_active(rsc);
}
pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
rsc->id, role2text(rsc->role),
node->details->unclean ? "true" : "false",
fail2text(action->on_fail), role2text(action->fail_role));
if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) {
rsc->next_role = action->fail_role;
}
if (action->fail_role == RSC_ROLE_STOPPED) {
int score = -INFINITY;
resource_t *fail_rsc = rsc;
if (fail_rsc->parent) {
resource_t *parent = uber_parent(fail_rsc);
if (pe_rsc_is_clone(parent)
&& is_not_set(parent->flags, pe_rsc_unique)) {
/* for clone and master resources, if a child fails on an operation
* with on-fail = stop, all the resources fail. Do this by preventing
* the parent from coming up again. */
fail_rsc = parent;
}
}
crm_warn("Making sure %s doesn't come up again", fail_rsc->id);
/* make sure it doesn't come up again */
g_hash_table_destroy(fail_rsc->allowed_nodes);
fail_rsc->allowed_nodes = node_hash_from_list(data_set->nodes);
g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
}
pe_free_action(action);
}
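/*!
 * \internal
 * \brief Map an operation's return code to an execution status
 *
 * Compares the return code against the expected value (when recorded) and
 * updates the resource role for return codes that imply one, such as
 * running in master mode.
 */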
static int
determine_op_status(
resource_t *rsc, int rc, int target_rc, node_t * node, xmlNode * xml_op, enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
int interval = 0;
int result = PCMK_LRM_OP_DONE;
const char *key = get_op_key(xml_op);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
bool is_probe = FALSE;
CRM_ASSERT(rsc);
crm_element_value_int(xml_op, XML_LRM_ATTR_INTERVAL, &interval);
if (interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) {
is_probe = TRUE;
}
if (target_rc >= 0 && target_rc != rc) {
result = PCMK_LRM_OP_ERROR;
pe_rsc_debug(rsc, "%s on %s returned '%s' (%d) instead of the expected value: '%s' (%d)",
key, node->details->uname,
services_ocf_exitcode_str(rc), rc,
services_ocf_exitcode_str(target_rc), target_rc);
}
/* we could clean this up significantly except for old LRMs and CRMs that
* didn't include target_rc and liked to remap status
*/
switch (rc) {
case PCMK_OCF_OK:
if (is_probe && target_rc == 7) {
result = PCMK_LRM_OP_DONE;
set_bit(rsc->flags, pe_rsc_unexpectedly_running);
pe_rsc_info(rsc, "Operation %s found resource %s active on %s",
task, rsc->id, node->details->uname);
/* legacy code for pre-0.6.5 operations */
} else if (target_rc < 0 && interval > 0 && rsc->role == RSC_ROLE_MASTER) {
/* catch status ops that return 0 instead of 8 while they
* are supposed to be in master mode
*/
result = PCMK_LRM_OP_ERROR;
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe || target_rc == rc || is_not_set(rsc->flags, pe_rsc_managed)) {
result = PCMK_LRM_OP_DONE;
rsc->role = RSC_ROLE_STOPPED;
/* clear any previous failure actions */
*on_fail = action_fail_ignore;
rsc->next_role = RSC_ROLE_UNKNOWN;
} else if (safe_str_neq(task, CRMD_ACTION_STOP)) {
result = PCMK_LRM_OP_ERROR;
}
break;
case PCMK_OCF_RUNNING_MASTER:
if (is_probe) {
result = PCMK_LRM_OP_DONE;
pe_rsc_info(rsc, "Operation %s found resource %s active in master mode on %s",
task, rsc->id, node->details->uname);
} else if (target_rc == rc) {
/* nothing to do */
} else if (target_rc >= 0) {
result = PCMK_LRM_OP_ERROR;
/* legacy code for pre-0.6.5 operations */
} else if (safe_str_neq(task, CRMD_ACTION_STATUS)
|| rsc->role != RSC_ROLE_MASTER) {
result = PCMK_LRM_OP_ERROR;
if (rsc->role != RSC_ROLE_MASTER) {
crm_err("%s reported %s in master mode on %s",
key, rsc->id, node->details->uname);
}
}
rsc->role = RSC_ROLE_MASTER;
break;
case PCMK_OCF_DEGRADED_MASTER:
case PCMK_OCF_FAILED_MASTER:
rsc->role = RSC_ROLE_MASTER;
result = PCMK_LRM_OP_ERROR;
break;
case PCMK_OCF_NOT_CONFIGURED:
result = PCMK_LRM_OP_ERROR_FATAL;
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
case PCMK_OCF_UNIMPLEMENT_FEATURE:
if (rc == PCMK_OCF_UNIMPLEMENT_FEATURE && interval > 0) {
result = PCMK_LRM_OP_NOTSUPPORTED;
break;
} else if (pe_can_fence(data_set, node) == FALSE
&& safe_str_eq(task, CRMD_ACTION_STOP)) {
/* If a stop fails and we can't fence, there's nothing else we can do */
pe_proc_err("No further recovery can be attempted for %s: %s action failed with '%s' (%d)",
rsc->id, task, services_ocf_exitcode_str(rc), rc);
clear_bit(rsc->flags, pe_rsc_managed);
set_bit(rsc->flags, pe_rsc_block);
}
result = PCMK_LRM_OP_ERROR_HARD;
break;
default:
if (result == PCMK_LRM_OP_DONE) {
crm_info("Treating %s (rc=%d) on %s as an ERROR",
key, rc, node->details->uname);
result = PCMK_LRM_OP_ERROR;
}
}
return result;
}
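/*!
 * \internal
 * \brief Check whether a failure should be expired or cleared
 *
 * A failure expires when failure-timeout has elapsed; a last_failure entry
 * can also be cleared when the resource parameters have changed or a
 * reconnect interval is set. Probes that returned a benign result are never
 * treated as expired.
 */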
static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNode *xml_op, pe_working_set_t * data_set)
{
bool expired = FALSE;
time_t last_failure = 0;
int interval = 0;
int failure_timeout = rsc->failure_timeout;
const char *key = get_op_key(xml_op);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *clear_reason = NULL;
/* clearing recurring monitor operation failures automatically
* needs to be carefully considered */
if (safe_str_eq(crm_element_value(xml_op, XML_LRM_ATTR_TASK), "monitor") &&
safe_str_neq(crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL), "0")) {
/* TODO: in the future we should consider not clearing recurring monitor
* op failures unless the last action for a resource was a "stop" action.
* Otherwise it is possible that clearing the monitor failure will result
* in the resource being in a nondeterministic state.
*
* For now we handle this potentially nondeterministic condition for remote
* node connection resources by not clearing a recurring monitor op failure
* until after the node has been fenced. */
if (is_set(data_set->flags, pe_flag_stonith_enabled) &&
(rsc->remote_reconnect_interval)) {
node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
if (remote_node && remote_node->details->remote_was_fenced == 0) {
if (strstr(ID(xml_op), "last_failure")) {
crm_info("Waiting to clear monitor failure for remote node %s until fencing has occurred", rsc->id);
}
/* disabling failure timeout for this operation because we believe
* fencing of the remote node should occur first. */
failure_timeout = 0;
}
}
}
if (failure_timeout > 0) {
int last_run = 0;
if (crm_element_value_int(xml_op, XML_RSC_OP_LAST_CHANGE, &last_run) == 0) {
time_t now = get_effective_time(data_set);
if (now > (last_run + failure_timeout)) {
expired = TRUE;
}
}
}
if (expired) {
if (failure_timeout > 0) {
int fc = get_failcount_full(node, rsc, &last_failure, FALSE, xml_op, data_set);
if(fc) {
if (get_failcount_full(node, rsc, &last_failure, TRUE, xml_op, data_set) == 0) {
clear_reason = "it expired";
} else {
expired = FALSE;
}
} else if (rsc->remote_reconnect_interval && strstr(ID(xml_op), "last_failure")) {
/* always clear last failure when reconnect interval is set */
clear_reason = "reconnect interval is set";
}
}
} else if (strstr(ID(xml_op), "last_failure") &&
((strcmp(task, "start") == 0) || (strcmp(task, "monitor") == 0))) {
op_digest_cache_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set);
if (digest_data->rc == RSC_DIGEST_UNKNOWN) {
crm_trace("rsc op %s/%s on node %s does not have a op digest to compare against", rsc->id,
key, node->details->id);
} else if (digest_data->rc != RSC_DIGEST_MATCH) {
clear_reason = "resource parameters have changed";
}
}
if (clear_reason != NULL) {
char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT,
node, FALSE, TRUE, data_set);
add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
rsc->id, node->details->uname, clear_reason, clear_op->uuid);
}
crm_element_value_int(xml_op, XML_LRM_ATTR_INTERVAL, &interval);
if(expired && interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) {
switch(rc) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_MASTER:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_MASTER:
/* Don't expire probes that return these values */
expired = FALSE;
break;
}
}
return expired;
}
int get_target_rc(xmlNode *xml_op)
{
int dummy = 0;
int target_rc = 0;
char *dummy_string = NULL;
const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, &dummy_string, &dummy, &dummy, &target_rc);
free(dummy_string);
return target_rc;
}
static enum action_fail_response
get_action_on_fail(resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set)
{
int result = action_fail_recover;
action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
result = action->on_fail;
pe_free_action(action);
return result;
}
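/*!
 * \internal
 * \brief Update a resource's role from a successful operation
 *
 * Also clears earlier failure handling when the result shows the previous
 * failure no longer applies.
 */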
static void
update_resource_state(resource_t * rsc, node_t * node, xmlNode * xml_op, const char * task, int rc,
xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
gboolean clear_past_failure = FALSE;
CRM_ASSERT(rsc);
CRM_ASSERT(xml_op);
if (rc == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = TRUE;
} else if (rc == PCMK_OCF_NOT_INSTALLED) {
rsc->role = RSC_ROLE_STOPPED;
} else if (safe_str_eq(task, CRMD_ACTION_STATUS)) {
if (last_failure) {
const char *op_key = get_op_key(xml_op);
const char *last_failure_key = get_op_key(last_failure);
if (safe_str_eq(op_key, last_failure_key)) {
clear_past_failure = TRUE;
}
}
if (rsc->role < RSC_ROLE_STARTED) {
set_active(rsc);
}
} else if (safe_str_eq(task, CRMD_ACTION_START)) {
rsc->role = RSC_ROLE_STARTED;
clear_past_failure = TRUE;
} else if (safe_str_eq(task, CRMD_ACTION_STOP)) {
rsc->role = RSC_ROLE_STOPPED;
clear_past_failure = TRUE;
} else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
rsc->role = RSC_ROLE_MASTER;
clear_past_failure = TRUE;
} else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
/* Demote from Master does not clear an error */
rsc->role = RSC_ROLE_SLAVE;
} else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
rsc->role = RSC_ROLE_STARTED;
clear_past_failure = TRUE;
} else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) {
unpack_rsc_migration(rsc, node, xml_op, data_set);
} else if (rsc->role < RSC_ROLE_STARTED) {
pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname);
set_active(rsc);
}
/* clear any previous failure actions */
if (clear_past_failure) {
switch (*on_fail) {
case action_fail_stop:
case action_fail_fence:
case action_fail_migrate:
case action_fail_standby:
pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop",
rsc->id, fail2text(*on_fail));
break;
case action_fail_block:
case action_fail_ignore:
case action_fail_recover:
case action_fail_restart_container:
*on_fail = action_fail_ignore;
rsc->next_role = RSC_ROLE_UNKNOWN;
break;
case action_fail_reset_remote:
if (rsc->remote_reconnect_interval == 0) {
/* when reconnect delay is not in use, the connection is allowed
* to start again after the remote node is fenced and completely
* stopped. Otherwise, with reconnect delay we wait for the failure
* to be cleared entirely before a reconnect can be attempted. */
*on_fail = action_fail_ignore;
rsc->next_role = RSC_ROLE_UNKNOWN;
}
break;
}
}
}
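/*!
 * \internal
 * \brief Unpack a single operation history entry
 *
 * Remaps degraded and legacy results, ignores or re-initiates expired
 * failures, and dispatches to the pending, success or failure handling
 * based on the operation's execution status.
 */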
gboolean
unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, xmlNode ** last_failure,
enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
int task_id = 0;
const char *key = NULL;
const char *task = NULL;
const char *task_key = NULL;
int rc = 0;
int status = PCMK_LRM_OP_PENDING-1;
int target_rc = get_target_rc(xml_op);
int interval = 0;
gboolean expired = FALSE;
resource_t *parent = rsc;
enum action_fail_response failure_strategy = action_fail_recover;
CRM_CHECK(rsc != NULL, return FALSE);
CRM_CHECK(node != NULL, return FALSE);
CRM_CHECK(xml_op != NULL, return FALSE);
task_key = get_op_key(xml_op);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc);
crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);
crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status);
crm_element_value_int(xml_op, XML_LRM_ATTR_INTERVAL, &interval);
CRM_CHECK(task != NULL, return FALSE);
CRM_CHECK(status <= PCMK_LRM_OP_NOT_INSTALLED, return FALSE);
CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return FALSE);
if (safe_str_eq(task, CRMD_ACTION_NOTIFY)) {
/* safe to ignore these */
return TRUE;
}
if (is_not_set(rsc->flags, pe_rsc_unique)) {
parent = uber_parent(rsc);
}
pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)",
task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role));
if (node->details->unclean) {
pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean."
" Further action depends on the value of the stop's on-fail attribute",
node->details->uname, rsc->id);
}
if (status == PCMK_LRM_OP_ERROR) {
/* Older versions set this if rc != 0 but it's up to us to decide */
status = PCMK_LRM_OP_DONE;
}
if(status != PCMK_LRM_OP_NOT_INSTALLED) {
expired = check_operation_expiry(rsc, node, rc, xml_op, data_set);
}
/* Degraded results are informational only, re-map them to their error-free equivalents */
if (rc == PCMK_OCF_DEGRADED && safe_str_eq(task, CRMD_ACTION_STATUS)) {
rc = PCMK_OCF_OK;
/* Add them to the failed list to highlight them for the user */
if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) {
crm_trace("Remapping %d to %d", PCMK_OCF_DEGRADED, PCMK_OCF_OK);
record_failed_op(xml_op, node, data_set);
}
} else if (rc == PCMK_OCF_DEGRADED_MASTER && safe_str_eq(task, CRMD_ACTION_STATUS)) {
rc = PCMK_OCF_RUNNING_MASTER;
/* Add them to the failed list to highlight them for the user */
if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) {
crm_trace("Remapping %d to %d", PCMK_OCF_DEGRADED_MASTER, PCMK_OCF_RUNNING_MASTER);
record_failed_op(xml_op, node, data_set);
}
}
if (expired && target_rc != rc) {
const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);
pe_rsc_debug(rsc, "Expired operation '%s' on %s returned '%s' (%d) instead of the expected value: '%s' (%d)",
key, node->details->uname,
services_ocf_exitcode_str(rc), rc,
services_ocf_exitcode_str(target_rc), target_rc);
if(interval == 0) {
crm_notice("Ignoring expired calculated failure %s (rc=%d, magic=%s) on %s",
task_key, rc, magic, node->details->uname);
goto done;
} else if(node->details->online && node->details->unclean == FALSE) {
crm_notice("Re-initiated expired calculated failure %s (rc=%d, magic=%s) on %s",
task_key, rc, magic, node->details->uname);
/* This is SO horrible, but we don't have access to CancelXmlOp() yet */
crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout");
goto done;
}
}
if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) {
status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set);
}
pe_rsc_trace(rsc, "Handling status: %d", status);
switch (status) {
case PCMK_LRM_OP_CANCELLED:
/* do nothing?? */
pe_err("Don't know what to do for cancelled ops yet");
break;
case PCMK_LRM_OP_PENDING:
if (safe_str_eq(task, CRMD_ACTION_START)) {
set_bit(rsc->flags, pe_rsc_start_pending);
set_active(rsc);
} else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
rsc->role = RSC_ROLE_MASTER;
} else if (safe_str_eq(task, CRMD_ACTION_MIGRATE) && node->details->unclean) {
/* If a pending migrate_to action is out on an unclean node,
* we have to force the stop action on the target. */
const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
node_t *target = pe_find_node(data_set->nodes, migrate_target);
if (target) {
stop_action(rsc, target, FALSE);
}
}
if (rsc->pending_task == NULL) {
if (safe_str_eq(task, CRMD_ACTION_STATUS) && interval == 0) {
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, uncomment this and the corresponding part of
* native.c:native_pending_task().
*/
/*rsc->pending_task = strdup("probe");*/
} else {
rsc->pending_task = strdup(task);
}
}
break;
case PCMK_LRM_OP_DONE:
pe_rsc_trace(rsc, "%s/%s completed on %s", rsc->id, task, node->details->uname);
update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set);
break;
case PCMK_LRM_OP_NOT_INSTALLED:
failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
if (failure_strategy == action_fail_ignore) {
crm_warn("Cannot ignore failed %s (status=%d, rc=%d) on %s: "
"Resource agent doesn't exist",
task_key, status, rc, node->details->uname);
/* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */
*on_fail = action_fail_migrate;
}
resource_location(parent, node, -INFINITY, "hard-error", data_set);
unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
break;
case PCMK_LRM_OP_ERROR:
case PCMK_LRM_OP_ERROR_HARD:
case PCMK_LRM_OP_ERROR_FATAL:
case PCMK_LRM_OP_TIMEOUT:
case PCMK_LRM_OP_NOTSUPPORTED:
failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
if ((failure_strategy == action_fail_ignore)
|| (failure_strategy == action_fail_restart_container
&& safe_str_eq(task, CRMD_ACTION_STOP))) {
crm_warn("Pretending the failure of %s (rc=%d) on %s succeeded",
task_key, rc, node->details->uname);
update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
set_bit(rsc->flags, pe_rsc_failure_ignored);
record_failed_op(xml_op, node, data_set);
if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
if(status == PCMK_LRM_OP_ERROR_HARD) {
do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE,
"Preventing %s from re-starting on %s: operation %s failed '%s' (%d)",
parent->id, node->details->uname,
task, services_ocf_exitcode_str(rc), rc);
resource_location(parent, node, -INFINITY, "hard-error", data_set);
} else if(status == PCMK_LRM_OP_ERROR_FATAL) {
crm_err("Preventing %s from re-starting anywhere: operation %s failed '%s' (%d)",
parent->id, task, services_ocf_exitcode_str(rc), rc);
resource_location(parent, NULL, -INFINITY, "fatal-error", data_set);
}
}
break;
}
done:
pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s", rsc->id, task, role2text(rsc->role), role2text(rsc->next_role));
return TRUE;
}
gboolean
add_node_attrs(xmlNode * xml_obj, node_t * node, gboolean overwrite, pe_working_set_t * data_set)
{
const char *cluster_name = NULL;
g_hash_table_insert(node->details->attrs,
strdup("#uname"), strdup(node->details->uname));
g_hash_table_insert(node->details->attrs, strdup("#" XML_ATTR_ID), strdup(node->details->id));
if (safe_str_eq(node->details->id, data_set->dc_uuid)) {
data_set->dc_node = node;
node->details->is_dc = TRUE;
g_hash_table_insert(node->details->attrs,
strdup("#" XML_ATTR_DC), strdup(XML_BOOLEAN_TRUE));
} else {
g_hash_table_insert(node->details->attrs,
strdup("#" XML_ATTR_DC), strdup(XML_BOOLEAN_FALSE));
}
cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
if (cluster_name) {
g_hash_table_insert(node->details->attrs, strdup("#cluster-name"), strdup(cluster_name));
}
unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL,
node->details->attrs, NULL, overwrite, data_set->now);
if (g_hash_table_lookup(node->details->attrs, "#site-name") == NULL) {
const char *site_name = g_hash_table_lookup(node->details->attrs, "site-name");
if (site_name) {
/* Prefix '#' to the key */
g_hash_table_insert(node->details->attrs, strdup("#site-name"), strdup(site_name));
} else if (cluster_name) {
/* Default to cluster-name if unset */
g_hash_table_insert(node->details->attrs, strdup("#site-name"), strdup(cluster_name));
}
}
return TRUE;
}
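/*!
 * \internal
 * \brief Collect the operation history for one resource on one node
 *
 * Entries are tagged with the resource and node names and sorted by call ID;
 * with active_filter, only operations since the last successful stop are
 * returned.
 */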
static GListPtr
extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GListPtr gIter = NULL;
GListPtr op_list = NULL;
GListPtr sorted_op_list = NULL;
/* extract operations */
op_list = NULL;
sorted_op_list = NULL;
for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
crm_xml_add(rsc_op, "resource", rsc);
crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* create active recurring operations as optional */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
crm_trace("Skipping %s: not active", ID(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", ID(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
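/*!
 * \internal
 * \brief Collect operation history entries matching a resource and/or node
 *
 * Each node's online status is determined first; offline nodes are skipped
 * unless fencing is enabled.
 */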
GListPtr
find_operations(const char *rsc, const char *node, gboolean active_filter,
pe_working_set_t * data_set)
{
GListPtr output = NULL;
GListPtr intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
node_t *this_node = NULL;
xmlNode *node_state = NULL;
for (node_state = __xml_first_child(status); node_state != NULL;
node_state = __xml_next_element(node_state)) {
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);
if (node != NULL && safe_str_neq(uname, node)) {
continue;
}
this_node = pe_find_node(data_set->nodes, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (is_remote_node(this_node)) {
determine_remote_online_status(data_set, this_node);
} else {
determine_online_status(node_state, this_node, data_set);
}
if (this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
/* offline nodes run no resources...
* unless stonith is enabled in which case we need to
* make sure rsc start events happen after the stonith
*/
xmlNode *lrm_rsc = NULL;
tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
for (lrm_rsc = __xml_first_child(tmp); lrm_rsc != NULL;
lrm_rsc = __xml_next_element(lrm_rsc)) {
if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) {
const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);
if (rsc != NULL && safe_str_neq(rsc_id, rsc)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
}
}
return output;
}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 7acd90c2d4..44fbb512d7 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,1967 +1,1967 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
pe_working_set_t *pe_dataset = NULL;
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
pe_working_set_t * data_set);
static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key,
gboolean include_disabled);
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] data_set Working set for cluster
* \param[in] node Name of node to check
*
* \return TRUE if node can be fenced, FALSE otherwise
*
* \note This function should only be called for cluster nodes and baremetal
* remote nodes; guest nodes are fenced by stopping their container
* resource, so fence execution requirements do not apply to them.
*/
bool pe_can_fence(pe_working_set_t * data_set, node_t *node)
{
if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
return FALSE; /* Turned off */
} else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) {
return FALSE; /* No devices */
} else if (is_set(data_set->flags, pe_flag_have_quorum)) {
return TRUE;
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
return TRUE;
} else if(node == NULL) {
return FALSE;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
return TRUE;
}
crm_trace("Cannot fence %s", node->details->uname);
return FALSE;
}
node_t *
node_copy(const node_t *this_node)
{
node_t *new_node = NULL;
CRM_CHECK(this_node != NULL, return NULL);
new_node = calloc(1, sizeof(node_t));
CRM_ASSERT(new_node != NULL);
crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed;
new_node->details = this_node->details;
return new_node;
}
/* Any node present in either the hash table or the list, but not in both, gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
{
GHashTable *result = hash;
node_t *other_node = NULL;
GListPtr gIter = list;
GHashTableIter iter;
node_t *node = NULL;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
other_node = pe_find_node_id(list, node->details->id);
if (other_node == NULL) {
node->weight = -INFINITY;
} else if (merge_scores) {
node->weight = merge_weights(node->weight, other_node->weight);
}
}
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
other_node = pe_hash_table_lookup(result, node->details->id);
if (other_node == NULL) {
node_t *new_node = node_copy(node);
new_node->weight = -INFINITY;
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
}
}
GHashTable *
node_hash_from_list(GListPtr list)
{
GListPtr gIter = list;
GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
node_t *n = node_copy(node);
g_hash_table_insert(result, (gpointer) n->details->id, n);
}
return result;
}
GListPtr
node_list_dup(GListPtr list1, gboolean reset, gboolean filter)
{
GListPtr result = NULL;
GListPtr gIter = list1;
for (; gIter != NULL; gIter = gIter->next) {
node_t *new_node = NULL;
node_t *this_node = (node_t *) gIter->data;
if (filter && this_node->weight < 0) {
continue;
}
new_node = node_copy(this_node);
if (reset) {
new_node->weight = 0;
}
if (new_node != NULL) {
result = g_list_prepend(result, new_node);
}
}
return result;
}
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
const node_t *node_a = a;
const node_t *node_b = b;
return strcmp(node_a->details->uname, node_b->details->uname);
}
void
dump_node_scores_worker(int level, const char *file, const char *function, int line,
resource_t * rsc, const char *comment, GHashTable * nodes)
{
GHashTable *hash = nodes;
GHashTableIter iter;
node_t *node = NULL;
if (rsc) {
hash = rsc->allowed_nodes;
}
if (rsc && is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't show the allocation scores for orphans */
return;
}
if (level == 0) {
char score[128];
int len = sizeof(score);
/* For now we want this in sorted order to keep the regression tests happy */
GListPtr gIter = NULL;
GListPtr list = g_hash_table_get_values(hash);
list = g_list_sort(list, sort_node_uname);
gIter = list;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
/* This function is called a whole lot, use stack allocated score */
score2char_stack(node->weight, score, len);
if (rsc) {
printf("%s: %s allocation score on %s: %s\n",
comment, rsc->id, node->details->uname, score);
} else {
printf("%s: %s = %s\n", comment, node->details->uname, score);
}
}
g_list_free(list);
} else if (hash) {
char score[128];
int len = sizeof(score);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
/* This function is called a whole lot, so use a stack-allocated score buffer */
score2char_stack(node->weight, score, len);
if (rsc) {
do_crm_log_alias(LOG_TRACE, file, function, line,
"%s: %s allocation score on %s: %s", comment, rsc->id,
node->details->uname, score);
} else {
do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s = %s", comment,
node->details->uname, score);
}
}
}
if (rsc && rsc->children) {
GListPtr gIter = NULL;
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
dump_node_scores_worker(level, file, function, line, child, comment, nodes);
}
}
}
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
char **dump_text = user_data;
int len = 0;
char *new_text = NULL;
len = strlen(*dump_text) + strlen(" ") + strlen(key) + strlen("=") + strlen(value) + 1;
new_text = calloc(1, len);
sprintf(new_text, "%s %s=%s", *dump_text, (char *)key, (char *)value);
free(*dump_text);
*dump_text = new_text;
}
void
dump_node_capacity(int level, const char *comment, node_t * node)
{
int len = 0;
char *dump_text = NULL;
len = strlen(comment) + strlen(": ") + strlen(node->details->uname) + strlen(" capacity:") + 1;
dump_text = calloc(1, len);
sprintf(dump_text, "%s: %s capacity:", comment, node->details->uname);
g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
if (level == 0) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
void
dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node)
{
int len = 0;
char *dump_text = NULL;
len = strlen(comment) + strlen(": ") + strlen(rsc->id) + strlen(" utilization on ")
+ strlen(node->details->uname) + strlen(":") + 1;
dump_text = calloc(1, len);
sprintf(dump_text, "%s: %s utilization on %s:", comment, rsc->id, node->details->uname);
g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
if (level == 0) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
const resource_t *resource1 = (const resource_t *)a;
const resource_t *resource2 = (const resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->sort_index > resource2->sort_index) {
return -1;
}
if (resource1->sort_index < resource2->sort_index) {
return 1;
}
return 0;
}
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
const resource_t *resource1 = (const resource_t *)a;
const resource_t *resource2 = (const resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
action_t *
custom_action(resource_t * rsc, char *key, const char *task,
node_t * on_node, gboolean optional, gboolean save_action,
pe_working_set_t * data_set)
{
action_t *action = NULL;
GListPtr possible_matches = NULL;
CRM_CHECK(key != NULL, return NULL);
CRM_CHECK(task != NULL, free(key); return NULL);
if (save_action && rsc != NULL) {
possible_matches = find_actions(rsc->actions, key, on_node);
} else if(save_action) {
#if 0
action = g_hash_table_lookup(data_set->singletons, key);
#else
/* More expensive but takes 'node' into account */
possible_matches = find_actions(data_set->actions, key, on_node);
#endif
}
if(data_set->singletons == NULL) {
data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
}
if (possible_matches != NULL) {
if (g_list_length(possible_matches) > 1) {
pe_warn("Action %s for %s on %s exists %d times",
task, rsc ? rsc->id : "<NULL>",
on_node ? on_node->details->uname : "<NULL>", g_list_length(possible_matches));
}
action = g_list_nth_data(possible_matches, 0);
pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s",
action->id, task, rsc ? rsc->id : "<NULL>",
on_node ? on_node->details->uname : "<NULL>");
g_list_free(possible_matches);
}
if (action == NULL) {
if (save_action) {
pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d",
optional ? "" : " mandatory", data_set->action_id, key,
rsc ? rsc->id : "<NULL>", on_node ? on_node->details->uname : "<NULL>", optional);
}
action = calloc(1, sizeof(action_t));
if (save_action) {
action->id = data_set->action_id++;
} else {
action->id = 0;
}
action->rsc = rsc;
CRM_ASSERT(task != NULL);
action->task = strdup(task);
if (on_node) {
action->node = node_copy(on_node);
}
action->uuid = strdup(key);
pe_set_action_bit(action, pe_action_runnable);
if (optional) {
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
pe_set_action_bit(action, pe_action_optional);
} else {
pe_clear_action_bit(action, pe_action_optional);
pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
}
/*
Implied by calloc()...
action->actions_before = NULL;
action->actions_after = NULL;
action->pseudo = FALSE;
action->dumped = FALSE;
action->processed = FALSE;
action->seen_count = 0;
*/
action->extra = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free);
action->meta = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free);
if (save_action) {
data_set->actions = g_list_prepend(data_set->actions, action);
if(rsc == NULL) {
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
}
if (rsc != NULL) {
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
unpack_operation(action, action->op_entry, rsc->container, data_set);
if (save_action) {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
if (save_action) {
pe_rsc_trace(rsc, "Action %d created", action->id);
}
}
if (optional == FALSE) {
pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
pe_clear_action_bit(action, pe_action_optional);
}
if (rsc != NULL) {
enum action_tasks a_task = text2task(action->task);
int warn_level = LOG_TRACE;
if (save_action) {
warn_level = LOG_WARNING;
}
if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
&& action->node != NULL && action->op_entry != NULL) {
pe_set_action_bit(action, pe_action_have_node_attrs);
unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS,
action->node->details->attrs,
action->extra, NULL, FALSE, data_set->now);
}
if (is_set(action->flags, pe_action_pseudo)) {
/* leave untouched */
} else if (action->node == NULL) {
pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
pe_clear_action_bit(action, pe_action_runnable);
} else if (is_not_set(rsc->flags, pe_rsc_managed)
&& g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) {
crm_debug("Action %s (unmanaged)", action->uuid);
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
pe_set_action_bit(action, pe_action_optional);
/* action->runnable = FALSE; */
} else if (action->node->details->online == FALSE
&& (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
action->uuid, action->node->details->uname);
if (is_set(action->rsc->flags, pe_rsc_managed)
&& save_action && a_task == stop_rsc
&& action->node->details->unclean == FALSE) {
- pe_fence_node(data_set, action->node, "because of unrunnable resource actions");
+ pe_fence_node(data_set, action->node, "resource actions are unrunnable");
}
} else if (action->node->details->pending) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
action->uuid, action->node->details->uname);
} else if (action->needs == rsc_req_nothing) {
pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
pe_set_action_bit(action, pe_action_runnable);
#if 0
/*
* No point checking this
* - if we don't have quorum we can't stonith anyway
*/
} else if (action->needs == rsc_req_stonith) {
crm_trace("Action %s requires only stonith", action->uuid);
action->runnable = TRUE;
#endif
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_stop) {
pe_clear_action_bit(action, pe_action_runnable);
crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_freeze) {
pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
pe_clear_action_bit(action, pe_action_runnable);
pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
action->node->details->uname, action->uuid);
}
} else {
pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
pe_set_action_bit(action, pe_action_runnable);
}
if (save_action) {
switch (a_task) {
case stop_rsc:
set_bit(rsc->flags, pe_rsc_stopping);
break;
case start_rsc:
clear_bit(rsc->flags, pe_rsc_starting);
if (is_set(action->flags, pe_action_runnable)) {
set_bit(rsc->flags, pe_rsc_starting);
}
break;
default:
break;
}
}
}
free(key);
return action;
}
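/* Illustrative usage (not from this file): creating a mandatory stop action
 * for a resource on a node. custom_action() takes ownership of 'key' and
 * frees it, so the caller must not reuse it afterwards:
 *
 *   action_t *stop = custom_action(rsc, stop_key(rsc), RSC_STOP, node,
 *                                  FALSE, TRUE, data_set);
 */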
static const char *
unpack_operation_on_fail(action_t * action)
{
const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) {
crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id);
return NULL;
} else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
/* on-fail for demote defaults to the on-fail of the Master-role monitor, if one is configured */
xmlNode *operation = NULL;
const char *name = NULL;
const char *role = NULL;
const char *on_fail = NULL;
const char *interval = NULL;
const char *enabled = NULL;
CRM_CHECK(action->rsc != NULL, return NULL);
for (operation = __xml_first_child(action->rsc->ops_xml);
operation && !value; operation = __xml_next_element(operation)) {
if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
continue;
}
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
enabled = crm_element_value(operation, "enabled");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!on_fail) {
continue;
} else if (enabled && !crm_is_true(enabled)) {
continue;
} else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
continue;
} else if (crm_get_interval(interval) <= 0) {
continue;
}
value = on_fail;
}
}
return value;
}
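/* Illustrative configuration (hypothetical id) that the loop above would pick
 * up: a Master-role monitor whose on-fail value also becomes the default for
 * the demote action:
 *
 *   <op id="db-monitor-master" name="monitor" role="Master"
 *       interval="10s" on-fail="restart"/>
 */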
static xmlNode *
find_min_interval_mon(resource_t * rsc, gboolean include_disabled)
{
int number = 0;
int min_interval = -1;
const char *name = NULL;
const char *value = NULL;
const char *interval = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
if (safe_str_neq(name, RSC_STATUS)) {
continue;
}
number = crm_get_interval(interval);
if (number < 0) {
continue;
}
if (min_interval < 0 || number < min_interval) {
min_interval = number;
op = operation;
}
}
}
return op;
}
void
unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
pe_working_set_t * data_set)
{
int value_i = 0;
unsigned long long interval = 0;
unsigned long long start_delay = 0;
char *value_ms = NULL;
const char *value = NULL;
const char *field = NULL;
CRM_CHECK(action->rsc != NULL, return);
unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL,
action->meta, NULL, FALSE, data_set->now);
if (xml_obj) {
xmlAttrPtr xIter = NULL;
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_obj, prop_name);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS,
NULL, action->meta, NULL, FALSE, data_set->now);
unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS,
NULL, action->meta, NULL, FALSE, data_set->now);
g_hash_table_remove(action->meta, "id");
field = XML_LRM_ATTR_INTERVAL;
value = g_hash_table_lookup(action->meta, field);
if (value != NULL) {
interval = crm_get_interval(value);
if (interval > 0) {
value_ms = crm_itoa(interval);
g_hash_table_replace(action->meta, strdup(field), value_ms);
} else {
g_hash_table_remove(action->meta, field);
}
}
/* Begin compatibility code ("requires" set on start action not resource) */
value = g_hash_table_lookup(action->meta, "requires");
if (safe_str_neq(action->task, RSC_START)
&& safe_str_neq(action->task, RSC_PROMOTE)) {
action->needs = rsc_req_nothing;
value = "nothing (not start/promote)";
} else if (safe_str_eq(value, "nothing")) {
action->needs = rsc_req_nothing;
} else if (safe_str_eq(value, "quorum")) {
action->needs = rsc_req_quorum;
} else if (safe_str_eq(value, "unfencing")) {
action->needs = rsc_req_stonith;
set_bit(action->rsc->flags, pe_rsc_needs_unfencing);
if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_notice("%s requires unfencing but fencing is disabled", action->rsc->id);
}
} else if (is_set(data_set->flags, pe_flag_stonith_enabled)
&& safe_str_eq(value, "fencing")) {
action->needs = rsc_req_stonith;
if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_notice("%s requires fencing but fencing is disabled", action->rsc->id);
}
/* End compatibility code */
} else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = rsc_req_stonith;
value = "fencing (resource)";
} else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = rsc_req_quorum;
value = "quorum (resource)";
} else {
action->needs = rsc_req_nothing;
value = "nothing (resource)";
}
pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value);
value = unpack_operation_on_fail(action);
if (value == NULL) {
} else if (safe_str_eq(value, "block")) {
action->on_fail = action_fail_block;
g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
} else if (safe_str_eq(value, "fence")) {
action->on_fail = action_fail_fence;
value = "node fencing";
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense");
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
}
} else if (safe_str_eq(value, "standby")) {
action->on_fail = action_fail_standby;
value = "node standby";
} else if (safe_str_eq(value, "ignore")
|| safe_str_eq(value, "nothing")) {
action->on_fail = action_fail_ignore;
value = "ignore";
} else if (safe_str_eq(value, "migrate")) {
action->on_fail = action_fail_migrate;
value = "force migration";
} else if (safe_str_eq(value, "stop")) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
} else if (safe_str_eq(value, "restart")) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate)";
} else if (safe_str_eq(value, "restart-container")) {
if (container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* for baremetal remote nodes, ensure that any failure that results in
* dropping an active connection to a remote node results in fencing of
* the remote node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) &&
(is_rsc_baremetal_remote_node(action->rsc, data_set) &&
!(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) &&
(safe_str_neq(action->task, CRMD_ACTION_START)))) {
if (!is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop unmanaged baremetal remote node (enforcing default)";
} else {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fence baremetal remote node (default)";
} else {
value = "recover baremetal remote node connection (default)";
}
if (action->rsc->remote_reconnect_interval) {
action->fail_role = RSC_ROLE_STOPPED;
}
action->on_fail = action_fail_reset_remote;
}
} else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
action->on_fail = action_fail_fence;
value = "resource fence (default)";
} else {
action->on_fail = action_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
}
if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == RSC_ROLE_UNKNOWN) {
if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) {
action->fail_role = RSC_ROLE_SLAVE;
} else {
action->fail_role = RSC_ROLE_STARTED;
}
}
pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
role2text(action->fail_role));
field = XML_OP_ATTR_START_DELAY;
value = g_hash_table_lookup(action->meta, field);
if (value != NULL) {
value_i = crm_get_msec(value);
if (value_i < 0) {
value_i = 0;
}
start_delay = value_i;
value_ms = crm_itoa(value_i);
g_hash_table_replace(action->meta, strdup(field), value_ms);
} else if (interval > 0 && g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN)) {
crm_time_t *origin = NULL;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
origin = crm_time_new(value);
if (origin == NULL) {
crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s",
ID(xml_obj), value);
} else {
crm_time_t *delay = NULL;
int rc = crm_time_compare(origin, data_set->now);
long long delay_s = 0;
int interval_s = (interval / 1000);
crm_trace("Origin: %s, interval: %d", value, interval_s);
/* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */
while(rc > 0) {
crm_time_add_seconds(origin, -interval_s);
rc = crm_time_compare(origin, data_set->now);
}
/* Now find the first "multiple" that occurs after 'now' */
while (rc < 0) {
crm_time_add_seconds(origin, interval_s);
rc = crm_time_compare(origin, data_set->now);
}
delay = crm_time_calculate_duration(origin, data_set->now);
crm_time_log(LOG_TRACE, "origin", origin,
crm_time_log_date | crm_time_log_timeofday |
crm_time_log_with_timezone);
crm_time_log(LOG_TRACE, "now", data_set->now,
crm_time_log_date | crm_time_log_timeofday |
crm_time_log_with_timezone);
crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration);
delay_s = crm_time_get_seconds(delay);
CRM_CHECK(delay_s >= 0, delay_s = 0);
start_delay = delay_s * 1000;
crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj));
g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
crm_itoa(start_delay));
crm_time_free(origin);
crm_time_free(delay);
}
}
field = XML_ATTR_TIMEOUT;
value = g_hash_table_lookup(action->meta, field);
if (value == NULL && xml_obj == NULL && safe_str_eq(action->task, RSC_STATUS) && interval == 0) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
pe_rsc_trace(action->rsc,
"\t%s uses the timeout value '%s' from the minimum interval monitor",
action->uuid, value);
}
}
if (value == NULL) {
value = pe_pref(data_set->config_hash, "default-action-timeout");
}
value_i = crm_get_msec(value);
if (value_i < 0) {
value_i = 0;
}
value_ms = crm_itoa(value_i);
g_hash_table_replace(action->meta, strdup(field), value_ms);
}
static xmlNode *
find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled)
{
unsigned long long number = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *value = NULL;
const char *interval = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
number = crm_get_interval(interval);
match_key = generate_op_key(rsc->id, name, number);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = generate_op_key(rsc->clone_name, name, number);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
local_key = generate_op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = generate_op_key(rsc->id, "notify", 0);
key = local_key;
goto retry;
}
return NULL;
}
xmlNode *
find_rsc_op_entry(resource_t * rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
void
print_node(const char *pre_text, node_t * node, gboolean details)
{
if (node == NULL) {
crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
CRM_ASSERT(node->details);
crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
pre_text == NULL ? "" : pre_text,
pre_text == NULL ? "" : ": ",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE);
}
}
}
/*
* Used by the HashTable for-loop
*/
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
crm_trace("%s%s %s ==> %s",
user_data == NULL ? "" : (char *)user_data,
user_data == NULL ? "" : ": ", (char *)key, (char *)value);
}
void
print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details)
{
long options = pe_print_log | pe_print_pending;
if (rsc == NULL) {
do_crm_log(log_level - 1, "%s%s: <NULL>",
pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
if (details) {
options |= pe_print_details;
}
rsc->fns->print(rsc, pre_text, options, &log_level);
}
void
pe_free_action(action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
free(action->cancel_task);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
GListPtr
find_recurring_actions(GListPtr input, node_t * not_on_node)
{
const char *value = NULL;
GListPtr result = NULL;
GListPtr gIter = input;
CRM_CHECK(input != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
if (value == NULL) {
/* skip */
} else if (safe_str_eq(value, "0")) {
/* skip */
} else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) {
/* skip */
} else if (not_on_node == NULL) {
crm_trace("(null) Found: %s", action->uuid);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
/* skip */
} else if (action->node->details != not_on_node->details) {
crm_trace("Found: %s", action->uuid);
result = g_list_prepend(result, action);
}
}
return result;
}
enum action_tasks
get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
enum action_tasks task = text2task(name);
if (rsc == NULL) {
return task;
} else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
switch (task) {
case stopped_rsc:
case started_rsc:
case action_demoted:
case action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
return task - 1;
break;
default:
break;
}
}
return task;
}
action_t *
find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node)
{
GListPtr gIter = NULL;
CRM_CHECK(uuid || task, return NULL);
for (gIter = input; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (uuid != NULL && safe_str_neq(uuid, action->uuid)) {
continue;
} else if (task != NULL && safe_str_neq(task, action->task)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
GListPtr
find_actions(GListPtr input, const char *key, const node_t *on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (safe_str_neq(key, action->uuid)) {
crm_trace("%s does not match action %s", key, action->uuid);
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, on_node->details->uname);
action->node = node_copy(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
} else {
crm_trace("Action %s on node %s does not match requested node %s",
key, action->node->details->uname,
on_node->details->uname);
}
}
return result;
}
GListPtr
find_actions_exact(GListPtr input, const char *key, node_t * on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
crm_trace("Matching %s against %s", key, action->uuid);
if (safe_str_neq(key, action->uuid)) {
crm_trace("Key mismatch: %s vs. %s", key, action->uuid);
continue;
} else if (on_node == NULL || action->node == NULL) {
crm_trace("on_node=%p, action->node=%p", on_node, action->node);
continue;
} else if (safe_str_eq(on_node->details->id, action->node->details->id)) {
result = g_list_prepend(result, action);
}
crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id);
}
return result;
}
static void
resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag)
{
node_t *match = NULL;
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = node_copy(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = merge_weights(match->weight, score);
}
void
resource_location(resource_t * rsc, node_t * node, int score, const char *tag,
pe_working_set_t * data_set)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (data_set != NULL) {
GListPtr gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node_iter = (node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if (node == NULL && score == -INFINITY) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
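/* Order two lrm_rsc_op entries: primarily by call-id, then by last-rc-change
 * when the call-ids match; if one of them is still pending (call-id -1), fall
 * back to the transition magic to estimate their relative age. */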
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID);
if (safe_str_eq(a_xml_id, b_xml_id)) {
/* We have duplicate lrm_rsc_op entries in the status
* section, which is unlikely to be a good thing
* - we can handle it easily enough, but we need to get
* to the bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (b_call_id >= 0 && a_call_id == b_call_id) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
int last_a = -1;
int last_b = -1;
crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %d vs %d", last_a, last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
int dummy = -1;
const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) {
sort_return(0, "bad magic a");
}
if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (i.e. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
time_t
get_effective_time(pe_working_set_t * data_set)
{
if(data_set) {
if (data_set->now == NULL) {
crm_trace("Recording a new 'now'");
data_set->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(data_set->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
gboolean
get_target_role(resource_t * rsc, enum rsc_role_e * role)
{
enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (value == NULL || safe_str_eq("started", value)
|| safe_str_eq("default", value)) {
return FALSE;
}
local_role = text2role(value);
if (local_role == RSC_ROLE_UNKNOWN) {
crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
return FALSE;
} else if (local_role > RSC_ROLE_STARTED) {
if (uber_parent(rsc)->variant == pe_master) {
if (local_role > RSC_ROLE_SLAVE) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
} else {
crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense",
rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
gboolean
order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order)
{
GListPtr gIter = NULL;
action_wrapper_t *wrapper = NULL;
GListPtr list = NULL;
if (order == pe_order_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
action_wrapper_t *after = (action_wrapper_t *) gIter->data;
if (after->action == rh_action && (after->type & order)) {
return FALSE;
}
}
wrapper = calloc(1, sizeof(action_wrapper_t));
wrapper->action = rh_action;
wrapper->type = order;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = NULL;
/* order |= pe_order_implies_then; */
/* order ^= pe_order_implies_then; */
wrapper = calloc(1, sizeof(action_wrapper_t));
wrapper->action = lh_action;
wrapper->type = order;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
action_t *op = NULL;
if(data_set->singletons) {
op = g_hash_table_lookup(data_set->singletons, name);
}
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
set_bit(op->flags, pe_action_pseudo);
set_bit(op->flags, pe_action_runnable);
}
return op;
}
void
destroy_ticket(gpointer data)
{
ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
ticket_t *ticket = NULL;
if (ticket_id == NULL || strlen(ticket_id) == 0) {
return NULL;
}
if (data_set->tickets == NULL) {
data_set->tickets =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = g_hash_table_new_full(crm_str_hash, g_str_equal,
g_hash_destroy_str, g_hash_destroy_str);
g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
int len = 0;
char *name = NULL;
char *match = NULL;
if (param_set == NULL) {
return;
}
if (param_set) {
xmlAttrPtr xIter = param_set->properties;
while (xIter) {
const char *prop_name = (const char *)xIter->name;
xIter = xIter->next;
name = NULL;
len = strlen(prop_name) + 3;
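/* Pad the name with spaces so strstr() only matches whole,
 * space-delimited entries in param_string */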
name = malloc(len);
if(name) {
sprintf(name, " %s ", prop_name);
name[len - 1] = 0;
match = strstr(param_string, name);
}
if (need_present && match == NULL) {
crm_trace("%s not found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
} else if (need_present == FALSE && match) {
crm_trace("%s found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
}
free(name);
}
}
}
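/* Returns TRUE when the resource is an ocf:pacemaker:remote connection whose
 * "addr" parameter is the "#uname" placeholder, i.e. the address must be
 * substituted with the node name (see REMOTE_CONTAINER_HACK below). */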
bool fix_remote_addr(resource_t * rsc)
{
const char *name;
const char *value;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
const char *value_list[] = {
"remote",
"ocf",
"pacemaker"
};
if(rsc == NULL) {
return FALSE;
}
name = "addr";
value = g_hash_table_lookup(rsc->parameters, name);
if (safe_str_eq(value, "#uname") == FALSE) {
return FALSE;
}
for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
name = attr_list[lpc];
value = crm_element_value(rsc->xml, attr_list[lpc]);
if (safe_str_eq(value, value_list[lpc]) == FALSE) {
return FALSE;
}
}
return TRUE;
}
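/* Build (and cache per node, keyed by the operation id) the parameter digests
 * for the operation described by xml_op, and compare them with the digests
 * recorded in the status section; data->rc indicates whether a restart, a
 * reload or nothing is required. */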
op_digest_cache_t *
rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
pe_working_set_t * data_set)
{
op_digest_cache_t *data = NULL;
GHashTable *local_rsc_params = NULL;
#ifdef ENABLE_VERSIONED_ATTRS
xmlNode *local_versioned_params = NULL;
#endif
action_t *action = NULL;
char *key = NULL;
int interval = 0;
const char *op_id = ID(xml_op);
const char *interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_all;
const char *digest_restart;
const char *secure_list;
const char *restart_list;
const char *op_version;
CRM_ASSERT(node != NULL);
data = g_hash_table_lookup(node->details->digest_cache, op_id);
if (data) {
return data;
}
data = calloc(1, sizeof(op_digest_cache_t));
CRM_ASSERT(data != NULL);
digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
/* key is freed in custom_action */
interval = crm_parse_int(interval_s, "0");
key = generate_op_key(rsc->id, task, interval);
action = custom_action(rsc, key, task, node, TRUE, FALSE, data_set);
key = NULL;
local_rsc_params = g_hash_table_new_full(crm_str_hash, g_str_equal,
g_hash_destroy_str, g_hash_destroy_str);
get_rsc_attributes(local_rsc_params, rsc, node, data_set);
#ifdef ENABLE_VERSIONED_ATTRS
local_versioned_params = create_xml_node(NULL, XML_TAG_VER_ATTRS);
pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
#endif
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
if(fix_remote_addr(rsc) && node) {
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
crm_xml_add(data->params_all, "addr", node->details->uname);
crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname);
}
g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
g_hash_table_foreach(action->extra, hash2field, data->params_all);
g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
filter_action_parameters(data->params_all, op_version);
#ifdef ENABLE_VERSIONED_ATTRS
crm_summarize_versioned_params(data->params_all, rsc->versioned_parameters);
crm_summarize_versioned_params(data->params_all, local_versioned_params);
#endif
data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
if (secure_list && is_set(data_set->flags, pe_flag_sanitized)) {
data->params_secure = copy_xml(data->params_all);
if (secure_list) {
filter_parameters(data->params_secure, secure_list, FALSE);
}
data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
}
if (digest_restart) {
data->params_restart = copy_xml(data->params_all);
if (restart_list) {
filter_parameters(data->params_restart, restart_list, TRUE);
}
data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
}
data->rc = RSC_DIGEST_MATCH;
if (digest_restart && strcmp(data->digest_restart_calc, digest_restart) != 0) {
data->rc = RSC_DIGEST_RESTART;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = RSC_DIGEST_UNKNOWN;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
data->rc = RSC_DIGEST_ALL;
}
g_hash_table_insert(node->details->digest_cache, strdup(op_id), data);
g_hash_table_destroy(local_rsc_params);
#ifdef ENABLE_VERSIONED_ATTRS
free_xml(local_versioned_params);
#endif
pe_free_action(action);
return data;
}
const char *rsc_printable_id(resource_t *rsc)
{
if (is_not_set(rsc->flags, pe_rsc_unique)) {
return ID(rsc->xml);
}
return rsc->id;
}
void
clear_bit_recursive(resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
clear_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
clear_bit_recursive(child_rsc, flag);
}
}
void
set_bit_recursive(resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
set_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
set_bit_recursive(child_rsc, flag);
}
}
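/* Look up or create the singleton fencing action for a node, keyed by
 * CRM_OP_FENCE, the node name and the requested stonith action; a later
 * non-optional request clears the optional flag on the existing action. */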
action_t *
pe_fence_op(node_t * node, const char *op, bool optional, pe_working_set_t * data_set)
{
char *key = NULL;
action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
if(data_set->singletons) {
stonith_op = g_hash_table_lookup(data_set->singletons, key);
}
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, key, CRM_OP_FENCE, node, optional, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
} else {
free(key);
}
if(optional == FALSE) {
crm_trace("%s is no longer optional", stonith_op->uuid);
pe_clear_action_bit(stonith_op, pe_action_optional);
}
return stonith_op;
}
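/* Schedule an unfencing ("on") operation for the given node if it is online,
 * clean and not shutting down; with no node given, do so for every such node
 * in the resource's allowed_nodes. No-op unless unfencing is enabled and the
 * resource (if any) is a fencing device. */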
void
trigger_unfencing(
resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set)
{
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
/* No resources require it */
return;
} else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
action_t *unfence = pe_fence_op(node, "on", FALSE, data_set);
crm_notice("Unfencing %s: %s", node->details->uname, reason);
if(dependency) {
order_actions(unfence, dependency, pe_order_optional);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, data_set);
}
}
}
}
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
tag_t *tag = NULL;
GListPtr gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(tag_t));
if (tag == NULL) {
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (crm_str_eq(existing_ref, obj_ref, TRUE)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
diff --git a/pengine/allocate.c b/pengine/allocate.c
index 795ed5659f..0020af6599 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1,2409 +1,2448 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pengine.h>
#include <allocate.h>
#include <utils.h>
CRM_TRACE_INIT_DATA(pe_allocate);
void set_alloc_actions(pe_working_set_t * data_set);
void migrate_reload_madness(pe_working_set_t * data_set);
extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
static void apply_remote_node_ordering(pe_working_set_t *data_set);
resource_alloc_functions_t resource_class_alloc_functions[] = {
{
native_merge_weights,
native_color,
native_create_actions,
native_create_probe,
native_internal_constraints,
native_rsc_colocation_lh,
native_rsc_colocation_rh,
native_rsc_location,
native_action_flags,
native_update_actions,
native_expand,
native_append_meta,
},
{
group_merge_weights,
group_color,
group_create_actions,
native_create_probe,
group_internal_constraints,
group_rsc_colocation_lh,
group_rsc_colocation_rh,
group_rsc_location,
group_action_flags,
group_update_actions,
group_expand,
group_append_meta,
},
{
clone_merge_weights,
clone_color,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
clone_rsc_colocation_lh,
clone_rsc_colocation_rh,
clone_rsc_location,
clone_action_flags,
clone_update_actions,
clone_expand,
clone_append_meta,
},
{
master_merge_weights,
master_color,
master_create_actions,
clone_create_probe,
master_internal_constraints,
clone_rsc_colocation_lh,
master_rsc_colocation_rh,
clone_rsc_location,
clone_action_flags,
clone_update_actions,
clone_expand,
master_append_meta,
},
{
container_merge_weights,
container_color,
container_create_actions,
container_create_probe,
container_internal_constraints,
container_rsc_colocation_lh,
container_rsc_colocation_rh,
container_rsc_location,
container_action_flags,
container_update_actions,
container_expand,
container_append_meta,
}
};
gboolean
update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
{
static unsigned long calls = 0;
gboolean changed = FALSE;
gboolean clear = is_set(flags, pe_action_clear);
enum pe_action_flags last = action->flags;
if (clear) {
action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
} else {
action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
}
if (last != action->flags) {
calls++;
changed = TRUE;
/* Useful for tracking down _who_ changed a specific flag */
/* CRM_ASSERT(calls != 534); */
clear_bit(flags, pe_action_clear);
crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
action->uuid, action->node ? action->node->details->uname : "[none]",
clear ? "un-" : "", flags, last, action->flags, calls, source);
}
return changed;
}
static gboolean
check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
gboolean active_here, pe_working_set_t * data_set)
{
int attr_lpc = 0;
gboolean force_restart = FALSE;
gboolean delete_resource = FALSE;
gboolean changed = FALSE;
const char *value = NULL;
const char *old_value = NULL;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
if (value == old_value /* ie. NULL */
|| crm_str_eq(value, old_value, TRUE)) {
continue;
}
changed = TRUE;
trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
if (active_here) {
force_restart = TRUE;
crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
rsc->id, node->details->uname, attr_list[attr_lpc],
crm_str(old_value), crm_str(value));
}
}
if (force_restart) {
/* make sure the restart happens */
stop_action(rsc, node, FALSE);
set_bit(rsc->flags, pe_rsc_start_pending);
delete_resource = TRUE;
} else if (changed) {
delete_resource = TRUE;
}
return delete_resource;
}
static void
CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
const char *reason, pe_working_set_t * data_set)
{
int interval = 0;
action_t *cancel = NULL;
char *key = NULL;
const char *task = NULL;
const char *call_id = NULL;
const char *interval_s = NULL;
CRM_CHECK(xml_op != NULL, return);
CRM_CHECK(active_node != NULL, return);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
interval = crm_parse_int(interval_s, "0");
/* we need to reconstruct the key because of the way we used to construct resource IDs */
key = generate_op_key(rsc->id, task, interval);
crm_info("Action %s on %s will be stopped: %s",
key, active_node->details->uname, reason ? reason : "unknown");
/* TODO: This looks highly dangerous if we ever try to schedule 'key' too */
cancel = custom_action(rsc, strdup(key), RSC_CANCEL, active_node, FALSE, TRUE, data_set);
free(cancel->task);
free(cancel->cancel_task);
cancel->task = strdup(RSC_CANCEL);
cancel->cancel_task = strdup(task);
add_hash_param(cancel->meta, XML_LRM_ATTR_TASK, task);
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
add_hash_param(cancel->meta, XML_LRM_ATTR_INTERVAL, interval_s);
custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
free(key);
key = NULL;
}
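/* Check whether the parameters recorded for xml_op still match the current
 * configuration: a restart-digest mismatch forces a full restart, an
 * all-parameters mismatch is handled by a reload (or by rescheduling the
 * recurring op), and recurring ops that no longer exist in the configuration
 * are cancelled as orphans. */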
static gboolean
check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
pe_working_set_t * data_set)
{
char *key = NULL;
int interval = 0;
const char *interval_s = NULL;
const op_digest_cache_t *digest_data = NULL;
gboolean did_change = FALSE;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *op_version;
const char *digest_secure = NULL;
CRM_CHECK(active_node != NULL, return FALSE);
if (safe_str_eq(task, RSC_STOP)) {
return FALSE;
}
interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
interval = crm_parse_int(interval_s, "0");
if (interval > 0) {
xmlNode *op_match = NULL;
/* we need to reconstruct the key because of the way we used to construct resource IDs */
key = generate_op_key(rsc->id, task, interval);
pe_rsc_trace(rsc, "Checking parameters for %s", key);
op_match = find_rsc_op_entry(rsc, key);
if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
free(key);
return TRUE;
} else if (op_match == NULL) {
pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
free(key);
return TRUE;
}
free(key);
key = NULL;
}
crm_trace("Testing %s_%s_%d on %s",
rsc->id, task, interval, active_node->details->uname);
if (interval == 0 && safe_str_eq(task, RSC_STATUS)) {
/* Reload based on the start action not a probe */
task = RSC_START;
} else if (interval == 0 && safe_str_eq(task, RSC_MIGRATED)) {
/* Reload based on the start action not a migrate */
task = RSC_START;
} else if (interval == 0 && safe_str_eq(task, RSC_PROMOTE)) {
/* Reload based on the start action not a promote */
task = RSC_START;
}
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
if(is_set(data_set->flags, pe_flag_sanitized)) {
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
}
if(digest_data->rc != RSC_DIGEST_MATCH
&& digest_secure
&& digest_data->digest_secure_calc
&& strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
fprintf(stdout, "Only 'private' parameters to %s_%s_%d on %s changed: %s\n",
rsc->id, task, interval, active_node->details->uname,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
} else if (digest_data->rc == RSC_DIGEST_RESTART) {
/* Changes that force a restart */
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
did_change = TRUE;
key = generate_op_key(rsc->id, task, interval);
crm_log_xml_info(digest_data->params_restart, "params:restart");
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
key, active_node->details->uname,
crm_str(digest_restart), digest_data->digest_restart_calc,
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set);
trigger_unfencing(rsc, NULL, "Device parameters changed", NULL, data_set);
} else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
/* Changes that can potentially be handled by a reload */
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
const char *digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
did_change = TRUE;
trigger_unfencing(rsc, NULL, "Device parameters changed (reload)", NULL, data_set);
crm_log_xml_info(digest_data->params_all, "params:reload");
key = generate_op_key(rsc->id, task, interval);
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (reload:%s) %s",
key, active_node->details->uname,
crm_str(digest_all), digest_data->digest_all_calc, op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
if (interval > 0) {
action_t *op = NULL;
#if 0
/* Always reload/restart the entire resource */
ReloadRsc(rsc, active_node, data_set);
#else
/* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
set_bit(op->flags, pe_action_reschedule);
#endif
} else if (digest_restart && rsc->isolation_wrapper == NULL && (uber_parent(rsc))->isolation_wrapper == NULL) {
pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
/* Reload this resource */
ReloadRsc(rsc, active_node, data_set);
free(key);
} else {
pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
/* Re-send the start/demote/promote op
* Recurring ops will be detected independently
*/
custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set);
}
}
return did_change;
}
static void
check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int offset = -1;
int interval = 0;
int stop_index = 0;
int start_index = 0;
const char *task = NULL;
const char *interval_s = NULL;
xmlNode *rsc_op = NULL;
GListPtr op_list = NULL;
GListPtr sorted_op_list = NULL;
gboolean is_probe = FALSE;
gboolean did_change = FALSE;
CRM_CHECK(node != NULL, return);
if (is_set(rsc->flags, pe_rsc_orphan)) {
resource_t *parent = uber_parent(rsc);
if(parent == NULL
|| pe_rsc_is_clone(parent) == FALSE
|| is_set(parent->flags, pe_rsc_unique)) {
pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
DeleteRsc(rsc, node, FALSE, data_set);
} else {
pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
}
return;
} else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
rsc->id, node->details->uname);
return;
}
pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
op_list = g_list_prepend(op_list, rsc_op);
}
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
offset++;
if (start_index < stop_index) {
/* stopped */
continue;
} else if (offset < start_index) {
/* action occurred prior to a start */
continue;
}
is_probe = FALSE;
did_change = FALSE;
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL);
interval = crm_parse_int(interval_s, "0");
if (interval == 0 && safe_str_eq(task, RSC_STATUS)) {
is_probe = TRUE;
}
if (interval > 0 &&
(is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if (is_probe || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || interval > 0
|| safe_str_eq(task, RSC_MIGRATED)) {
did_change = check_action_definition(rsc, node, rsc_op, data_set);
}
if (did_change && get_failcount(node, rsc, NULL, data_set)) {
char *key = NULL;
action_t *action_clear = NULL;
key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
action_clear =
custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set);
set_bit(action_clear->flags, pe_action_runnable);
crm_notice("Clearing failure of %s on %s "
- "because action definition changed " CRM_XS " %s",
+ "action definition changed " CRM_XS " %s",
rsc->id, node->details->uname, action_clear->uuid);
}
}
g_list_free(sorted_op_list);
}
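/*!
* \internal
* \brief Build a list of resources matching a given ID
*
* \param[in] result         Existing result list to add matches to
* \param[in] rsc            Resource to check (along with its children),
*                           or all resources if NULL
* \param[in] id             Resource ID (or substring) to match
* \param[in] renamed_clones Whether to also match against clone_name
* \param[in] partial        Whether a substring match is sufficient
* \param[in] data_set       Cluster working set (used only when rsc is NULL)
*
* \return Head of the updated result list
*/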
static GListPtr
find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
gboolean partial, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
gboolean match = FALSE;
if (id == NULL) {
return NULL;
} else if (rsc == NULL && data_set) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
return result;
} else if (rsc == NULL) {
return NULL;
}
if (partial) {
if (strstr(rsc->id, id)) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
match = TRUE;
}
} else {
if (strcmp(rsc->id, id) == 0) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
}
}
if (match) {
result = g_list_prepend(result, rsc);
}
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
}
return result;
}
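/*!
* \internal
* \brief Check recorded operation history on all nodes against configuration
*
* For every node that can run resources or is in maintenance mode, compare
* the LRM history in the CIB status section with the current configuration
* via check_actions_for().
*
* \param[in] data_set Cluster working set
*/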
static void
check_actions(pe_working_set_t * data_set)
{
const char *id = NULL;
node_t *node = NULL;
xmlNode *lrm_rscs = NULL;
xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
xmlNode *node_state = NULL;
for (node_state = __xml_first_child(status); node_state != NULL;
node_state = __xml_next_element(node_state)) {
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
id = crm_element_value(node_state, XML_ATTR_ID);
lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
node = pe_find_node_id(data_set->nodes, id);
if (node == NULL) {
continue;
/* Still need to check actions for a maintenance node to cancel existing monitor operations */
} else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: can't run resources",
node->details->uname);
continue;
}
crm_trace("Processing node %s", node->details->uname);
if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
xmlNode *rsc_entry = NULL;
for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
if (xml_has_children(rsc_entry)) {
GListPtr gIter = NULL;
GListPtr result = NULL;
const char *rsc_id = ID(rsc_entry);
CRM_CHECK(rsc_id != NULL, return);
result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
for (gIter = result; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->variant != pe_native) {
continue;
}
check_actions_for(rsc_entry, rsc, node, data_set);
}
g_list_free(result);
}
}
}
}
}
}
}
static gboolean
apply_placement_constraints(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying constraints...");
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
rsc_to_node_t *cons = (rsc_to_node_t *) gIter->data;
cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
}
return TRUE;
}
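/*!
* \internal
* \brief Check whether a fail count clearing action already exists
*
* \param[in] node Node to check
* \param[in] rsc  Resource to check
*
* \return TRUE if a CRM_OP_CLEAR_FAILCOUNT action for the resource on the
*         node has already been created, otherwise FALSE
*/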
static gboolean
failcount_clear_action_exists(node_t * node, resource_t * rsc)
{
gboolean rc = FALSE;
char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
GListPtr list = find_actions_exact(rsc->actions, key, node);
if (list) {
rc = TRUE;
}
g_list_free(list);
free(key);
return rc;
}
/*!
* \internal
* \brief Force resource away if failures hit migration threshold
*
* \param[in,out] rsc Resource to check for failures
* \param[in,out] node Node to check for failures
* \param[in,out] data_set Cluster working set to update
*/
static void
check_migration_threshold(resource_t *rsc, node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
resource_t *failed;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return;
}
/* If there are no failures, there's no need to force away */
fail_count = get_failcount_all(node, rsc, NULL, data_set);
if (fail_count <= 0) {
return;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
/* If failed resource has a parent, we'll force the parent away */
failed = rsc;
if (is_not_set(rsc->flags, pe_rsc_unique)) {
failed = uber_parent(rsc);
}
if (countdown == 0) {
resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
failed->id, node->details->uname, fail_count,
rsc->migration_threshold);
} else {
crm_info("%s can fail %d more times on %s before being forced off",
failed->id, countdown, node->details->uname);
}
}
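/*!
* \internal
* \brief Apply stickiness and check migration threshold for a resource
*
* Add a location preference for the node that a managed resource is active
* on, weighted by its stickiness, then force the resource away if its fail
* count has reached migration-threshold (unless a fail count clear is
* already scheduled).
*
* \param[in] rsc      Resource to process (children are handled recursively)
* \param[in] node     Node being evaluated
* \param[in] data_set Cluster working set
*/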
static void
common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
common_apply_stickiness(child_rsc, node, data_set);
}
return;
}
if (is_set(rsc->flags, pe_rsc_managed)
&& rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (current == NULL) {
} else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
resource_t *sticky_rsc = rsc;
resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
" (node=%s, weight=%d)", sticky_rsc->id,
node->details->uname, rsc->stickiness);
} else {
GHashTableIter iter;
node_t *nIter = NULL;
pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
" and node %s is not explicitly allowed", rsc->id, node->details->uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
}
}
}
/* Check the migration threshold only if a failcount clear action
* has not already been placed for this resource on the node.
* There is no sense in potentially forcing the resource from this
* node if the failcount is being reset anyway. */
if (failcount_clear_action_exists(node, rsc) == FALSE) {
check_migration_threshold(rsc, node, data_set);
}
}
void
complex_set_cmds(resource_t * rsc)
{
GListPtr gIter = rsc->children;
rsc->cmds = &resource_class_alloc_functions[rsc->variant];
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
complex_set_cmds(child_rsc);
}
}
void
set_alloc_actions(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
complex_set_cmds(rsc);
}
}
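/*!
* \internal
* \brief Hash table iterator to merge a node's #health attributes into a total
*
* \param[in]     gKey      Node attribute name
* \param[in]     gValue    Node attribute value
* \param[in,out] user_data Pointer to the running system health total (int)
*/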
static void
calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
{
const char *key = (const char *)gKey;
const char *value = (const char *)gValue;
int *system_health = (int *)user_data;
if (!gKey || !gValue || !user_data) {
return;
}
/* Does it start with #health? */
if (0 == strncmp(key, "#health", 7)) {
int score;
/* Convert the value into an integer */
score = char2score(value);
/* Add it to the running total */
*system_health = merge_weights(score, *system_health);
}
}
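/*!
* \internal
* \brief Apply the configured node-health-strategy
*
* Map red/yellow/green health attributes to scores according to the cluster's
* node-health-strategy, and create a location preference for every resource
* on each node whose combined health score is nonzero.
*
* \param[in] data_set Cluster working set
*
* \return TRUE on success, FALSE if the strategy is unrecognized
*/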
static gboolean
apply_system_health(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
int base_health = 0;
if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
/* Prevent any accidental health -> score translation */
node_score_red = 0;
node_score_yellow = 0;
node_score_green = 0;
return TRUE;
} else if (safe_str_eq(health_strategy, "migrate-on-red")) {
/* Resources on nodes which have health values of red are
* weighted away from that node.
*/
node_score_red = -INFINITY;
node_score_yellow = 0;
node_score_green = 0;
} else if (safe_str_eq(health_strategy, "only-green")) {
/* Resources on nodes which have health values of red or yellow
* are forced away from that node.
*/
node_score_red = -INFINITY;
node_score_yellow = -INFINITY;
node_score_green = 0;
} else if (safe_str_eq(health_strategy, "progressive")) {
/* Same as the above, but use the r/y/g scores provided by the user
* Defaults are provided by the pe_prefs table
* Also, custom health "base score" can be used
*/
base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
} else if (safe_str_eq(health_strategy, "custom")) {
/* Requires the admin to configure the rsc_location constraints for
* processing the stored health scores
*/
/* TODO: Check for the existence of appropriate node health constraints */
return TRUE;
} else {
crm_err("Unknown node health strategy: %s", health_strategy);
return FALSE;
}
crm_info("Applying automated node health strategy: %s", health_strategy);
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
int system_health = base_health;
node_t *node = (node_t *) gIter->data;
/* Search through the node hash table for system health entries. */
g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
crm_info(" Node %s has an combined system health of %d",
node->details->uname, system_health);
/* If the health is non-zero, then create a new rsc2node so that the
* weight will be added later on.
*/
if (system_health != 0) {
GListPtr gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
}
}
}
return TRUE;
}
gboolean
stage0(pe_working_set_t * data_set)
{
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
if (data_set->input == NULL) {
return FALSE;
}
if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
crm_trace("Calculating status");
cluster_status(data_set);
}
set_alloc_actions(data_set);
apply_system_health(data_set);
unpack_constraints(cib_constraints, data_set);
return TRUE;
}
/*
* Check nodes for resources started outside of the LRM
*/
gboolean
probe_resources(pe_working_set_t * data_set)
{
action_t *probe_node_complete = NULL;
GListPtr gIter = NULL;
GListPtr gIter2 = NULL;
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
const char *probed = g_hash_table_lookup(node->details->attrs, CRM_OP_PROBED);
if (node->details->online == FALSE) {
continue;
} else if (node->details->unclean) {
continue;
} else if (is_container_remote_node(node)) {
/* TODO enable guest node probes once ordered probing is implemented */
continue;
} else if (node->details->rsc_discovery_enabled == FALSE) {
/* resource discovery is disabled for this node */
continue;
}
if (probed != NULL && crm_is_true(probed) == FALSE) {
action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
continue;
}
for (gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
}
}
return TRUE;
}
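/*!
* \internal
* \brief Restrict a resource to nodes marked for exclusive discovery
*
* If a resource or its top-level parent uses exclusive resource discovery,
* ban the resource (weight of -INFINITY) from any allowed node whose
* discovery mode is not exclusive.
*
* \param[in] rsc  Resource to filter (children are handled recursively)
* \param[in] node Node being evaluated
*/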
static void
rsc_discover_filter(resource_t *rsc, node_t *node)
{
GListPtr gIter = rsc->children;
resource_t *top = uber_parent(rsc);
node_t *match;
if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_discover_filter(child_rsc, node);
}
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match && match->rsc_discover_mode != discover_exclusive) {
match->weight = -INFINITY;
}
}
/*
* Count how many valid nodes we have (so we know the maximum number of
* colors we can resolve).
*
* Apply node constraints (i.e. filter the "allowed_nodes" part of resources).
*/
gboolean
stage2(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying placement constraints");
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
if (node == NULL) {
/* error */
} else if (node->weight >= 0.0 /* global weight */
&& node->details->online && node->details->type != node_ping) {
data_set->max_valid_nodes++;
}
}
apply_placement_constraints(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
GListPtr gIter2 = NULL;
node_t *node = (node_t *) gIter->data;
gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
common_apply_stickiness(rsc, node, data_set);
rsc_discover_filter(rsc, node);
}
}
return TRUE;
}
/*
* Create internal resource constraints before allocation
*/
gboolean
stage3(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
rsc->cmds->internal_constraints(rsc, data_set);
}
return TRUE;
}
/*
* Check for orphaned or redefined actions
*/
gboolean
stage4(pe_working_set_t * data_set)
{
check_actions(data_set);
return TRUE;
}
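/*!
* \internal
* \brief Compare two resources to determine their allocation order
*
* Resources are compared by priority, then by the score of their current
* location, then by their scores on each node in the supplied list.
*
* \param[in] a    First resource to compare
* \param[in] b    Second resource to compare
* \param[in] data GList of nodes, sorted by weight
*
* \return -1 if a should be allocated first, 1 if b should, otherwise 0
*/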
static gint
sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
{
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
const char *reason = "existence";
const GListPtr nodes = (GListPtr) data;
resource_t *resource1 = (resource_t *) convert_const_pointer(a);
resource_t *resource2 = (resource_t *) convert_const_pointer(b);
node_t *r1_node = NULL;
node_t *r2_node = NULL;
GListPtr gIter = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
if (a == NULL && b == NULL) {
goto done;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "no node list";
if (nodes == NULL) {
goto done;
}
r1_nodes =
rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);
r2_nodes =
rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);
/* Current location score */
reason = "current location";
r1_weight = -INFINITY;
r2_weight = -INFINITY;
if (resource1->running_on) {
r1_node = g_list_nth_data(resource1->running_on, 0);
r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
if (r1_node != NULL) {
r1_weight = r1_node->weight;
}
}
if (resource2->running_on) {
r2_node = g_list_nth_data(resource2->running_on, 0);
r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
if (r2_node != NULL) {
r2_weight = r2_node->weight;
}
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "score";
for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
r1_node = NULL;
r2_node = NULL;
r1_weight = -INFINITY;
if (r1_nodes) {
r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
}
if (r1_node) {
r1_weight = r1_node->weight;
}
r2_weight = -INFINITY;
if (r2_nodes) {
r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
}
if (r2_node) {
r2_weight = r2_node->weight;
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
rc < 0 ? '>' : rc > 0 ? '<' : '=',
resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
if (r1_nodes) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
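/*!
* \internal
* \brief Allocate all resources to nodes
*
* Remote connection resources (and, via colocation, their dependencies) are
* allocated first, preferring any partial migration target, so the nodes they
* define are usable when the remaining resources are allocated.
*
* \param[in] data_set Cluster working set
*/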
static void
allocate_resources(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Force remote connection resources to be allocated first. This
* also forces any colocation dependencies to be allocated as well */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->is_remote_node == FALSE) {
continue;
}
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
/* For remote node connection resources, always prefer the partial
* migration target during resource allocation, if the rsc is in the
* middle of a migration.
*/
rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
}
}
/* now do the rest of the resources */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->is_remote_node == TRUE) {
continue;
}
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
rsc->cmds->allocate(rsc, NULL, data_set);
}
}
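/*!
* \internal
* \brief Schedule fail count clearing for an orphaned resource
*
* If stopping orphaned resources is enabled, clear the resource's fail count
* on every online node where it has failed, ordering each clear operation
* before the resource's stop.
*
* \param[in] rsc      Resource to check (ignored unless it is an orphan)
* \param[in] data_set Cluster working set
*/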
static void
cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
return;
}
/* Don't recurse into ->children, those are just unallocated clone instances */
if(is_not_set(rsc->flags, pe_rsc_orphan)) {
return;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
if (node->details->online && get_failcount(node, rsc, NULL, data_set)) {
char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT,
node, FALSE, TRUE, data_set);
add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
pe_rsc_info(rsc,
"Clearing failure of %s on %s because it is orphaned "
CRM_XS " %s",
rsc->id, node->details->uname, clear_op->uuid);
custom_action_order(rsc, NULL, clear_op,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_optional, data_set);
}
}
}
gboolean
stage5(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (safe_str_neq(data_set->placement_strategy, "default")) {
GListPtr nodes = g_list_copy(data_set->nodes);
nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL);
data_set->resources =
g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
g_list_free(nodes);
}
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
}
crm_trace("Allocating services");
/* Take (next) highest resource, assign it and create its actions */
allocate_resources(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
}
if (is_set(data_set->flags, pe_flag_startup_probes)) {
crm_trace("Calculating needed probes");
/* This code probably needs optimization
* ptest -x with 100 nodes, 100 clones and clone-max=100:
With probes:
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
36s
ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
Without probes:
ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
*/
probe_resources(data_set);
}
crm_trace("Handle orphans");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
cleanup_orphans(rsc, data_set);
}
crm_trace("Creating actions");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
rsc->cmds->create_actions(rsc, data_set);
}
crm_trace("Creating done");
return TRUE;
}
static gboolean
is_managed(const resource_t * rsc)
{
GListPtr gIter = rsc->children;
if (is_set(rsc->flags, pe_rsc_managed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
if (is_managed(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (is_managed(rsc)) {
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Create pseudo-op for guest node fence, and order relative to it
*
* \param[in] node Guest node to fence
* \param[in] done STONITH_DONE operation
* \param[in] data_set Working set of CIB state
*/
static void
fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set)
{
resource_t *container = node->details->remote_rsc->container;
pe_action_t *stop = NULL;
pe_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than let it default to
* cluster's default action, because we are not _initiating_ fencing -- we
* are creating a pseudo-event to describe fencing that is already occurring
* by other means (container recovery).
*/
const char *fence_action = "off";
/* Check whether guest's container resource has any explicit stop or
* start (the stop may be implied by fencing of the guest's host).
*/
if (container) {
stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
fence_action = "reboot";
}
}
/* Create a fence pseudo-event, so we have an event to order actions
* against, and crmd can always detect it.
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, data_set);
update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
__FUNCTION__, __LINE__);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
if (stop) {
order_actions(stop, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
crm_info("Implying guest node %s is down (action %d) "
"after container %s is stopped (action %d)",
node->details->uname, stonith_op->id,
container->id, stop->id);
} else {
crm_info("Implying guest node %s is down (action %d) ",
node->details->uname, stonith_op->id);
}
/* @TODO: Order pseudo-fence after any (optional) fence of guest's host */
/* Order/imply other actions relative to pseudo-fence as with real fence */
stonith_constraints(node, stonith_op, data_set);
order_actions(stonith_op, done, pe_order_implies_then);
}
/*
* Create dependencies for stonith and shutdown operations
*/
gboolean
stage6(pe_working_set_t * data_set)
{
action_t *dc_down = NULL;
action_t *dc_fence = NULL;
action_t *stonith_op = NULL;
action_t *last_stonith = NULL;
gboolean integrity_lost = FALSE;
action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
action_t *done = get_pseudo_op(STONITH_DONE, data_set);
gboolean need_stonith = TRUE;
GListPtr gIter;
GListPtr stonith_ops = NULL;
/* Remote ordering constraints need to happen prior to calculating
* fencing because it is one more place we will mark the node as
* dirty.
*
* A nice side-effect of doing it first is that we can remove a
* bunch of special logic from apply_*_ordering() because it's
* already part of pe_fence_node()
*/
crm_trace("Creating remote ordering constraints");
apply_remote_node_ordering(data_set);
crm_trace("Processing fencing and shutdown cases");
if (any_managed_resources(data_set) == FALSE) {
crm_notice("Delaying fencing operations until there are resources to manage");
need_stonith = FALSE;
}
/* Check each node for stonith/shutdown */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (is_container_remote_node(node)) {
if (node->details->remote_requires_reset && need_stonith) {
fence_guest(node, done, data_set);
}
continue;
}
stonith_op = NULL;
if (node->details->unclean
&& need_stonith && pe_can_fence(data_set, node)) {
pe_warn("Scheduling Node %s for STONITH", node->details->uname);
stonith_op = pe_fence_op(node, NULL, FALSE, data_set);
stonith_constraints(node, stonith_op, data_set);
if (node->details->is_dc) {
dc_down = stonith_op;
dc_fence = stonith_op;
} else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) {
if (last_stonith) {
order_actions(last_stonith, stonith_op, pe_order_optional);
}
last_stonith = stonith_op;
} else {
order_actions(stonith_op, done, pe_order_implies_then);
stonith_ops = g_list_append(stonith_ops, stonith_op);
}
} else if (node->details->online && node->details->shutdown &&
/* TODO define what a shutdown op means for a remote node.
* For now we do not send shutdown operations for remote nodes, but
* if we can come up with a good use for this in the future, we will. */
is_remote_node(node) == FALSE) {
action_t *down_op = NULL;
crm_notice("Scheduling Node %s for shutdown", node->details->uname);
down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname),
CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set);
shutdown_constraints(node, down_op, data_set);
add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
if (node->details->is_dc) {
dc_down = down_op;
}
}
if (node->details->unclean && stonith_op == NULL) {
integrity_lost = TRUE;
pe_warn("Node %s is unclean!", node->details->uname);
}
}
if (integrity_lost) {
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
crm_notice("Cannot fence unclean nodes until quorum is"
" attained (or no-quorum-policy is set to ignore)");
}
}
if (dc_down != NULL) {
GListPtr gIter = NULL;
crm_trace("Ordering shutdowns before %s on %s (DC)",
dc_down->task, dc_down->node->details->uname);
add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *node_stop = (action_t *) gIter->data;
if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) {
continue;
} else if (node_stop->node->details->is_dc) {
continue;
}
crm_debug("Ordering shutdown on %s before %s on %s",
node_stop->node->details->uname,
dc_down->task, dc_down->node->details->uname);
order_actions(node_stop, dc_down, pe_order_optional);
}
if (last_stonith) {
if (dc_down != last_stonith) {
order_actions(last_stonith, dc_down, pe_order_optional);
}
} else {
GListPtr gIter2 = NULL;
for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = gIter2->next) {
stonith_op = (action_t *) gIter2->data;
if (dc_down != stonith_op) {
order_actions(stonith_op, dc_down, pe_order_optional);
}
}
}
}
if (dc_fence) {
order_actions(dc_down, done, pe_order_implies_then);
} else if (last_stonith) {
order_actions(last_stonith, done, pe_order_implies_then);
}
order_actions(done, all_stopped, pe_order_implies_then);
g_list_free(stonith_ops);
return TRUE;
}
/*
* Determine the sets of independent actions and the correct order for the
* actions in each set.
*
* Mark dependencies of un-runnable actions un-runnable
*
*/
static GListPtr
find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
{
GListPtr list = NULL;
list = find_actions(actions, original_key, NULL);
if (list == NULL) {
/* we're potentially searching a child of the original resource */
char *key = NULL;
char *tmp = NULL;
char *task = NULL;
int interval = 0;
if (parse_op_key(original_key, &tmp, &task, &interval)) {
key = generate_op_key(rsc->id, task, interval);
/* crm_err("looking up %s instead of %s", key, original_key); */
/* slist_iter(action, action_t, actions, lpc, */
/* crm_err(" - %s", action->uuid)); */
list = find_actions(actions, key, NULL);
} else {
crm_err("search key: %s", original_key);
}
free(key);
free(tmp);
free(task);
}
return list;
}
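/*!
* \internal
* \brief Apply an ordering constraint to the actions on its right-hand side
*
* \param[in] lh_action Left-hand action of the constraint (may be NULL)
* \param[in] rsc       Resource whose actions form the right-hand side
* \param[in] order     Ordering constraint being applied
*/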
static void
rsc_order_then(action_t * lh_action, resource_t * rsc, order_constraint_t * order)
{
GListPtr gIter = NULL;
GListPtr rh_actions = NULL;
action_t *rh_action = NULL;
enum pe_ordering type = order->type;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(order != NULL, return);
rh_action = order->rh_action;
crm_trace("Processing RH of ordering constraint %d", order->id);
if (rh_action != NULL) {
rh_actions = g_list_prepend(NULL, rh_action);
} else if (rsc != NULL) {
rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
}
if (rh_actions == NULL) {
pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
" ignoring", rsc->id, order->rh_action_task);
if (lh_action) {
pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
}
return;
}
if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
order->rh_action_task);
clear_bit(type, pe_order_implies_then);
}
gIter = rh_actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *rh_action_iter = (action_t *) gIter->data;
if (lh_action) {
order_actions(lh_action, rh_action_iter, type);
} else if (type & pe_order_implies_then) {
update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
} else {
crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
}
}
g_list_free(rh_actions);
}
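/*!
* \internal
* \brief Apply an ordering constraint starting from its left-hand resource
*
* Find the left-hand resource's actions matching the constraint (creating
* one if necessary), then order each of them against the right-hand side
* via rsc_order_then() or order_actions().
*
* \param[in] lh_rsc   Resource on the left-hand side of the constraint
* \param[in] order    Ordering constraint being applied
* \param[in] data_set Cluster working set
*/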
static void
rsc_order_first(resource_t * lh_rsc, order_constraint_t * order, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
GListPtr lh_actions = NULL;
action_t *lh_action = order->lh_action;
resource_t *rh_rsc = order->rh_rsc;
crm_trace("Processing LH of ordering constraint %d", order->id);
CRM_ASSERT(lh_rsc != NULL);
if (lh_action != NULL) {
lh_actions = g_list_prepend(NULL, lh_action);
} else if (lh_action == NULL) {
lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
}
if (lh_actions == NULL && lh_rsc != rh_rsc) {
char *key = NULL;
char *rsc_id = NULL;
char *op_type = NULL;
int interval = 0;
parse_op_key(order->lh_action_task, &rsc_id, &op_type, &interval);
key = generate_op_key(lh_rsc->id, op_type, interval);
if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else {
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
lh_actions = g_list_prepend(NULL, lh_action);
}
free(op_type);
free(rsc_id);
}
gIter = lh_actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *lh_action_iter = (action_t *) gIter->data;
if (rh_rsc == NULL && order->rh_action) {
rh_rsc = order->rh_action->rsc;
}
if (rh_rsc) {
rsc_order_then(lh_action_iter, rh_rsc, order);
} else if (order->rh_action) {
order_actions(lh_action_iter, order->rh_action, order->type);
}
}
g_list_free(lh_actions);
}
extern gboolean update_action(action_t * action);
extern void update_colo_start_chain(action_t * action);
enum remote_connection_state
{
remote_state_unknown = 0,
remote_state_alive = 1,
remote_state_resting = 2,
remote_state_failed = 3,
remote_state_stopped = 4
};
static int
is_recurring_action(action_t *action)
{
const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
int interval = crm_parse_int(interval_s, "0");
if(interval > 0) {
return TRUE;
}
return FALSE;
}
static void
apply_container_ordering(action_t *action, pe_working_set_t *data_set)
{
/* VMs are also classified as containers for these purposes... in
* that they both involve a 'thing' running on a real or remote
* cluster node.
*
* This allows us to be smarter about the type and extent of
* recovery actions required in various scenarios
*/
resource_t *remote_rsc = NULL;
resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
if (action->rsc == NULL) {
return;
}
CRM_ASSERT(action->node);
CRM_ASSERT(is_remote_node(action->node));
CRM_ASSERT(action->node->details->remote_rsc);
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
container = remote_rsc->container;
CRM_ASSERT(container);
if(is_set(container->flags, pe_rsc_failed)) {
- pe_fence_node(data_set, action->node, " because the container failed");
+ pe_fence_node(data_set, action->node, "container failed");
}
crm_trace("%s %s %s %s %d", action->uuid, action->task, remote_rsc->id, container->id, is_set(container->flags, pe_rsc_failed));
switch (task) {
case start_rsc:
case action_promote:
/* Force resource recovery if the container is recovered */
custom_action_order(container, generate_op_key(container->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_implies_then | pe_order_runnable_left, data_set);
/* Wait for the connection resource to be up too */
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left, data_set);
break;
case stop_rsc:
if(is_set(container->flags, pe_rsc_failed)) {
/* When the container representing a guest node fails,
* the stop action for all the resources living in
* that container is implied by the container
* stopping. This is similar to how fencing operations
* work for cluster nodes.
*/
} else {
/* Otherwise, ensure the operation happens before the connection is brought down */
custom_action_order(action->rsc, NULL, action,
remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
pe_order_preserve, data_set);
}
break;
case action_demote:
if(is_set(container->flags, pe_rsc_failed)) {
/* Just like a stop, the demote is implied by the
* container having failed/stopped
*
* If we really wanted to we would order the demote
* after the stop, IFF the container's current role was
* stopped (otherwise we re-introduce an ordering
* loop)
*/
} else {
/* Otherwise, ensure the operation happens before the connection is brought down */
custom_action_order(action->rsc, NULL, action,
remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
pe_order_preserve, data_set);
}
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set);
} else {
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left, data_set);
}
break;
}
}
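/*!
* \internal
* \brief Order an action on a remote node relative to its connection resource
*
* Determine the state of the node's remote connection resource, then order
* the action after the connection's start or before its stop as appropriate.
* If resources are active or in an unknown state while the connection is
* unrecoverable, the node is fenced.
*
* \param[in] action   Action scheduled on a remote node
* \param[in] data_set Cluster working set
*/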
static void
apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
{
resource_t *remote_rsc = NULL;
node_t *cluster_node = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = remote_state_unknown;
if (action->rsc == NULL) {
return;
}
CRM_ASSERT(action->node);
CRM_ASSERT(is_remote_node(action->node));
CRM_ASSERT(action->node->details->remote_rsc);
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
if(remote_rsc->running_on) {
cluster_node = remote_rsc->running_on->data;
}
/* If the cluster node the remote connection resource resides on
* is unclean or went offline, we can't process any operations
* on that remote node until after it starts elsewhere.
*/
if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
/* There is nowhere left to run the connection resource
* and the resource is in a failed state (either directly
* or because it is located on a failed node).
*
* If there are any resources known to be active on it (stop),
* or if there are resources in an unknown state (probe), we
* must assume the worst and fence it.
*/
if(is_set(action->node->details->remote_rsc->flags, pe_rsc_failed)) {
state = remote_state_failed;
} else if(cluster_node && cluster_node->details->unclean) {
state = remote_state_failed;
} else {
state = remote_state_stopped;
}
} else if (cluster_node == NULL) {
/* Connection is recoverable but not currently running anywhere, see if we can recover it first */
state = remote_state_unknown;
} else if(cluster_node->details->unclean == TRUE
|| cluster_node->details->online == FALSE) {
/* Connection is running on a dead node, see if we can recover it first */
state = remote_state_resting;
} else if (g_list_length(remote_rsc->running_on) > 1
&& remote_rsc->partial_migration_source
&& remote_rsc->partial_migration_target) {
/* We're in the middle of migrating a connection resource,
* wait until after the resource migrates before performing
* any actions.
*/
state = remote_state_resting;
} else {
state = remote_state_alive;
}
crm_trace("%s %s %s %d %d", action->uuid, action->task, action->node->details->uname, state, is_set(remote_rsc->flags, pe_rsc_failed));
switch (task) {
case start_rsc:
case action_promote:
if(state == remote_state_failed) {
/* Wait for the connection resource to be up and force recovery */
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_implies_then | pe_order_runnable_left, data_set);
} else {
/* Ensure the connection resource is up and assume everything is as we left it */
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left, data_set);
}
break;
case stop_rsc:
/* Handle special case with remote node where stop actions need to be
* ordered after the connection resource starts somewhere else.
*/
if(state == remote_state_resting) {
/* Wait for the connection resource to be up and assume everything is as we left it */
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left, data_set);
} else {
if(state == remote_state_failed) {
/* We would only be here if the resource is
* running on the remote node. Since we have no
* way to stop it, it is necessary to fence the
* node.
*/
- pe_fence_node(data_set, action->node, "because resources are active and the connection is unrecoverable");
+ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
}
custom_action_order(action->rsc, NULL, action,
remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
pe_order_preserve | pe_order_implies_first, data_set);
}
break;
case action_demote:
/* If the connection is being torn down, we don't want
* to build a constraint between a resource's demotion and
* the connection resource starting... because the connection
* resource can not start. The connection might already be up,
* but the "start" action would not be allowed, which in turn would
* block the demotion of any resources living in the node.
*/
if(state == remote_state_resting || state == remote_state_unknown) {
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve, data_set);
} /* Otherwise we can rely on the stop ordering */
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set);
} else {
if(task == monitor_rsc && state == remote_state_failed) {
/* We would only be here if we do not know the
* state of the resource on the remote node.
* Since we have no way to find out, it is
* necessary to fence the node.
*/
- pe_fence_node(data_set, action->node, "because resources are in an unknown state and the connection is unrecoverable");
+ pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
}
if(cluster_node && state == remote_state_stopped) {
/* The connection is currently up, but is going
* down permanently.
*
* Make sure we check services are actually
* stopped _before_ we let the connection get
* closed
*/
custom_action_order(action->rsc, NULL, action,
remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
pe_order_preserve | pe_order_runnable_left, data_set);
} else {
custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
action->rsc, NULL, action,
pe_order_preserve | pe_order_runnable_left, data_set);
}
}
break;
}
}
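/*!
* \internal
* \brief Create implicit orderings for actions on Pacemaker Remote nodes
*
* Order each action on a remote or guest node relative to the node's
* connection (or container) resource, so the action can only occur while
* the node is active.
*
* \param[in] data_set Cluster working set
*/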
static void
apply_remote_node_ordering(pe_working_set_t *data_set)
{
GListPtr gIter = data_set->actions;
if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (action->rsc == NULL) {
continue;
}
/* Special case. */
if (action->rsc &&
action->rsc->is_remote_node &&
safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
/* If we are clearing the failcount of an actual remote node
* connection resource, then make sure this happens before allowing
* the connection to start if we are planning on starting the
* connection during this transition.
*/
custom_action_order(action->rsc,
NULL,
action,
action->rsc,
generate_op_key(action->rsc->id, RSC_START, 0),
NULL,
pe_order_optional,
data_set);
continue;
}
/* If the action occurs on a Pacemaker Remote node, create
* ordering constraints that guarantee the action occurs while the node
* is active (after start, before stop ... things like that).
*/
if (action->node == NULL ||
is_remote_node(action->node) == FALSE ||
action->node->details->remote_rsc == NULL ||
is_set(action->flags, pe_action_pseudo)) {
crm_trace("Nothing required for %s", action->uuid);
} else if(action->node->details->remote_rsc->container) {
apply_container_ordering(action, data_set);
} else {
apply_remote_ordering(action, data_set);
}
}
}
static void
order_probes(pe_working_set_t * data_set)
{
#if 0
GListPtr gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
/* Given "A then B", we would prefer to wait for A to be
* started before probing B.
*
* If A was a filesystem on which the binaries and data for B
* lived, it would have been useful if the author of B's agent
* could assume that A is running before B.monitor will be
* called.
*
* However we can't _only_ probe once A is running, otherwise
* we'd not detect the state of B if A could not be started
* for some reason.
*
* In practice however, we cannot even do an opportunistic
* version of this because B may be moving:
*
* B.probe -> B.start
* B.probe -> B.stop
* B.stop -> B.start
* A.stop -> A.start
* A.start -> B.probe
*
* So far so good, but if we add the result of this code:
*
* B.stop -> A.stop
*
* Then we get a loop:
*
* B.probe -> B.stop -> A.stop -> A.start -> B.probe
*
* We could kill the 'B.probe -> B.stop' dependency, but that
* could mean stopping B "too" soon, because B.start must wait
* for the probes to complete.
*
* Another option is to allow it only if A is a non-unique
* clone with clone-max == node-max (since we'll never be
* moving it). However, we could still be stopping one
* instance at the same time as starting another.
* The complexity of checking for allowed conditions combined
* with the ever narrowing usecase suggests that this code
* should remain disabled until someone gets smarter.
*/
action_t *start = NULL;
GListPtr actions = NULL;
GListPtr probes = NULL;
char *key = NULL;
key = start_key(rsc);
actions = find_actions(rsc->actions, key, NULL);
free(key);
if (actions) {
start = actions->data;
g_list_free(actions);
}
if(start == NULL) {
crm_err("No start action for %s", rsc->id);
continue;
}
key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0);
probes = find_actions(rsc->actions, key, NULL);
free(key);
for (actions = start->actions_before; actions != NULL; actions = actions->next) {
action_wrapper_t *before = (action_wrapper_t *) actions->data;
GListPtr pIter = NULL;
action_t *first = before->action;
resource_t *first_rsc = first->rsc;
if(first->required_runnable_before) {
GListPtr clone_actions = NULL;
for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
before = (action_wrapper_t *) clone_actions->data;
crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
CRM_ASSERT(before->action->rsc);
first_rsc = before->action->rsc;
break;
}
} else if(safe_str_neq(first->task, RSC_START)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
if(first_rsc == NULL) {
continue;
} else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
continue;
} else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
continue;
}
crm_err("Appplying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
action_t *probe = (action_t *) pIter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
order_actions(first, probe, pe_order_optional);
}
}
}
#endif
}
gboolean
stage7(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying ordering constraints");
/* Don't ask me why, but apparently they need to be processed in
* the order they were created in... go figure
*
* Also, g_list_append() has horrendous performance characteristics,
* so we need to use g_list_prepend() and then reverse the list here.
*/
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
order_constraint_t *order = (order_constraint_t *) gIter->data;
resource_t *rsc = order->lh_rsc;
crm_trace("Applying ordering constraint: %d", order->id);
if (rsc != NULL) {
crm_trace("rsc_action-to-*");
rsc_order_first(rsc, order, data_set);
continue;
}
rsc = order->rh_rsc;
if (rsc != NULL) {
crm_trace("action-to-rsc_action");
rsc_order_then(order->lh_action, rsc, order);
} else {
crm_trace("action-to-action");
order_actions(order->lh_action, order->rh_action, order->type);
}
}
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
update_colo_start_chain(action);
}
crm_trace("Ordering probes");
order_probes(data_set);
crm_trace("Updating %d actions", g_list_length(data_set->actions));
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
update_action(action);
}
crm_trace("Processing reloads");
+ LogNodeActions(data_set, FALSE);
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
LogActions(rsc, data_set, FALSE);
}
return TRUE;
}
int transition_id = -1;
/*
* Create a dependency graph to send to the transitioner (via the CRMd)
*/
gboolean
stage8(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *value = NULL;
transition_id++;
crm_trace("Creating transition graph %d.", transition_id);
data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
value = pe_pref(data_set->config_hash, "cluster-delay");
crm_xml_add(data_set->graph, "cluster-delay", value);
value = pe_pref(data_set->config_hash, "stonith-timeout");
crm_xml_add(data_set->graph, "stonith-timeout", value);
crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(data_set->graph, "failed-start-offset", "1");
}
value = pe_pref(data_set->config_hash, "batch-limit");
crm_xml_add(data_set->graph, "batch-limit", value);
crm_xml_add_int(data_set->graph, "transition_id", transition_id);
value = pe_pref(data_set->config_hash, "migration-limit");
if (crm_int_helper(value, NULL) > 0) {
crm_xml_add(data_set->graph, "migration-limit", value);
}
/* errors...
slist_iter(action, action_t, action_list, lpc,
if(action->optional == FALSE && action->runnable == FALSE) {
print_action("Ignoring", action, TRUE);
}
);
*/
gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
rsc->cmds->expand(rsc, data_set);
}
crm_log_xml_trace(data_set->graph, "created resource-driven action list");
/* pseudo action to distribute list of nodes with maintenance state update */
add_maintenance_update(data_set);
/* catch any non-resource specific actions */
crm_trace("processing non-resource actions");
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (action->rsc
&& action->node
&& action->node->details->shutdown
&& is_not_set(action->rsc->flags, pe_rsc_maintenance)
&& is_not_set(action->flags, pe_action_optional)
&& is_not_set(action->flags, pe_action_runnable)
&& crm_str_eq(action->task, RSC_STOP, TRUE)
) {
/* Eventually we should just ignore the 'fence' case,
* but for now it's the best way to detect (in CTS) when
* CIB resource updates are being lost
*/
if (is_set(data_set->flags, pe_flag_have_quorum)
|| data_set->no_quorum_policy == no_quorum_ignore) {
crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
action->node->details->unclean ? "fence" : "shut down",
action->node->details->uname, action->rsc->id,
is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
action->uuid);
}
}
graph_element_from_action(action, data_set);
}
crm_log_xml_trace(data_set->graph, "created generic action list");
crm_trace("Created transition graph %d.", transition_id);
return TRUE;
}
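+/*!
+ * \internal
+ * \brief Log scheduled shutdown and fencing actions for cluster nodes
+ *
+ * \param[in] data_set Cluster working set
+ * \param[in] terminal If TRUE, print to stdout instead of the cluster log
+ */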
+void
+LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
+{
+ GListPtr gIter = NULL;
+
+ for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
+ char *node_name = NULL;
+ const char *task = NULL;
+ action_t *action = (action_t *) gIter->data;
+
+ if (action->rsc != NULL) {
+ continue;
+ }
+
+ if (is_container_remote_node(action->node)) {
+ node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
+ } else if(action->node) {
+ node_name = crm_strdup_printf("%s", action->node->details->uname);
+ }
+
+ if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
+ task = "Shutdown";
+ } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
+ task = "Fence";
+ }
+
+ if(task == NULL) {
+ /* Nothing to report */
+ } else if(terminal) {
+ printf(" * %s %s\n", task, node_name);
+ } else {
+ crm_notice(" * %s %s\n", task, node_name);
+ }
+
+ free(node_name);
+ }
+}
+
void
cleanup_alloc_calculations(pe_working_set_t * data_set)
{
if (data_set == NULL) {
return;
}
crm_trace("deleting %d order cons: %p",
g_list_length(data_set->ordering_constraints), data_set->ordering_constraints);
pe_free_ordering(data_set->ordering_constraints);
data_set->ordering_constraints = NULL;
crm_trace("deleting %d node cons: %p",
g_list_length(data_set->placement_constraints), data_set->placement_constraints);
pe_free_rsc_to_node(data_set->placement_constraints);
data_set->placement_constraints = NULL;
crm_trace("deleting %d inter-resource cons: %p",
g_list_length(data_set->colocation_constraints), data_set->colocation_constraints);
g_list_free_full(data_set->colocation_constraints, free);
data_set->colocation_constraints = NULL;
crm_trace("deleting %d ticket deps: %p",
g_list_length(data_set->ticket_constraints), data_set->ticket_constraints);
g_list_free_full(data_set->ticket_constraints, free);
data_set->ticket_constraints = NULL;
cleanup_calculations(data_set);
}
diff --git a/pengine/allocate.h b/pengine/allocate.h
index 3d7b7cef6c..d89943dd14 100644
--- a/pengine/allocate.h
+++ b/pengine/allocate.h
@@ -1,183 +1,184 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CRM_PENGINE_COMPLEX_ALLOC__H
# define CRM_PENGINE_COMPLEX_ALLOC__H
# include <glib.h>
# include <crm/common/xml.h>
# include <crm/pengine/status.h>
# include <crm/pengine/complex.h>
# include <crm/pengine/internal.h>
# include <pengine.h>
struct resource_alloc_functions_s {
GHashTable *(*merge_weights) (resource_t *, const char *, GHashTable *, const char *, float,
enum pe_weights);
node_t *(*allocate) (resource_t *, node_t *, pe_working_set_t *);
void (*create_actions) (resource_t *, pe_working_set_t *);
gboolean(*create_probe) (resource_t *, node_t *, action_t *, gboolean, pe_working_set_t *);
void (*internal_constraints) (resource_t *, pe_working_set_t *);
void (*rsc_colocation_lh) (resource_t *, resource_t *, rsc_colocation_t *);
void (*rsc_colocation_rh) (resource_t *, resource_t *, rsc_colocation_t *);
void (*rsc_location) (resource_t *, rsc_to_node_t *);
enum pe_action_flags (*action_flags) (action_t *, node_t *);
enum pe_graph_flags (*update_actions) (action_t *, action_t *, node_t *, enum pe_action_flags,
enum pe_action_flags, enum pe_ordering);
void (*expand) (resource_t *, pe_working_set_t *);
void (*append_meta) (resource_t * rsc, xmlNode * xml);
};
extern GHashTable *rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern GHashTable *clone_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern GHashTable *container_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern GHashTable *master_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern GHashTable *native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern GHashTable *group_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes,
const char *attr, float factor, enum pe_weights flags);
extern node_t *native_color(resource_t * rsc, node_t * preferred, pe_working_set_t * data_set);
extern void native_create_actions(resource_t * rsc, pe_working_set_t * data_set);
extern void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set);
extern void native_rsc_colocation_lh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void native_rsc_colocation_rh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void rsc_ticket_constraint(resource_t * lh_rsc, rsc_ticket_t * rsc_ticket,
pe_working_set_t * data_set);
extern enum pe_action_flags native_action_flags(action_t * action, node_t * node);
extern void native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint);
extern void native_expand(resource_t * rsc, pe_working_set_t * data_set);
extern void native_dump(resource_t * rsc, const char *pre_text, gboolean details);
extern gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set);
extern void native_append_meta(resource_t * rsc, xmlNode * xml);
extern int group_num_allowed_nodes(resource_t * rsc);
extern node_t *group_color(resource_t * rsc, node_t * preferred, pe_working_set_t * data_set);
extern void group_create_actions(resource_t * rsc, pe_working_set_t * data_set);
extern void group_internal_constraints(resource_t * rsc, pe_working_set_t * data_set);
extern void group_rsc_colocation_lh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void group_rsc_colocation_rh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern enum pe_action_flags group_action_flags(action_t * action, node_t * node);
extern void group_rsc_location(resource_t * rsc, rsc_to_node_t * constraint);
extern void group_expand(resource_t * rsc, pe_working_set_t * data_set);
extern void group_append_meta(resource_t * rsc, xmlNode * xml);
extern int container_num_allowed_nodes(resource_t * rsc);
extern node_t *container_color(resource_t * rsc, node_t * preferred, pe_working_set_t * data_set);
extern void container_create_actions(resource_t * rsc, pe_working_set_t * data_set);
extern void container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set);
extern void container_rsc_colocation_lh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void container_rsc_colocation_rh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void container_rsc_location(resource_t * rsc, rsc_to_node_t * constraint);
extern enum pe_action_flags container_action_flags(action_t * action, node_t * node);
extern void container_expand(resource_t * rsc, pe_working_set_t * data_set);
extern gboolean container_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set);
extern void container_append_meta(resource_t * rsc, xmlNode * xml);
extern int clone_num_allowed_nodes(resource_t * rsc);
extern node_t *clone_color(resource_t * rsc, node_t * preferred, pe_working_set_t * data_set);
extern void clone_create_actions(resource_t * rsc, pe_working_set_t * data_set);
extern void clone_internal_constraints(resource_t * rsc, pe_working_set_t * data_set);
extern void clone_rsc_colocation_lh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void clone_rsc_colocation_rh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void clone_rsc_location(resource_t * rsc, rsc_to_node_t * constraint);
extern enum pe_action_flags clone_action_flags(action_t * action, node_t * node);
extern void clone_expand(resource_t * rsc, pe_working_set_t * data_set);
extern gboolean clone_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set);
extern void clone_append_meta(resource_t * rsc, xmlNode * xml);
extern gboolean master_unpack(resource_t * rsc, pe_working_set_t * data_set);
extern node_t *master_color(resource_t * rsc, node_t * preferred, pe_working_set_t * data_set);
extern void master_create_actions(resource_t * rsc, pe_working_set_t * data_set);
extern void master_internal_constraints(resource_t * rsc, pe_working_set_t * data_set);
extern void master_rsc_colocation_rh(resource_t * lh_rsc, resource_t * rh_rsc,
rsc_colocation_t * constraint);
extern void master_append_meta(resource_t * rsc, xmlNode * xml);
/* extern resource_object_functions_t resource_variants[]; */
extern resource_alloc_functions_t resource_class_alloc_functions[];
extern gboolean is_active(rsc_to_node_t * cons);
extern gboolean native_constraint_violated(resource_t * rsc_lh, resource_t * rsc_rh,
rsc_colocation_t * constraint);
extern gboolean unpack_rsc_to_attr(xmlNode * xml_obj, pe_working_set_t * data_set);
extern gboolean unpack_rsc_to_node(xmlNode * xml_obj, pe_working_set_t * data_set);
extern gboolean unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set);
extern gboolean unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set);
extern gboolean unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set);
extern gboolean unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set);
-extern void LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal);
+void LogNodeActions(pe_working_set_t * data_set, gboolean terminal);
+void LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal);
void container_LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal);
extern void cleanup_alloc_calculations(pe_working_set_t * data_set);
extern void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op,
pe_working_set_t * data_set);
extern enum pe_graph_flags native_update_actions(action_t * first, action_t * then, node_t * node,
enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type);
extern enum pe_graph_flags group_update_actions(action_t * first, action_t * then, node_t * node,
enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type);
extern enum pe_graph_flags clone_update_actions(action_t * first, action_t * then, node_t * node,
enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type);
extern enum pe_graph_flags container_update_actions(action_t * first, action_t * then, node_t * node,
enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type);
gboolean update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line);
gboolean update_action(action_t * action);
void complex_set_cmds(resource_t * rsc);
#endif
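/*
 * Sketch of how the method table above is consumed (illustrative only; the
 * real scheduler spreads these calls across its stages, and rsc->cmds is
 * pointed at an entry of resource_class_alloc_functions[] elsewhere, e.g.
 * via complex_set_cmds()). Callers dispatch through the function pointers
 * instead of switching on the resource variant:
 */
static void
allocate_all(pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;

    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;

        rsc->cmds->internal_constraints(rsc, data_set);   /* implicit orderings */
        rsc->cmds->allocate(rsc, NULL, data_set);         /* choose a node */
        rsc->cmds->create_actions(rsc, data_set);         /* start/stop/monitor ops */
        rsc->cmds->expand(rsc, data_set);                 /* emit into the graph */
    }
}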
diff --git a/pengine/native.c b/pengine/native.c
index 2c4e2a1631..dd5ff184d1 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -1,3304 +1,3304 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <pengine.h>
#include <crm/pengine/rules.h>
#include <crm/msg_xml.h>
#include <allocate.h>
#include <notif.h>
#include <utils.h>
#include <crm/services.h>
/* #define DELETE_THEN_REFRESH 1 // The crmd will remove the resource from the CIB itself, making this redundant */
#define INFINITY_HACK (INFINITY * -100)
#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>
gboolean update_action(action_t * then);
void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh,
resource_t * rsc_rh, gboolean update_rh);
void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh,
resource_t * rsc_rh, gboolean update_rh);
void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set);
void RecurringOp(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set);
void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node,
pe_working_set_t * data_set);
void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set);
void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional,
pe_working_set_t * data_set);
gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
/* *INDENT-OFF* */
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current State */
/* Next State: Unknown Stopped Started Slave Master */
/* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, },
/* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, },
/* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
};
gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = {
/* Current State */
/* Next State: Unknown Stopped Started Slave Master */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
/* *INDENT-ON* */
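/*
 * Worked example of the two matrices above (hypothetical trace): taking a
 * resource that is currently Master down to a plain Started role proceeds
 * one hop at a time through rsc_state_matrix[], running the matching
 * rsc_action_matrix[] entry at each hop:
 *
 *   Master  -> Slave     rsc_action_matrix[Master][Slave]     == DemoteRsc
 *   Slave   -> Stopped   rsc_action_matrix[Slave][Stopped]    == StopRsc
 *   Stopped -> Started   rsc_action_matrix[Stopped][Started]  == StartRsc
 *
 * A minimal sketch of that walk (hypothetical helper; native_create_actions()
 * below contains the real loops):
 */
static void
walk_role_transitions(resource_t * rsc, node_t * node, enum rsc_role_e target,
                      pe_working_set_t * data_set)
{
    enum rsc_role_e role = rsc->role;

    while (role != target) {
        enum rsc_role_e next_role = rsc_state_matrix[role][target];

        if (rsc_action_matrix[role][next_role] (rsc, node, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }
}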
static action_t * get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current);
static gboolean
native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
/*
1. Sort by weight
2. color.chosen_node = the node (of those with the highest weight)
with the fewest resources
3. remove color.chosen_node from all other colors
*/
GListPtr nodes = NULL;
node_t *chosen = NULL;
int lpc = 0;
int multiple = 0;
int length = 0;
gboolean result = FALSE;
process_utilization(rsc, &prefer, data_set);
length = g_hash_table_size(rsc->allowed_nodes);
if (is_not_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to ? TRUE : FALSE;
}
if(rsc->allowed_nodes) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0));
}
if (prefer) {
node_t *best = g_list_nth_data(nodes, 0);
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen && chosen->weight >= 0
&& chosen->weight >= best->weight /* Possible alternative: (chosen->weight >= INFINITY || best->weight < INFINITY) */
&& can_run_resources(chosen)) {
pe_rsc_trace(rsc,
"Using preferred node %s for %s instead of choosing from %d candidates",
chosen->details->uname, rsc->id, length);
} else if (chosen && chosen->weight < 0) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname,
rsc->id);
chosen = NULL;
} else if (chosen && can_run_resources(chosen)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname,
rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname,
rsc->id);
}
}
if (chosen == NULL && rsc->allowed_nodes) {
chosen = g_list_nth_data(nodes, 0);
pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
chosen ? chosen->details->uname : "<none>", rsc->id, length);
if (chosen && chosen->weight > 0 && can_run_resources(chosen)) {
node_t *running = g_list_nth_data(rsc->running_on, 0);
if (running && can_run_resources(running) == FALSE) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, running->details->uname);
running = NULL;
}
for (lpc = 1; lpc < length && running; lpc++) {
node_t *tmp = g_list_nth_data(nodes, lpc);
if (tmp->weight == chosen->weight) {
multiple++;
if (tmp->details == running->details) {
/* prefer the existing node if scores are equal */
chosen = tmp;
}
}
}
}
}
if (multiple > 1) {
int log_level = LOG_INFO;
static char score[33];
score2char_stack(chosen->weight, score, sizeof(score));
if (chosen->weight >= INFINITY) {
log_level = LOG_WARNING;
}
do_crm_log(log_level, "%d nodes with equal score (%s) for"
" running %s resources. Chose %s.",
multiple, score, rsc->id, chosen->details->uname);
}
result = native_assign_node(rsc, nodes, chosen, FALSE);
g_list_free(nodes);
return result;
}
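/*
 * Worked example for native_choose_node() above (hypothetical scores): with
 * allowed_nodes weighted node1=100, node2=100, node3=50 and the resource
 * currently active on node2, the sort puts one of the top-scored nodes
 * first; the tie-break loop then prefers node2 because its score equals the
 * best score and it matches running_on, so an equal score never causes a
 * needless move.
 */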
static int
node_list_attr_score(GHashTable * list, const char *attr, const char *value)
{
GHashTableIter iter;
node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
if (attr == NULL) {
attr = "#" XML_ATTR_UNAME;
}
g_hash_table_iter_init(&iter, list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
int weight = node->weight;
if (can_run_resources(node) == FALSE) {
weight = -INFINITY;
}
if (weight > best_score || best_node == NULL) {
const char *tmp = g_hash_table_lookup(node->details->attrs, attr);
if (safe_str_eq(value, tmp)) {
best_score = weight;
best_node = node->details->uname;
}
}
}
if (safe_str_neq(attr, "#" XML_ATTR_UNAME)) {
crm_info("Best score for %s=%s was %s with %d",
attr, value, best_node ? best_node : "<none>", best_score);
}
return best_score;
}
static void
node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor,
gboolean only_positive)
{
int score = 0;
int new_score = 0;
GHashTableIter iter;
node_t *node = NULL;
if (attr == NULL) {
attr = "#" XML_ATTR_UNAME;
}
g_hash_table_iter_init(&iter, list1);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
CRM_LOG_ASSERT(node != NULL);
if (node == NULL) { continue; }
score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr));
new_score = merge_weights(factor * score, node->weight);
if (factor < 0 && score < 0) {
/* Negative preference for a node with a negative score
* should not become a positive preference
*
* TODO - Decide if we want to filter only if weight == -INFINITY
*
*/
crm_trace("%s: Filtering %d + %f*%d (factor * score)",
node->details->uname, node->weight, factor, score);
} else if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f*%d (node < 0)",
node->details->uname, node->weight, factor, score);
} else if (only_positive && new_score < 0 && node->weight > 0) {
node->weight = INFINITY_HACK;
crm_trace("%s: Filtering %d + %f*%d (score > 0)",
node->details->uname, node->weight, factor, score);
} else if (only_positive && new_score < 0 && node->weight == 0) {
crm_trace("%s: Filtering %d + %f*%d (score == 0)",
node->details->uname, node->weight, factor, score);
} else {
crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score);
node->weight = new_score;
}
}
}
GHashTable *
node_hash_dup(GHashTable * hash)
{
/* Hack! */
GListPtr list = g_hash_table_get_values(hash);
GHashTable *result = node_hash_from_list(list);
g_list_free(list);
return result;
}
GHashTable *
native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
float factor, enum pe_weights flags)
{
return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
}
GHashTable *
rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
float factor, enum pe_weights flags)
{
GHashTable *work = NULL;
int multiplier = 1;
if (factor < 0) {
multiplier = -1;
}
if (is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
return nodes;
}
set_bit(rsc->flags, pe_rsc_merging);
if (is_set(flags, pe_weights_init)) {
if (rsc->variant == pe_group && rsc->children) {
GListPtr last = rsc->children;
while (last->next != NULL) {
last = last->next;
}
pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);
} else {
work = node_hash_dup(rsc->allowed_nodes);
}
clear_bit(flags, pe_weights_init);
} else if (rsc->variant == pe_group && rsc->children) {
GListPtr iter = rsc->children;
pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id);
work = node_hash_dup(nodes);
for(iter = rsc->children; iter->next != NULL; iter = iter->next) {
work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags);
}
} else {
pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
work = node_hash_dup(nodes);
node_hash_update(work, rsc->allowed_nodes, attr, factor,
is_set(flags, pe_weights_positive));
}
if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
g_hash_table_destroy(work);
clear_bit(rsc->flags, pe_rsc_merging);
return nodes;
}
if (can_run_any(work)) {
GListPtr gIter = NULL;
if (is_set(flags, pe_weights_forward)) {
gIter = rsc->rsc_cons;
crm_trace("Checking %d additional colocation constraints", g_list_length(gIter));
} else if(rsc->variant == pe_group && rsc->children) {
GListPtr last = rsc->children;
while (last->next != NULL) {
last = last->next;
}
gIter = ((resource_t*)last->data)->rsc_cons_lhs;
crm_trace("Checking %d additional optional group colocation constraints from %s",
g_list_length(gIter), ((resource_t*)last->data)->id);
} else {
gIter = rsc->rsc_cons_lhs;
crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id);
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *other = NULL;
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (is_set(flags, pe_weights_forward)) {
other = constraint->rsc_rh;
} else {
other = constraint->rsc_lh;
}
pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback);
dump_node_scores(LOG_TRACE, NULL, rhs, work);
}
}
if (is_set(flags, pe_weights_positive)) {
node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (nodes) {
g_hash_table_destroy(nodes);
}
clear_bit(rsc->flags, pe_rsc_merging);
return work;
}
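/*
 * Worked numeric example for rsc_merge_weights() / node_hash_update()
 * (hypothetical values, assuming the usual INFINITY score of 1000000): a
 * colocation constraint scored 500000 is applied with
 * factor = 500000 / INFINITY = 0.5. If the other resource scores a node at
 * 200 and this resource already has that node at weight 40, the merged
 * weight is merge_weights(0.5 * 200, 40) = 140. When both the factor and
 * the other resource's score are negative, the contribution is filtered out
 * (first branch in node_hash_update()), so a mutual dislike never becomes a
 * positive preference.
 */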
node_t *
native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int alloc_details = scores_log_level + 1;
if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
/* never allocate children on their own */
pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
rsc->parent->id);
rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
}
if (is_not_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
set_bit(rsc->flags, pe_rsc_allocating);
print_resource(alloc_details, "Allocating: ", rsc, FALSE);
dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
GHashTable *archive = NULL;
resource_t *rsc_rh = constraint->rsc_rh;
pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
rsc->id, constraint->id, rsc_rh->id,
constraint->score, role2text(constraint->role_lh));
if (constraint->role_lh >= RSC_ROLE_MASTER
|| (constraint->score < 0 && constraint->score > -INFINITY)) {
archive = node_hash_dup(rsc->allowed_nodes);
}
rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint);
if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive) {
g_hash_table_destroy(archive);
}
}
dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
rsc->allowed_nodes =
constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY,
pe_weights_rollback);
}
print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE);
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
/* make sure it doesn't come up again */
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
} else if(rsc->next_role > rsc->role
&& is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_freeze) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
rsc->next_role = rsc->role;
}
dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__,
rsc->allowed_nodes);
if (is_set(data_set->flags, pe_flag_stonith_enabled)
&& is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
clear_bit(rsc->flags, pe_rsc_managed);
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
const char *reason = NULL;
node_t *assign_to = NULL;
rsc->next_role = rsc->role;
if (rsc->running_on == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_MASTER) {
assign_to = rsc->running_on->data;
reason = "master";
} else if (is_set(rsc->flags, pe_rsc_failed)) {
assign_to = rsc->running_on->data;
reason = "failed";
} else {
assign_to = rsc->running_on->data;
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
assign_to ? assign_to->details->uname : "'nowhere'", reason);
native_assign_node(rsc, NULL, assign_to, TRUE);
} else if (is_set(data_set->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
native_assign_node(rsc, NULL, NULL, TRUE);
} else if (is_set(rsc->flags, pe_rsc_provisional)
&& native_choose_node(rsc, prefer, data_set)) {
pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
} else if (rsc->allocated_to == NULL) {
if (is_not_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
}
clear_bit(rsc->flags, pe_rsc_allocating);
print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE);
if (rsc->is_remote_node) {
node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
CRM_ASSERT(remote_node != NULL);
if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
crm_trace("Setting remote node %s to ONLINE", remote_node->details->id);
remote_node->details->online = TRUE;
/* We shouldn't consider an unseen remote-node unclean if we are going
* to try and connect to it. Otherwise we get an unnecessary fence */
if (remote_node->details->unseen == TRUE) {
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Setting remote node %s to SHUTDOWN. next role = %s, allocated=%s",
remote_node->details->id, role2text(rsc->next_role), rsc->allocated_to ? "true" : "false");
remote_node->details->shutdown = TRUE;
}
}
return rsc->allocated_to;
}
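/*
 * Condensed decision order in native_color() above (informal): colocation
 * RH resources are allocated first and their scores folded into
 * allowed_nodes; a resource whose next_role is Stopped gets a -INFINITY
 * location so it cannot be placed; an unmanaged resource is pinned to
 * wherever it is currently running (or left unallocated if inactive);
 * otherwise native_choose_node() picks from the remaining allowed_nodes.
 * For a remote connection resource, the outcome also drives the remote
 * node's online/shutdown state.
 */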
static gboolean
is_op_dup(resource_t * rsc, const char *name, const char *interval)
{
gboolean dup = FALSE;
const char *id = NULL;
const char *value = NULL;
xmlNode *operation = NULL;
CRM_ASSERT(rsc);
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
value = crm_element_value(operation, "name");
if (safe_str_neq(value, name)) {
continue;
}
value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (value == NULL) {
value = "0";
}
if (safe_str_neq(value, interval)) {
continue;
}
if (id == NULL) {
id = ID(operation);
} else {
crm_config_err("Operation %s is a duplicate of %s", ID(operation), id);
crm_config_err
("Do not use the same (name, interval) combination more than once per resource");
dup = TRUE;
}
}
}
return dup;
}
void
RecurringOp(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *value = NULL;
const char *interval = NULL;
const char *node_uname = NULL;
unsigned long long interval_ms = 0;
action_t *mon = NULL;
gboolean is_optional = TRUE;
GListPtr possible_matches = NULL;
/* Only process operations that do not have role="Stopped" */
value = crm_element_value(operation, "role");
if (value && text2role(value) == RSC_ROLE_STOPPED) {
return;
}
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
ID(operation), rsc->id, role2text(rsc->next_role),
node ? node->details->uname : "n/a");
if (node != NULL) {
node_uname = node->details->uname;
}
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_get_interval(interval);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval)) {
return;
}
if (safe_str_eq(name, RSC_STOP)
|| safe_str_eq(name, RSC_START)
|| safe_str_eq(name, RSC_DEMOTE)
|| safe_str_eq(name, RSC_PROMOTE)
) {
crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name);
return;
}
key = generate_op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
/* disabled */
free(key);
return;
}
if (start != NULL) {
pe_rsc_trace(rsc, "Marking %s %s due to %s",
key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
start->uuid);
is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
} else {
pe_rsc_trace(rsc, "Marking %s optional", key);
is_optional = TRUE;
}
/* start a monitor for an already active resource */
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
is_optional = FALSE;
pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
} else {
GListPtr gIter = NULL;
for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
action_t *op = (action_t *) gIter->data;
if (is_set(op->flags, pe_action_reschedule)) {
is_optional = FALSE;
break;
}
}
g_list_free(possible_matches);
}
if ((rsc->next_role == RSC_ROLE_MASTER && value == NULL)
|| (value != NULL && text2role(value) != rsc->next_role)) {
int log_level = LOG_DEBUG_2;
const char *result = "Ignoring";
if (is_optional) {
char *local_key = strdup(key);
log_level = LOG_INFO;
result = "Cancelling";
/* it's running : cancel it */
mon = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set);
free(mon->task);
free(mon->cancel_task);
mon->task = strdup(RSC_CANCEL);
mon->cancel_task = strdup(name);
add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval);
add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name);
local_key = NULL;
switch (rsc->role) {
case RSC_ROLE_SLAVE:
case RSC_ROLE_STARTED:
if (rsc->next_role == RSC_ROLE_MASTER) {
local_key = promote_key(rsc);
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
local_key = stop_key(rsc);
}
break;
case RSC_ROLE_MASTER:
local_key = demote_key(rsc);
break;
default:
break;
}
if (local_key) {
custom_action_order(rsc, NULL, mon, rsc, local_key, NULL,
pe_order_runnable_left, data_set);
}
mon = NULL;
}
do_crm_log(log_level, "%s action %s (%s vs. %s)",
result, key, value ? value : role2text(RSC_ROLE_SLAVE),
role2text(rsc->next_role));
free(key);
return;
}
mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
key = mon->uuid;
if (is_optional) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(node_uname), mon->uuid);
}
if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", crm_str(node_uname),
mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
} else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(node_uname),
mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
} else if (is_set(mon->flags, pe_action_optional) == FALSE) {
pe_rsc_info(rsc, " Start recurring %s (%llus) for %s on %s", mon->task, interval_ms / 1000,
rsc->id, crm_str(node_uname));
}
if (rsc->next_role == RSC_ROLE_MASTER) {
char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
free(running_master);
}
if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, start_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, reload_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
if (rsc->next_role == RSC_ROLE_MASTER) {
custom_action_order(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
} else if (rsc->role == RSC_ROLE_MASTER) {
custom_action_order(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
}
}
}
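/*
 * Example of the kind of operation RecurringOp() consumes (hypothetical CIB
 * snippet, shown here only as a comment):
 *
 *   <op id="db-monitor-10s" name="monitor" interval="10s" role="Master"/>
 *
 * The interval is converted to milliseconds and generate_op_key() turns it
 * into an action key of the form "<rsc>_monitor_10000". Because
 * role="Master", the monitor is only created when the resource's next role
 * is Master; otherwise any already-active copy of it is cancelled, as the
 * code above does.
 */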
void
Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
RecurringOp(rsc, start, node, operation, data_set);
}
}
}
}
void
RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval = NULL;
const char *node_uname = NULL;
unsigned long long interval_ms = 0;
GListPtr possible_matches = NULL;
GListPtr gIter = NULL;
/* TODO: Support of non-unique clone */
if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
return;
}
/* Only process operations with role="Stopped" */
role = crm_element_value(operation, "role");
if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
return;
}
pe_rsc_trace(rsc,
"Creating recurring actions %s for %s in role %s on nodes where it'll not be running",
ID(operation), rsc->id, role2text(rsc->next_role));
if (node != NULL) {
node_uname = node->details->uname;
}
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_get_interval(interval);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval)) {
return;
}
if (safe_str_eq(name, RSC_STOP)
|| safe_str_eq(name, RSC_START)
|| safe_str_eq(name, RSC_DEMOTE)
|| safe_str_eq(name, RSC_PROMOTE)
) {
crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name);
return;
}
key = generate_op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
/* disabled */
free(key);
return;
}
/* if the monitor exists on the node where the resource will be running, cancel it */
if (node != NULL) {
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches) {
action_t *cancel_op = NULL;
char *local_key = strdup(key);
g_list_free(possible_matches);
cancel_op = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set);
free(cancel_op->task);
free(cancel_op->cancel_task);
cancel_op->task = strdup(RSC_CANCEL);
cancel_op->cancel_task = strdup(name);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name);
local_key = NULL;
if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
/* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
/* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
pe_order_runnable_left, data_set);
}
pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
key, role, role2text(rsc->next_role), crm_str(node_uname));
}
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *stop_node = (node_t *) gIter->data;
const char *stop_node_uname = stop_node->details->uname;
gboolean is_optional = TRUE;
gboolean probe_is_optional = TRUE;
gboolean stop_is_optional = TRUE;
action_t *stopped_mon = NULL;
char *rc_inactive = NULL;
GListPtr probe_complete_ops = NULL;
GListPtr stop_ops = NULL;
GListPtr local_gIter = NULL;
char *stop_op_key = NULL;
if (node_uname && safe_str_eq(stop_node_uname, node_uname)) {
continue;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
ID(operation), rsc->id, crm_str(stop_node_uname));
/* start a monitor for an already stopped resource */
possible_matches = find_actions_exact(rsc->actions, key, stop_node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
crm_str(stop_node_uname));
is_optional = FALSE;
} else {
pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
crm_str(stop_node_uname));
is_optional = TRUE;
g_list_free(possible_matches);
}
stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
free(rc_inactive);
if (is_set(rsc->flags, pe_rsc_managed)) {
char *probe_key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0);
GListPtr probes = find_actions(rsc->actions, probe_key, stop_node);
GListPtr pIter = NULL;
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
action_t *probe = (action_t *) pIter->data;
order_actions(probe, stopped_mon, pe_order_runnable_left);
crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
}
g_list_free(probes);
free(probe_key);
}
if (probe_complete_ops) {
g_list_free(probe_complete_ops);
}
stop_op_key = stop_key(rsc);
stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node);
for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
action_t *stop = (action_t *) local_gIter->data;
if (is_set(stop->flags, pe_action_optional) == FALSE) {
stop_is_optional = FALSE;
}
if (is_set(stop->flags, pe_action_runnable) == FALSE) {
crm_debug("%s\t %s (cancelled : stop un-runnable)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, strdup(stop_op_key), stop,
NULL, strdup(key), stopped_mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
}
}
if (stop_ops) {
g_list_free(stop_ops);
}
free(stop_op_key);
if (is_optional == FALSE && probe_is_optional && stop_is_optional
&& is_set(rsc->flags, pe_rsc_managed) == FALSE) {
pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
key, crm_str(stop_node_uname));
update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
}
if (is_set(stopped_mon->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
}
if (stop_node->details->online == FALSE || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(stopped_mon->flags, pe_action_runnable)
&& is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task,
interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
}
}
free(key);
}
void
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
RecurringOp_Stopped(rsc, start, node, operation, data_set);
}
}
}
}
static void
handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set)
{
action_t *migrate_to = NULL;
action_t *migrate_from = NULL;
action_t *start = NULL;
action_t *stop = NULL;
gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
start = start_action(rsc, chosen, TRUE);
stop = stop_action(rsc, current, TRUE);
if (partial == FALSE) {
migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set);
}
migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
if ((migrate_to && migrate_from) || (migrate_from && partial)) {
set_bit(start->flags, pe_action_migrate_runnable);
set_bit(stop->flags, pe_action_migrate_runnable);
update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */
/* order probes before migrations */
if (partial) {
set_bit(migrate_from->flags, pe_action_migrate_runnable);
migrate_from->needs = start->needs;
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set);
} else {
set_bit(migrate_from->flags, pe_action_migrate_runnable);
set_bit(migrate_to->flags, pe_action_migrate_runnable);
migrate_to->needs = start->needs;
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
}
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set);
}
if (migrate_to) {
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
/* pcmk remote connections don't require pending to be recorded in cib.
* We can optimize cib writes by only setting PENDING for non pcmk remote
* connection resources */
if (rsc->is_remote_node == FALSE) {
/* migrate_to takes place on the source node, but can
* have an effect on the target node depending on how
* the agent is written. Because of this, we have to maintain
* a record that the migrate_to occurred in case the source node
* loses membership while the migrate_to action is still in-flight. */
add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
}
}
if (migrate_from) {
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
}
}
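/*
 * Ordering summary for handle_migration_actions() above (informal sketch of
 * the constraints created for a full, non-partial migration):
 *
 *   probe (monitor)  ->  migrate_to   (runs on the source node)
 *   migrate_to       ->  migrate_from (runs on the target node)
 *   migrate_from     ->  stop         (source node)
 *   migrate_from     ->  start        (turned into a pseudo-action)
 *
 * so the resource only counts as started on the target once migrate_from
 * has completed, and the source-side stop waits for it as well.
 */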
void
native_create_actions(resource_t * rsc, pe_working_set_t * data_set)
{
action_t *start = NULL;
node_t *chosen = NULL;
node_t *current = NULL;
gboolean need_stop = FALSE;
gboolean is_moving = FALSE;
gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;
GListPtr gIter = NULL;
int num_active_nodes = 0;
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
CRM_ASSERT(rsc);
chosen = rsc->allocated_to;
if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
rsc->next_role = RSC_ROLE_STARTED;
pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
} else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
rsc->next_role = RSC_ROLE_STOPPED;
pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
}
pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
role2text(rsc->role), role2text(rsc->next_role));
if (rsc->running_on) {
current = rsc->running_on->data;
}
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *n = (node_t *) gIter->data;
if (rsc->partial_migration_source &&
(n->details == rsc->partial_migration_source->details)) {
current = rsc->partial_migration_source;
}
num_active_nodes++;
}
for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
action_t *stop = stop_action(rsc, current, FALSE);
set_bit(stop->flags, pe_action_dangle);
pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, current->details->uname);
if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, FALSE, data_set);
}
}
if (num_active_nodes > 1) {
if (num_active_nodes == 2
&& chosen
&& rsc->partial_migration_target
&& rsc->partial_migration_source
&& (current->details == rsc->partial_migration_source->details)
&& (chosen->details == rsc->partial_migration_target->details)) {
/* Here the chosen node is still the migration target from a partial
* migration. Attempt to continue the migration instead of recovering
* by stopping the resource everywhere and starting it on a single node. */
pe_rsc_trace(rsc,
"Will attempt to continue with a partial migration to target %s from %s",
rsc->partial_migration_target->details->id,
rsc->partial_migration_source->details->id);
} else {
const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if(rsc->partial_migration_target && rsc->partial_migration_source) {
crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too", rsc->id,
rsc->partial_migration_target->details->uname,
rsc->partial_migration_source->details->uname);
} else {
pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s",
rsc->id, class, type, num_active_nodes, recovery2text(rsc->recovery_type));
crm_warn("See %s for more information.",
"http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active");
}
if (rsc->recovery_type == recovery_stop_start) {
need_stop = TRUE;
}
/* If by chance a partial migration is in progress but the
* migration target is no longer the chosen node, clear all
* partial migration data. */
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = FALSE;
}
}
if (is_set(rsc->flags, pe_rsc_start_pending)) {
start = start_action(rsc, chosen, TRUE);
set_bit(start->flags, pe_action_print_always);
}
if (current && chosen && current->details != chosen->details) {
pe_rsc_trace(rsc, "Moving %s", rsc->id);
is_moving = TRUE;
need_stop = TRUE;
} else if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
need_stop = TRUE;
} else if (is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Block %s", rsc->id);
need_stop = TRUE;
} else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
/* Recovery of a promoted resource */
start = start_action(rsc, chosen, TRUE);
if (is_set(start->flags, pe_action_optional) == FALSE) {
pe_rsc_trace(rsc, "Forced start %s", rsc->id);
need_stop = TRUE;
}
}
pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
role2text(rsc->role), role2text(rsc->next_role));
/* Create any additional actions required when bringing resource down and
* back up to same level.
*/
role = rsc->role;
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
rsc->id, need_stop ? " required" : "");
if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
next_role = rsc_state_matrix[role][rsc->role];
pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
rsc->id, need_stop ? " required" : "");
if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
role = rsc->role;
/* Required steps from this role to the next */
while (role != rsc->next_role) {
next_role = rsc_state_matrix[role][rsc->next_role];
pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
break;
}
role = next_role;
}
if(is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
} else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
pe_rsc_trace(rsc, "Monitor ops for active resource");
start = start_action(rsc, chosen, TRUE);
Recurring(rsc, start, chosen, data_set);
Recurring_Stopped(rsc, start, chosen, data_set);
} else {
pe_rsc_trace(rsc, "Monitor ops for in-active resource");
Recurring_Stopped(rsc, NULL, NULL, data_set);
}
/* If we are stuck in a partial migration where the target of the
* partial migration no longer matches the chosen node, a full
* stop/start is required. */
if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
allow_migrate = FALSE;
} else if (is_moving == FALSE ||
is_not_set(rsc->flags, pe_rsc_managed) ||
is_set(rsc->flags, pe_rsc_failed) ||
is_set(rsc->flags, pe_rsc_start_pending) ||
(current->details->unclean == TRUE) ||
rsc->next_role < RSC_ROLE_STARTED) {
allow_migrate = FALSE;
}
if (allow_migrate) {
handle_migration_actions(rsc, current, chosen, data_set);
}
}
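/*
 * Informal checklist of when native_create_actions() above hands off to
 * handle_migration_actions(): the resource allows migration, is managed,
 * is actually moving between nodes, has not failed, has no pending start,
 * its current node is not unclean, its next role is at least Started, and
 * any partial migration already under way still targets the chosen node.
 */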
static void
rsc_avoids_remote_nodes(resource_t *rsc)
{
GHashTableIter iter;
node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc) {
node->weight = -INFINITY;
}
}
}
void
native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
{
/* This function is on the critical path and worth optimizing as much as possible */
resource_t *top = uber_parent(rsc);
int type = pe_order_optional | pe_order_implies_then | pe_order_restart;
gboolean is_stonith = is_set(rsc->flags, pe_rsc_fence_device);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set);
if (top->variant == pe_master || rsc->role > RSC_ROLE_SLAVE) {
custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_implies_first_master, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, data_set);
}
if (is_stonith == FALSE
&& is_set(data_set->flags, pe_flag_enable_unfencing)
&& is_set(rsc->flags, pe_rsc_needs_unfencing)) {
/* Check if the node needs to be unfenced first */
node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
action_t *unfence = pe_fence_op(node, "on", TRUE, data_set);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pe_order_optional|pe_order_same_node, data_set);
custom_action_order(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pe_order_implies_then_on_node|pe_order_same_node,
data_set);
}
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
return;
}
{
action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, strdup(all_stopped->task), all_stopped,
pe_order_implies_then | pe_order_runnable_left, data_set);
}
if (g_hash_table_size(rsc->utilization) > 0
&& safe_str_neq(data_set->placement_strategy, "default")) {
GHashTableIter iter;
node_t *next = NULL;
GListPtr gIter = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, data_set->placement_strategy);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_');
action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = node_copy(current);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
}
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&next)) {
char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_');
action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = node_copy(next);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, start_key(rsc), NULL, pe_order_load, data_set);
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
pe_order_load, data_set);
free(load_stopped_task);
}
}
if (rsc->container) {
resource_t *remote_rsc = NULL;
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else if (rsc->is_remote_node == FALSE) {
remote_rsc = rsc_contains_remote_node(data_set, rsc->container);
}
if (remote_rsc) {
/* The container represents a Pacemaker Remote node, so force the
* resource on the Pacemaker Remote node instead of colocating the
* resource with the container resource.
*/
GHashTableIter iter;
node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL,
pe_order_implies_first, data_set);
if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
rsc_colocation_new("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, data_set);
}
}
if (rsc->is_remote_node || is_stonith) {
/* don't allow remote nodes to run stonith devices
* or remote connection resources.*/
rsc_avoids_remote_nodes(rsc);
}
/* If this is a guest node's implicit remote connection, do not allow the
* guest resource to live on a Pacemaker Remote node, to avoid nesting
* remotes. However, allow bundles to run on remote nodes.
*/
if (rsc->is_remote_node && rsc->container
&& is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
}
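/* Apply a colocation constraint by delegating to the right-hand resource's
 * colocation handler (which knows how to constrain its own variant).
 */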
void
native_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", constraint->id);
return;
} else if (constraint->rsc_rh == NULL) {
pe_err("rsc_rh was NULL for %s", constraint->id);
return;
}
pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
rsc_rh->id);
rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint);
}
enum filter_colocation_res
filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh,
rsc_colocation_t * constraint, gboolean preview)
{
if (constraint->score == 0) {
return influence_nothing;
}
/* rh side must be allocated before we can process constraint */
if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
return influence_nothing;
}
if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
rsc_lh->parent &&
rsc_lh->parent->variant == pe_master && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
/* LH and RH resources have already been allocated, place the correct
* priority on the LH rsc for the given multistate resource role */
return influence_rsc_priority;
}
if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
/* error check */
struct node_shared_s *details_lh;
struct node_shared_s *details_rh;
if ((constraint->score > -INFINITY) && (constraint->score < INFINITY)) {
return influence_nothing;
}
details_rh = rsc_rh->allocated_to ? rsc_rh->allocated_to->details : NULL;
details_lh = rsc_lh->allocated_to ? rsc_lh->allocated_to->details : NULL;
if (constraint->score == INFINITY && details_lh != details_rh) {
crm_err("%s and %s are both allocated"
" but to different nodes: %s vs. %s",
rsc_lh->id, rsc_rh->id,
details_lh ? details_lh->uname : "n/a", details_rh ? details_rh->uname : "n/a");
} else if (constraint->score == -INFINITY && details_lh == details_rh) {
crm_err("%s and %s are both allocated"
" but to the SAME node: %s",
rsc_lh->id, rsc_rh->id, details_rh ? details_rh->uname : "n/a");
}
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
role2text(constraint->role_lh), role2text(rsc_lh->next_role));
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
crm_trace("LH: Skipping -ve constraint: \"%s\" state filter",
role2text(constraint->role_lh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
crm_trace("RH: Skipping -ve constraint: \"%s\" state filter",
role2text(constraint->role_rh));
return influence_nothing;
}
return influence_rsc_location;
}
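/* For an already-allocated multistate colocation, adjust the LH resource's
 * promotion priority instead of its allowed nodes, comparing the constraint's
 * node attribute (default "#id") on the nodes each resource is allocated to.
 */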
static void
influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *rh_value = NULL;
const char *lh_value = NULL;
const char *attribute = "#id";
int score_multiplier = 1;
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
return;
}
lh_value = g_hash_table_lookup(rsc_lh->allocated_to->details->attrs, attribute);
rh_value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute);
if (!safe_str_eq(lh_value, rh_value)) {
if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
rsc_lh->priority = -INFINITY;
}
return;
}
if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
return;
}
if (constraint->role_lh == RSC_ROLE_SLAVE) {
score_multiplier = -1;
}
rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
}
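/* Apply a colocation constraint to the LH resource's allowed-node weights,
 * matching each node's value of the constraint's node attribute (default
 * "#id") against the node the RH resource is allocated to. Non-mandatory
 * scores that would leave the LH resource with nowhere to run are rolled back.
 */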
static void
colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *tmp = NULL;
const char *value = NULL;
const char *attribute = "#id";
GHashTable *work = NULL;
gboolean do_check = FALSE;
GHashTableIter iter;
node_t *node = NULL;
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (rsc_rh->allocated_to) {
value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute);
do_check = TRUE;
} else if (constraint->score < 0) {
/* nothing to do:
* anti-colocation with something that is not running
*/
return;
}
work = node_hash_dup(rsc_lh->allowed_nodes);
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
tmp = g_hash_table_lookup(node->details->attrs, attribute);
if (do_check && safe_str_eq(tmp, value)) {
if (constraint->score < INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id,
node->details->uname, constraint->score);
node->weight = merge_weights(constraint->score, node->weight);
}
} else if (do_check == FALSE || constraint->score >= INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id,
node->details->uname, constraint->score,
do_check ? "failed" : "unallocated");
node->weight = merge_weights(-constraint->score, node->weight);
}
}
if (can_run_any(work)
|| constraint->score <= -INFINITY || constraint->score >= INFINITY) {
g_hash_table_destroy(rsc_lh->allowed_nodes);
rsc_lh->allowed_nodes = work;
work = NULL;
} else {
static char score[33];
score2char_stack(constraint->score, score, sizeof(score));
pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)",
rsc_lh->id, rsc_rh->id, do_check, score);
}
if (work) {
g_hash_table_destroy(work);
}
}
void
native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
enum filter_colocation_res filter_results;
CRM_ASSERT(rsc_lh);
CRM_ASSERT(rsc_rh);
filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)",
constraint->score >= 0 ? "" : "Anti-",
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
switch (filter_results) {
case influence_rsc_priority:
influence_priority(rsc_lh, rsc_rh, constraint);
break;
case influence_rsc_location:
pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)",
constraint->score >= 0 ? "" : "Anti-",
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score);
colocation_match(rsc_lh, rsc_rh, constraint);
break;
case influence_nothing:
default:
return;
}
}
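/* Return TRUE if the ticket constraint applies to rsc_lh in its current role */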
static gboolean
filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
{
if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
role2text(rsc_ticket->role_lh));
return FALSE;
}
return TRUE;
}
void
rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
{
if (rsc_ticket == NULL) {
pe_err("rsc_ticket was NULL");
return;
}
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
return;
}
if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
return;
}
if (rsc_lh->children) {
GListPtr gIter = rsc_lh->children;
pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
}
return;
}
pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
role2text(rsc_ticket->role_lh));
if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) {
GListPtr gIter = NULL;
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
break;
case loss_ticket_demote:
/* Promotion score will be set to -INFINITY in master_promotion_order() */
if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
}
break;
case loss_ticket_fence:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
- pe_fence_node(data_set, node, "because deadman ticket was lost");
+ pe_fence_node(data_set, node, "deadman ticket was lost");
}
break;
case loss_ticket_freeze:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
if (g_list_length(rsc_lh->running_on) > 0) {
clear_bit(rsc_lh->flags, pe_rsc_managed);
set_bit(rsc_lh->flags, pe_rsc_block);
}
break;
}
} else if (rsc_ticket->ticket->granted == FALSE) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
}
} else if (rsc_ticket->ticket->standby) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
}
}
}
enum pe_action_flags
native_action_flags(action_t * action, node_t * node)
{
return action->flags;
}
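/* Propagate an ordering of the given type from 'first' to 'then' (and back),
 * clearing optional, runnable and migrate-runnable flags as required, and
 * report which of the two actions changed so the caller can reprocess them.
 */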
enum pe_graph_flags
native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type)
{
/* flags == get_action_flags(first, then_node) called from update_action() */
enum pe_graph_flags changed = pe_graph_none;
enum pe_action_flags then_flags = then->flags;
enum pe_action_flags first_flags = first->flags;
crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, then->uuid, then->flags);
if (type & pe_order_asymmetrical) {
resource_t *then_rsc = then->rsc;
enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
if (!then_rsc) {
/* ignore */
} else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
/* ignore... if 'then' is supposed to be stopped after 'first', but
* then is already stopped, there is nothing to be done when non-symmetrical. */
} else if ((then_rsc_role >= RSC_ROLE_STARTED)
&& safe_str_eq(then->task, RSC_START)
&& then->node
&& then_rsc->running_on
&& g_list_length(then_rsc->running_on) == 1
&& then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
/* ignore... if 'then' is supposed to be started after 'first', but
* then is already started, there is nothing to be done when non-symmetrical. */
} else if (!(first->flags & pe_action_runnable)) {
/* prevent 'then' action from happening if 'first' is not runnable and
* 'then' has not yet occurred. */
pe_clear_action_bit(then, pe_action_runnable);
pe_clear_action_bit(then, pe_action_optional);
pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
} else {
/* ignore... then is allowed to start/stop if it wants to. */
}
}
if (type & pe_order_implies_first) {
if ((filter & pe_action_optional) && (flags & pe_action_optional) == 0) {
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_optional);
}
if (is_set(flags, pe_action_migrate_runnable) &&
is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
is_set(then->flags, pe_action_optional) == FALSE) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_migrate_runnable);
}
}
if (type & pe_order_implies_first_master) {
if ((filter & pe_action_optional) &&
((then->flags & pe_action_optional) == FALSE) &&
then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
pe_clear_action_bit(first, pe_action_optional);
if (is_set(first->flags, pe_action_migrate_runnable) &&
is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_migrate_runnable);
}
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
}
}
if ((type & pe_order_implies_first_migratable)
&& is_set(filter, pe_action_optional)) {
if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
((then->flags & pe_action_runnable) == FALSE)) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_runnable);
}
if ((then->flags & pe_action_optional) == 0) {
pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_optional);
}
}
if ((type & pe_order_pseudo_left)
&& is_set(filter, pe_action_optional)) {
if ((first->flags & pe_action_runnable) == FALSE) {
pe_clear_action_bit(then, pe_action_migrate_runnable);
pe_clear_action_bit(then, pe_action_pseudo);
pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
}
}
if (is_set(type, pe_order_runnable_left)
&& is_set(filter, pe_action_runnable)
&& is_set(then->flags, pe_action_runnable)
&& is_set(flags, pe_action_runnable) == FALSE) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
pe_clear_action_bit(then, pe_action_runnable);
pe_clear_action_bit(then, pe_action_migrate_runnable);
}
if (is_set(type, pe_order_implies_then)
&& is_set(filter, pe_action_optional)
&& is_set(then->flags, pe_action_optional)
&& is_set(flags, pe_action_optional) == FALSE) {
/* in this case, treat migrate_runnable as if first is optional */
if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
pe_clear_action_bit(then, pe_action_optional);
}
}
if (is_set(type, pe_order_restart)) {
const char *reason = NULL;
CRM_ASSERT(first->rsc && first->rsc->variant == pe_native);
CRM_ASSERT(then->rsc && then->rsc->variant == pe_native);
if ((filter & pe_action_runnable)
&& (then->flags & pe_action_runnable) == 0
&& (then->rsc->flags & pe_rsc_managed)) {
reason = "shutdown";
}
if ((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) {
reason = "recover";
}
if (reason && is_set(first->flags, pe_action_optional)) {
if (is_set(first->flags, pe_action_runnable)
|| is_not_set(then->flags, pe_action_optional)) {
pe_rsc_trace(first->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid);
pe_clear_action_bit(first, pe_action_optional);
}
}
if (reason && is_not_set(first->flags, pe_action_optional)
&& is_not_set(first->flags, pe_action_runnable)) {
pe_rsc_trace(then->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid);
pe_clear_action_bit(then, pe_action_runnable);
}
if (reason &&
is_not_set(first->flags, pe_action_optional) &&
is_set(first->flags, pe_action_migrate_runnable) &&
is_not_set(then->flags, pe_action_migrate_runnable)) {
pe_clear_action_bit(first, pe_action_migrate_runnable);
}
}
if (then_flags != then->flags) {
changed |= pe_graph_updated_then;
pe_rsc_trace(then->rsc,
"Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
then_flags, first->uuid, first->flags);
if(then->rsc && then->rsc->parent) {
/* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
update_action(then);
}
}
if (first_flags != first->flags) {
changed |= pe_graph_updated_first;
pe_rsc_trace(first->rsc,
"First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
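/* Merge a location constraint's node weights and resource-discovery mode into
 * the resource's allowed-node table, honoring the constraint's role filter
 * and lifetime.
 */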
void
native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint)
{
GListPtr gIter = NULL;
GHashTableIter iter;
node_t *node = NULL;
if (constraint == NULL) {
pe_err("Constraint is NULL");
return;
} else if (rsc == NULL) {
pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
return;
}
pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
role2text(constraint->role_filter), rsc->id);
/* take "lifetime" into account */
if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
return;
} else if (is_active(constraint) == FALSE) {
pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id);
return;
}
if (constraint->node_list_rh == NULL) {
pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
return;
}
for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
node_t *other_node = NULL;
other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (other_node != NULL) {
pe_rsc_trace(rsc, "%s + %s: %d + %d",
node->details->uname,
other_node->details->uname, node->weight, other_node->weight);
other_node->weight = merge_weights(other_node->weight, node->weight);
} else {
other_node = node_copy(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
}
if (other_node->rsc_discover_mode < constraint->discover_mode) {
if (constraint->discover_mode == discover_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
other_node->rsc_discover_mode = constraint->discover_mode;
}
}
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
}
}
void
native_expand(resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
graph_element_from_action(action, data_set);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
#define log_change(fmt, args...) do { \
if(terminal) { \
printf(" * "fmt"\n", ##args); \
} else { \
crm_notice(fmt, ##args); \
} \
} while(0)
#define STOP_SANITY_ASSERT(lineno) do { \
if(current && current->details->unclean) { \
/* It will be a pseudo op */ \
} else if(stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
} else if(is_set(stop->flags, pe_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
} \
} while(0)
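/* Log the actions planned for a resource (and its children) in human-readable
 * form: Start, Stop, Move, Recover, Restart, Reload, Migrate, Promote, Demote,
 * or Leave when nothing needs to change.
 */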
void
LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
node_t *next = NULL;
node_t *current = NULL;
action_t *stop = NULL;
action_t *start = NULL;
action_t *demote = NULL;
action_t *promote = NULL;
char *key = NULL;
gboolean moving = FALSE;
GListPtr possible_matches = NULL;
if(rsc->variant == pe_container) {
container_LogActions(rsc, data_set, terminal);
return;
}
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
LogActions(child_rsc, data_set, terminal);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
if (g_list_length(rsc->running_on) > 1 && rsc->partial_migration_source) {
current = rsc->partial_migration_source;
} else {
current = rsc->running_on->data;
}
if (rsc->role == RSC_ROLE_STOPPED) {
/*
* This can occur when resources are being recovered
* We fiddle with the current role in native_create_actions()
*/
rsc->role = RSC_ROLE_STARTED;
}
}
if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
if (is_not_set(rsc->flags, pe_rsc_managed)
|| (current == NULL && next == NULL)) {
pe_rsc_info(rsc, "Leave %s\t(%s%s)",
rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
pe_rsc_managed) ? " unmanaged" : "");
return;
}
if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
moving = TRUE;
}
key = start_key(rsc);
possible_matches = find_actions(rsc->actions, key, next);
free(key);
if (possible_matches) {
start = possible_matches->data;
g_list_free(possible_matches);
}
key = stop_key(rsc);
if(start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
possible_matches = find_actions(rsc->actions, key, NULL);
} else {
possible_matches = find_actions(rsc->actions, key, current);
}
free(key);
if (possible_matches) {
stop = possible_matches->data;
g_list_free(possible_matches);
}
key = promote_key(rsc);
possible_matches = find_actions(rsc->actions, key, next);
free(key);
if (possible_matches) {
promote = possible_matches->data;
g_list_free(possible_matches);
}
key = demote_key(rsc);
possible_matches = find_actions(rsc->actions, key, next);
free(key);
if (possible_matches) {
demote = possible_matches->data;
g_list_free(possible_matches);
}
if (rsc->role == rsc->next_role) {
action_t *migrate_to = NULL;
key = generate_op_key(rsc->id, RSC_MIGRATED, 0);
possible_matches = find_actions(rsc->actions, key, next);
free(key);
if (possible_matches) {
migrate_to = possible_matches->data;
}
CRM_CHECK(next != NULL,);
if (next == NULL) {
} else if (migrate_to && is_set(migrate_to->flags, pe_action_runnable) && current) {
log_change("Migrate %s\t(%s %s -> %s)",
rsc->id, role2text(rsc->role), current->details->uname,
next->details->uname);
} else if (is_set(rsc->flags, pe_rsc_reload)) {
log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname);
} else if (start == NULL || is_set(start->flags, pe_action_optional)) {
pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role),
next->details->uname);
} else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
log_change("Stop %s\t(%s %s%s)", rsc->id, role2text(rsc->role), current?current->details->uname:"N/A",
stop && is_not_set(stop->flags, pe_action_runnable) ? " - blocked" : "");
STOP_SANITY_ASSERT(__LINE__);
} else if (moving && current) {
log_change("%s %s\t(%s %s -> %s)",
is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move ",
rsc->id, role2text(rsc->role),
current->details->uname, next->details->uname);
} else if (is_set(rsc->flags, pe_rsc_failed)) {
log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname);
STOP_SANITY_ASSERT(__LINE__);
} else {
log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname);
/* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
}
g_list_free(possible_matches);
return;
}
if (rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) {
CRM_CHECK(current != NULL,);
if (current != NULL) {
gboolean allowed = FALSE;
if (demote != NULL && (demote->flags & pe_action_runnable)) {
allowed = TRUE;
}
log_change("Demote %s\t(%s -> %s %s%s)",
rsc->id,
role2text(rsc->role),
role2text(rsc->next_role),
current->details->uname, allowed ? "" : " - blocked");
if (stop != NULL && is_not_set(stop->flags, pe_action_optional)
&& rsc->next_role > RSC_ROLE_STOPPED && moving == FALSE) {
if (is_set(rsc->flags, pe_rsc_failed)) {
log_change("Recover %s\t(%s %s)",
rsc->id, role2text(rsc->role), next->details->uname);
STOP_SANITY_ASSERT(__LINE__);
} else if (is_set(rsc->flags, pe_rsc_reload)) {
log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role),
next->details->uname);
} else {
log_change("Restart %s\t(%s %s)",
rsc->id, role2text(rsc->next_role), next->details->uname);
STOP_SANITY_ASSERT(__LINE__);
}
}
}
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
GListPtr gIter = NULL;
CRM_CHECK(current != NULL,);
key = stop_key(rsc);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
action_t *stop_op = NULL;
gboolean allowed = FALSE;
possible_matches = find_actions(rsc->actions, key, node);
if (possible_matches) {
stop_op = possible_matches->data;
g_list_free(possible_matches);
}
if (stop_op && (stop_op->flags & pe_action_runnable)) {
STOP_SANITY_ASSERT(__LINE__);
allowed = TRUE;
}
log_change("Stop %s\t(%s%s)", rsc->id, node->details->uname,
allowed ? "" : " - blocked");
}
free(key);
}
if (moving) {
log_change("Move %s\t(%s %s -> %s)",
rsc->id, role2text(rsc->next_role), current->details->uname,
next->details->uname);
STOP_SANITY_ASSERT(__LINE__);
}
if (rsc->role == RSC_ROLE_STOPPED) {
gboolean allowed = FALSE;
if (start && (start->flags & pe_action_runnable)) {
allowed = TRUE;
}
CRM_CHECK(next != NULL,);
if (next != NULL) {
log_change("Start %s\t(%s%s)", rsc->id, next->details->uname,
allowed ? "" : " - blocked");
}
if (allowed == FALSE) {
return;
}
}
if (rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) {
gboolean allowed = FALSE;
CRM_LOG_ASSERT(next);
if (stop != NULL && is_not_set(stop->flags, pe_action_optional)
&& rsc->role > RSC_ROLE_STOPPED) {
if (is_set(rsc->flags, pe_rsc_failed)) {
log_change("Recover %s\t(%s %s)",
rsc->id, role2text(rsc->role), next?next->details->uname:NULL);
STOP_SANITY_ASSERT(__LINE__);
} else if (is_set(rsc->flags, pe_rsc_reload)) {
log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role),
next?next->details->uname:NULL);
STOP_SANITY_ASSERT(__LINE__);
} else {
log_change("Restart %s\t(%s %s)",
rsc->id, role2text(rsc->role), next?next->details->uname:NULL);
STOP_SANITY_ASSERT(__LINE__);
}
}
if (promote && (promote->flags & pe_action_runnable)) {
allowed = TRUE;
}
log_change("Promote %s\t(%s -> %s %s%s)",
rsc->id,
role2text(rsc->role),
role2text(rsc->next_role),
next?next->details->uname:NULL,
allowed ? "" : " - blocked");
}
}
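/* Schedule a stop of the resource on every node it is running on, skipping a
 * partial-migration target; stops for unmanaged resources are left unrunnable.
 */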
gboolean
StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
action_t *stop;
if (rsc->partial_migration_target) {
if (rsc->partial_migration_target->details == current->details) {
pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
next->details->uname, rsc->id);
continue;
} else {
pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
optional = FALSE;
}
}
pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
stop = stop_action(rsc, current, optional);
if (is_not_set(rsc->flags, pe_rsc_managed)) {
update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, optional, data_set);
}
}
return TRUE;
}
gboolean
StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
action_t *start = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s on %s %d", rsc->id, next ? next->details->uname : "N/A", optional);
start = start_action(rsc, next, TRUE);
if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
return TRUE;
}
gboolean
PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
char *key = NULL;
GListPtr gIter = NULL;
gboolean runnable = TRUE;
GListPtr action_list = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(next != NULL, return FALSE);
pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
key = start_key(rsc);
action_list = find_actions_exact(rsc->actions, key, next);
free(key);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *start = (action_t *) gIter->data;
if (is_set(start->flags, pe_action_runnable) == FALSE) {
runnable = FALSE;
}
}
g_list_free(action_list);
if (runnable) {
promote_action(rsc, next, optional);
return TRUE;
}
pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
key = promote_key(rsc);
action_list = find_actions_exact(rsc->actions, key, next);
free(key);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *promote = (action_t *) gIter->data;
update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
g_list_free(action_list);
return TRUE;
}
gboolean
DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
/* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
demote_action(rsc, current, optional);
}
return TRUE;
}
gboolean
RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
CRM_CHECK(FALSE, return FALSE);
return FALSE;
}
gboolean
NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
return FALSE;
}
gboolean
DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set)
{
if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
return FALSE;
} else if (node == NULL) {
pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
return FALSE;
} else if (node->details->unclean || node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
node->details->uname);
return FALSE;
}
crm_notice("Removing %s from %s", rsc->id, node->details->uname);
delete_action(rsc, node, optional);
new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
optional ? pe_order_implies_then : pe_order_optional, data_set);
new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
optional ? pe_order_implies_then : pe_order_optional, data_set);
return TRUE;
}
#include <../lib/pengine/unpack.h>
#define set_char(x) last_rsc_id[lpc] = x; complete = TRUE;
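/* Given a clone instance ID such as "rsc:9", return the ID of the next
 * instance ("rsc:10"), growing the buffer when an extra digit is needed.
 */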
static char *
increment_clone(char *last_rsc_id)
{
int lpc = 0;
int len = 0;
char *tmp = NULL;
gboolean complete = FALSE;
CRM_CHECK(last_rsc_id != NULL, return NULL);
if (last_rsc_id != NULL) {
len = strlen(last_rsc_id);
}
lpc = len - 1;
while (complete == FALSE && lpc > 0) {
switch (last_rsc_id[lpc]) {
case 0:
lpc--;
break;
case '0':
set_char('1');
break;
case '1':
set_char('2');
break;
case '2':
set_char('3');
break;
case '3':
set_char('4');
break;
case '4':
set_char('5');
break;
case '5':
set_char('6');
break;
case '6':
set_char('7');
break;
case '7':
set_char('8');
break;
case '8':
set_char('9');
break;
case '9':
last_rsc_id[lpc] = '0';
lpc--;
break;
case ':':
tmp = last_rsc_id;
last_rsc_id = calloc(1, len + 2);
memcpy(last_rsc_id, tmp, len);
last_rsc_id[++lpc] = '1';
last_rsc_id[len] = '0';
last_rsc_id[len + 1] = 0;
complete = TRUE;
free(tmp);
break;
default:
crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc);
return NULL;
break;
}
}
return last_rsc_id;
}
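/* For anonymous clone instances in a group, check every peer instance's
 * known_on table so an already-known state on this node can skip the probe.
 */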
static node_t *
probe_grouped_clone(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
node_t *running = NULL;
resource_t *top = uber_parent(rsc);
if (running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) {
/* Annoyingly we also need to check any other clone instances
* Clumsy, but it will work.
*
* An alternative would be to update known_on for every peer
* during process_rsc_state()
*
* This code desperately needs optimization
* ptest -x with 100 nodes, 100 clones and clone-max=10:
* No probes O(25s)
* Detection without clone loop O(3m)
* Detection with clone loop O(8m)
ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources
ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done
ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states
ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done
*/
char *clone_id = clone_zero(rsc->id);
resource_t *peer = pe_find_resource(top->children, clone_id);
while (peer && running == NULL) {
running = pe_hash_table_lookup(peer->known_on, node->details->id);
if (running != NULL) {
/* we already know the status of the resource on this node */
pe_rsc_trace(rsc, "Skipping active clone: %s", rsc->id);
free(clone_id);
return running;
}
clone_id = increment_clone(clone_id);
peer = pe_find_resource(data_set->resources, clone_id);
}
free(clone_id);
}
return running;
}
gboolean
native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
enum pe_ordering flags = pe_order_optional;
char *key = NULL;
action_t *probe = NULL;
node_t *running = NULL;
node_t *allowed = NULL;
resource_t *top = uber_parent(rsc);
static const char *rc_master = NULL;
static const char *rc_inactive = NULL;
if (rc_inactive == NULL) {
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
}
CRM_CHECK(node != NULL, return FALSE);
if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
return FALSE;
} else if (force == FALSE && is_container_remote_node(node)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s on container %s",
rsc->id, node->details->id);
return FALSE;
}
if (is_remote_node(node)) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes do not run stonith agents.", rsc->id, node->details->id);
return FALSE;
} else if (rsc_contains_remote_node(data_set, rsc)) {
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run resources that contain connection resources.", rsc->id, node->details->id);
return FALSE;
} else if (rsc->is_remote_node) {
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, remote-nodes can not run connection resources", rsc->id, node->details->id);
return FALSE;
}
}
if (rsc->children) {
GListPtr gIter = NULL;
gboolean any_created = FALSE;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
|| any_created;
}
return any_created;
} else if ((rsc->container) && (!rsc->is_remote_node)) {
pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
return FALSE;
}
if (is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
return FALSE;
}
running = g_hash_table_lookup(rsc->known_on, node->details->id);
if (running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) {
/* Anonymous clones */
if (rsc->parent == top) {
running = g_hash_table_lookup(rsc->parent->known_on, node->details->id);
} else {
/* Grouped anonymous clones need extra special handling */
running = probe_grouped_clone(rsc, node, data_set);
}
}
if (force == FALSE && running != NULL) {
/* we already know the status of the resource on this node */
- pe_rsc_trace(rsc, "Skipping active: %s on %s", rsc->id, node->details->uname);
+ pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
return FALSE;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
if (allowed == NULL) {
/* exclusive discover is enabled and this node is not in the allowed list. */
return FALSE;
} else if (allowed->rsc_discover_mode != discover_exclusive) {
/* exclusive discover is enabled and this node is not marked
* as a node this resource should be discovered on */
return FALSE;
}
}
if (allowed && allowed->rsc_discover_mode == discover_never) {
/* this resource is marked as not needing to be discovered on this node */
return FALSE;
}
key = generate_op_key(rsc->id, RSC_STATUS, 0);
probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
/* If enabled, require unfencing before probing any fence devices
* but ensure it happens after any resources that require
* unfencing have been probed.
*
* Doing it the other way (requiring unfencing after probing
* resources that need it) would result in the node being
* unfenced, and all its resources being stopped, whenever a new
* resource is added, which would be highly suboptimal.
*
* So essentially, at the point the fencing device(s) have been
* probed, we know the state of all resources that require
* unfencing and that unfencing occurred.
*/
if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) {
trigger_unfencing(NULL, node, "node discovery", probe, data_set);
probe->priority = INFINITY; /* Ensure this runs if unfencing succeeds */
} else if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
action_t *unfence = pe_fence_op(node, "on", TRUE, data_set);
order_actions(probe, unfence, pe_order_optional);
}
/*
* We need to know if it's running_on (not just known_on) this node
* to correctly determine the target rc.
*/
running = pe_find_node_id(rsc->running_on, node->details->id);
if (running == NULL) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
} else if (rsc->role == RSC_ROLE_MASTER) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
}
crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
is_set(probe->flags, pe_action_runnable), rsc->running_on);
if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) {
top = rsc;
} else if (pe_rsc_is_clone(top) == FALSE) {
top = rsc;
} else {
crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
}
if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
/* Prevent the start from occurring if rsc isn't active, but
* don't cause it to stop if it was active already
*/
flags |= pe_order_runnable_left;
}
custom_action_order(rsc, NULL, probe,
top, generate_op_key(top->id, RSC_START, 0), NULL,
flags, data_set);
/* Before any reloads, if they exist */
custom_action_order(rsc, NULL, probe,
top, reload_key(rsc), NULL,
pe_order_optional, data_set);
if (node->details->shutdown == FALSE) {
custom_action_order(rsc, NULL, probe,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_optional, data_set);
}
if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Normally rsc.start depends on probe complete which depends
* on rsc.probe. But this can't be the case in this scenario as
* it would create graph loops.
*
* So instead we explicitly order 'rsc.probe then rsc.start'
*/
} else {
order_actions(probe, complete, pe_order_implies_then);
}
return TRUE;
}
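/* Order actions that require fencing after the stonith_done pseudo-op, and
 * delay starts until all-stopped when the resource's state on the fenced node
 * is unknown.
 */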
static void
native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
node_t *target;
GListPtr gIter = NULL;
action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set);
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if(action->needs == rsc_req_nothing) {
/* Anything other than start or promote requires nothing */
} else if (action->needs == rsc_req_stonith) {
order_actions(stonith_done, action, pe_order_optional);
} else if (safe_str_eq(action->task, RSC_START)
&& NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) {
/* if known == NULL, then we don't know if
* the resource is active on the node
* we're about to shoot
*
* in this case, regardless of action->needs,
* the only safe option is to wait until
* the node is shot before doing anything
* with the resource
*
* it's analogous to waiting for all the probes
* for rscX to complete before starting rscX
*
* the most likely explanation is that the
* DC died and took its status with it
*/
pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
target->details->uname);
order_actions(all_stopped, action, pe_order_optional | pe_order_runnable_left);
}
}
}
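/* Convert stop and demote actions on the fenced node into pseudo-ops implied
 * by the fencing, order them after the stonith operation, and add the extra
 * notifications needed to avoid ordering loops for clones.
 */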
static void
native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
char *key = NULL;
GListPtr gIter = NULL;
GListPtr action_list = NULL;
action_t *start = NULL;
resource_t *top = uber_parent(rsc);
node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Check whether the resource has a pending start action */
start = find_first_action(rsc->actions, NULL, CRMD_ACTION_START, NULL);
/* Get a list of stop actions potentially implied by the fencing */
key = stop_key(rsc);
action_list = find_actions(rsc->actions, key, target);
free(key);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (is_set(rsc->flags, pe_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
crm_info("%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
/* The stop would never complete and is now implied by the fencing,
* so convert it into a pseudo-action.
*/
update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
update_action_flags(action, pe_action_implied_by_stonith, __FUNCTION__, __LINE__);
if(start == NULL || start->needs > rsc_req_quorum) {
enum pe_ordering flags = pe_order_optional;
action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
if (target->details->remote_rsc) {
/* User constraints must not order a resource in a guest node
* relative to the guest node container resource. This flag
* marks constraints as generated by the cluster and thus
* immune to that check.
*/
flags |= pe_order_preserve;
}
order_actions(stonith_op, action, flags);
order_actions(stonith_op, parent_stop, flags);
}
if (is_set(rsc->flags, pe_rsc_notify)) {
/* Create a second notification that will be delivered
* immediately after the node is fenced
*
* Basic problem:
* - C is a clone active on the node to be shot and stopping on another
* - R is a resource that depends on C
*
* + C.stop depends on R.stop
* + C.stopped depends on STONITH
* + C.notify depends on C.stopped
* + C.healthy depends on C.notify
* + R.stop depends on C.healthy
*
* The extra notification here changes
* + C.healthy depends on C.notify
* into:
* + C.healthy depends on C.notify'
* + C.notify' depends on STONITH'
* thus breaking the loop
*/
create_secondary_notification(action, rsc, stonith_op, data_set);
}
/* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
However given group(rA, rB) running on nodeX and B.stop has failed,
A := stop healthy resource (rA.stop)
B := stop failed resource (pseudo operation B.stop)
C := stonith nodeX
A requires B, B requires C, C requires A
This loop would prevent the cluster from making progress.
This block creates the "C requires A" dependency and therefore must (at least
for now) be disabled.
Instead, run the block above and treat all resources on nodeX as B would be
(marked as a pseudo op depending on the STONITH).
TODO: Break the "A requires B" dependency in update_action() and re-enable this block
} else if(is_stonith == FALSE) {
crm_info("Moving healthy resource %s"
" off %s before fencing",
rsc->id, node->details->uname);
* stop healthy resources before the
* stonith op
*
custom_action_order(
rsc, stop_key(rsc), NULL,
NULL,strdup(CRM_OP_FENCE),stonith_op,
pe_order_optional, data_set);
*/
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
key = demote_key(rsc);
action_list = find_actions(rsc->actions, key, target);
free(key);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
|| is_set(rsc->flags, pe_rsc_failed)) {
if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_info(rsc,
"Demote of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
if (start == NULL || start->needs > rsc_req_quorum) {
order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
}
}
}
g_list_free(action_list);
}
void
rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_stonith_ordering(child_rsc, stonith_op, data_set);
}
} else if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
} else {
native_start_constraints(rsc, stonith_op, data_set);
native_stop_constraints(rsc, stonith_op, data_set);
}
}
enum stack_activity {
stack_stable = 0,
stack_starting = 1,
stack_stopping = 2,
stack_middle = 4,
};
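/* Find the first action of the given type for this resource on 'current',
 * optionally rejecting pseudo-actions and actions that are not runnable.
 */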
static action_t *
get_first_named_action(resource_t * rsc, const char *action, gboolean only_valid, node_t * current)
{
action_t *a = NULL;
GListPtr action_list = NULL;
char *key = generate_op_key(rsc->id, action, 0);
action_list = find_actions(rsc->actions, key, current);
if (action_list == NULL || action_list->data == NULL) {
crm_trace("%s: no %s action", rsc->id, action);
free(key);
return NULL;
}
a = action_list->data;
g_list_free(action_list);
if (only_valid && is_set(a->flags, pe_action_pseudo)) {
crm_trace("%s: pseudo", key);
a = NULL;
} else if (only_valid && is_not_set(a->flags, pe_action_runnable)) {
crm_trace("%s: runnable", key);
a = NULL;
}
free(key);
return a;
}
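/* Schedule a reload of the resource on 'node', ordered before any stop or
 * demote already planned there; failed or start-pending resources get a
 * full restart instead.
 */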
void
ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
action_t *other = NULL;
action_t *reload = NULL;
if (rsc->children) {
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
ReloadRsc(child_rsc, node, data_set);
}
return;
} else if (rsc->variant > pe_native) {
/* Complex resource with no children */
return;
} else if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
return;
} else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags);
stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */
return;
} else if (node == NULL) {
pe_rsc_trace(rsc, "%s: not active", rsc->id);
return;
}
pe_rsc_trace(rsc, "Processing %s", rsc->id);
set_bit(rsc->flags, pe_rsc_reload);
reload = custom_action(
rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
/* stop = stop_action(rsc, node, optional); */
other = get_first_named_action(rsc, RSC_STOP, TRUE, node);
if (other != NULL) {
order_actions(reload, other, pe_order_optional);
}
other = get_first_named_action(rsc, RSC_DEMOTE, TRUE, node);
if (other != NULL) {
order_actions(reload, other, pe_order_optional);
}
}
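/* Copy clone-instance, remote-node, container and isolation meta-attributes
 * from the resource and its ancestors into the operation XML.
 */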
void
native_append_meta(resource_t * rsc, xmlNode * xml)
{
char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
resource_t *iso_parent, *last_parent, *parent;
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container) {
crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
}
}
last_parent = iso_parent = rsc;
while (iso_parent != NULL) {
char *name = NULL;
char *iso = NULL;
if (iso_parent->isolation_wrapper == NULL) {
last_parent = iso_parent;
iso_parent = iso_parent->parent;
continue;
}
/* name of wrapper script this resource is routed through. */
name = crm_meta_name(XML_RSC_ATTR_ISOLATION_WRAPPER);
crm_xml_add(xml, name, iso_parent->isolation_wrapper);
free(name);
/* instance name for isolated environment */
name = crm_meta_name(XML_RSC_ATTR_ISOLATION_INSTANCE);
if (pe_rsc_is_clone(iso_parent)) {
/* if isolation is set at the clone/master level, we have to
* give this resource the unique isolation instance associated
* with the clone child (last_parent)*/
/* Example: cloned group. group is container
* clone myclone - iso_parent
* group mygroup - last_parent (this is the iso environment)
* rsc myrsc1 - rsc
* rsc myrsc2
* The group is what is isolated in example1. We have to make
* sure myrsc1 and myrsc2 launch in the same isolated environment.
*
* Example: cloned primitives. rsc primitive is container
* clone myclone iso_parent
* rsc myrsc1 - last_parent == rsc (this is the iso environment)
* The individual cloned primitive instances are isolated
*/
value = g_hash_table_lookup(last_parent->meta, XML_RSC_ATTR_INCARNATION);
CRM_ASSERT(value != NULL);
iso = crm_concat(crm_element_value(last_parent->xml, XML_ATTR_ID), value, '_');
crm_xml_add(xml, name, iso);
free(iso);
} else {
/*
* Example: cloned group of containers
* clone myclone
* group mygroup
* rsc myrsc1 - iso_parent (this is the iso environment)
* rsc myrsc2
*
* Example: group of containers
* group mygroup
* rsc myrsc1 - iso_parent (this is the iso environment)
* rsc myrsc2
*
* Example: group is container
* group mygroup - iso_parent ( this is iso environment)
* rsc myrsc1
* rsc myrsc2
*
* Example: single primitive
* rsc myrsc1 - iso_parent (this is the iso environment)
*/
value = g_hash_table_lookup(iso_parent->meta, XML_RSC_ATTR_INCARNATION);
if (value) {
crm_xml_add(xml, name, iso_parent->id);
iso = crm_concat(crm_element_value(iso_parent->xml, XML_ATTR_ID), value, '_');
crm_xml_add(xml, name, iso);
free(iso);
} else {
crm_xml_add(xml, name, iso_parent->id);
}
}
free(name);
break;
}
}
diff --git a/pengine/test10/594.summary b/pengine/test10/594.summary
index 6c208ee4ae..d9fe8c163f 100644
--- a/pengine/test10/594.summary
+++ b/pengine/test10/594.summary
@@ -1,54 +1,56 @@
Current cluster status:
Node hadev3 (879e65f8-4b38-4c56-9552-4752ad436669): UNCLEAN (offline)
Online: [ hadev1 hadev2 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1
rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started hadev2
child_DoFencing:1 (stonith:ssh): Started hadev1
child_DoFencing:2 (stonith:ssh): Started hadev1
Transition Summary:
+ * Fence hadev3
+ * Shutdown hadev2
* Move DcIPaddr (Started hadev2 -> hadev1)
* Move rsc_hadev2 (Started hadev2 -> hadev1)
* Stop child_DoFencing:0 (hadev2)
* Stop child_DoFencing:2 (hadev1)
Executing cluster transition:
* Resource action: DcIPaddr monitor on hadev1
* Resource action: rsc_hadev3 monitor on hadev2
* Resource action: rsc_hadev2 monitor on hadev1
* Resource action: child_DoFencing:0 monitor on hadev1
* Resource action: child_DoFencing:2 monitor on hadev2
* Pseudo action: DoFencing_stop_0
* Fencing hadev3 (reboot)
* Pseudo action: stonith_complete
* Resource action: DcIPaddr stop on hadev2
* Resource action: rsc_hadev2 stop on hadev2
* Resource action: child_DoFencing:0 stop on hadev2
* Resource action: child_DoFencing:2 stop on hadev1
* Pseudo action: DoFencing_stopped_0
* Cluster action: do_shutdown on hadev2
* Pseudo action: all_stopped
* Resource action: DcIPaddr start on hadev1
* Resource action: rsc_hadev2 start on hadev1
* Resource action: DcIPaddr monitor=5000 on hadev1
* Resource action: rsc_hadev2 monitor=5000 on hadev1
Revised cluster status:
Online: [ hadev1 hadev2 ]
OFFLINE: [ hadev3 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started hadev1
rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev1
rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev1
rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Started hadev1
child_DoFencing:2 (stonith:ssh): Stopped
diff --git a/pengine/test10/662.summary b/pengine/test10/662.summary
index 8d73b1dd28..1726f35ef1 100644
--- a/pengine/test10/662.summary
+++ b/pengine/test10/662.summary
@@ -1,66 +1,67 @@
Current cluster status:
Online: [ c001n02 c001n03 c001n04 c001n09 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09
rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n02
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n04
child_DoFencing:3 (stonith:ssh): Started c001n09
Transition Summary:
+ * Shutdown c001n02
* Move rsc_c001n02 (Started c001n02 -> c001n03)
* Stop child_DoFencing:0 (c001n02)
Executing cluster transition:
* Resource action: DcIPaddr monitor on c001n04
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n02
* Resource action: rsc_c001n09 monitor on c001n04
* Resource action: rsc_c001n09 monitor on c001n03
* Resource action: rsc_c001n09 monitor on c001n02
* Resource action: rsc_c001n02 monitor on c001n09
* Resource action: rsc_c001n02 monitor on c001n04
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n03 monitor on c001n09
* Resource action: rsc_c001n03 monitor on c001n04
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n04 monitor on c001n09
* Resource action: rsc_c001n04 monitor on c001n03
* Resource action: child_DoFencing:0 monitor on c001n09
* Resource action: child_DoFencing:0 monitor on c001n04
* Resource action: child_DoFencing:1 monitor on c001n04
* Resource action: child_DoFencing:1 monitor on c001n02
* Resource action: child_DoFencing:2 monitor on c001n09
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n04
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Pseudo action: DoFencing_stop_0
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: child_DoFencing:0 stop on c001n02
* Pseudo action: DoFencing_stopped_0
* Cluster action: do_shutdown on c001n02
* Pseudo action: all_stopped
* Resource action: rsc_c001n02 start on c001n03
* Resource action: rsc_c001n02 monitor=5000 on c001n03
Revised cluster status:
Online: [ c001n02 c001n03 c001n04 c001n09 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n09
rsc_c001n09 (ocf::heartbeat:IPaddr): Started c001n09
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n04
child_DoFencing:3 (stonith:ssh): Started c001n09
diff --git a/pengine/test10/797.summary b/pengine/test10/797.summary
index 6e78255393..3184eae297 100644
--- a/pengine/test10/797.summary
+++ b/pengine/test10/797.summary
@@ -1,72 +1,73 @@
Current cluster status:
Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline)
Online: [ c001n01 c001n02 c001n03 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started (Monitoring)[ c001n01 c001n03 ]
child_DoFencing:1 (stonith:ssh): Started c001n02
child_DoFencing:2 (stonith:ssh): Started c001n03
child_DoFencing:3 (stonith:ssh): Stopped
Transition Summary:
+ * Shutdown c001n02
* Stop DcIPaddr (Started c001n03)
* Stop rsc_c001n08 (Started c001n02)
* Stop rsc_c001n02 (Started c001n02)
* Stop rsc_c001n03 (Started c001n03)
* Stop rsc_c001n01 (Started c001n01)
* Restart child_DoFencing:0 (Started c001n01)
* Stop child_DoFencing:1 (c001n02)
Executing cluster transition:
* Resource action: DcIPaddr monitor on c001n02
* Resource action: DcIPaddr monitor on c001n01
* Resource action: DcIPaddr stop on c001n03
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n01
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n01
* Pseudo action: DoFencing_stop_0
* Resource action: DcIPaddr delete on c001n03
* Resource action: rsc_c001n08 stop on c001n02
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: rsc_c001n03 stop on c001n03
* Resource action: rsc_c001n01 stop on c001n01
* Resource action: child_DoFencing:0 stop on c001n03
* Resource action: child_DoFencing:0 stop on c001n01
* Resource action: child_DoFencing:1 stop on c001n02
* Pseudo action: DoFencing_stopped_0
* Pseudo action: DoFencing_start_0
* Cluster action: do_shutdown on c001n02
* Pseudo action: all_stopped
* Resource action: child_DoFencing:0 start on c001n01
* Resource action: child_DoFencing:0 monitor=5000 on c001n01
* Pseudo action: DoFencing_running_0
Revised cluster status:
Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline)
Online: [ c001n01 c001n02 c001n03 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n01
child_DoFencing:1 (stonith:ssh): Stopped
child_DoFencing:2 (stonith:ssh): Started c001n03
child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/pengine/test10/829.summary b/pengine/test10/829.summary
index d95a2ffe1b..a9d25e01f6 100644
--- a/pengine/test10/829.summary
+++ b/pengine/test10/829.summary
@@ -1,63 +1,64 @@
Current cluster status:
Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline)
Online: [ c001n01 c001n03 c001n08 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 (UNCLEAN)
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n02 (UNCLEAN)
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n01
child_DoFencing:3 (stonith:ssh): Started c001n08
Transition Summary:
+ * Fence c001n02
* Move rsc_c001n02 (Started c001n02 -> c001n01)
* Stop child_DoFencing:0 (c001n02)
Executing cluster transition:
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n01
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: child_DoFencing:0 monitor on c001n01
* Resource action: child_DoFencing:1 monitor on c001n01
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n01
* Fencing c001n02 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc_c001n02_stop_0
* Pseudo action: DoFencing_stop_0
* Resource action: rsc_c001n02 start on c001n01
* Pseudo action: child_DoFencing:0_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: all_stopped
* Resource action: rsc_c001n02 monitor=5000 on c001n01
Revised cluster status:
Online: [ c001n01 c001n03 c001n08 ]
OFFLINE: [ c001n02 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n01
child_DoFencing:3 (stonith:ssh): Started c001n08
diff --git a/pengine/test10/bug-1572-1.summary b/pengine/test10/bug-1572-1.summary
index 4280f7b0dd..6c37bb467d 100644
--- a/pengine/test10/bug-1572-1.summary
+++ b/pengine/test10/bug-1572-1.summary
@@ -1,84 +1,85 @@
Current cluster status:
Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
Masters: [ arc-tkincaidlx.wsicorp.com ]
Slaves: [ arc-dknightlx ]
Resource Group: grp_pgsql_mirror
fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com
pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com
IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com
Transition Summary:
+ * Shutdown arc-dknightlx
* Stop rsc_drbd_7788:0 (arc-dknightlx)
* Restart rsc_drbd_7788:1 (Master arc-tkincaidlx.wsicorp.com)
* Restart fs_mirror (Started arc-tkincaidlx.wsicorp.com)
* Restart pgsql_5555 (Started arc-tkincaidlx.wsicorp.com)
* Restart IPaddr_147_81_84_133 (Started arc-tkincaidlx.wsicorp.com)
Executing cluster transition:
* Pseudo action: ms_drbd_7788_pre_notify_demote_0
* Pseudo action: grp_pgsql_mirror_stop_0
* Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0
* Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com
* Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com
* Pseudo action: grp_pgsql_mirror_stopped_0
* Pseudo action: ms_drbd_7788_demote_0
* Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_demoted_0
* Pseudo action: ms_drbd_7788_post_notify_demoted_0
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0
* Pseudo action: ms_drbd_7788_pre_notify_stop_0
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0
* Pseudo action: ms_drbd_7788_stop_0
* Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
* Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_stopped_0
* Cluster action: do_shutdown on arc-dknightlx
* Pseudo action: ms_drbd_7788_post_notify_stopped_0
* Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0
* Pseudo action: ms_drbd_7788_pre_notify_start_0
* Pseudo action: all_stopped
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_7788_start_0
* Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_running_0
* Pseudo action: ms_drbd_7788_post_notify_running_0
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-post_notify_running_0
* Pseudo action: ms_drbd_7788_pre_notify_promote_0
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_promote_0
* Pseudo action: ms_drbd_7788_promote_0
* Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_promoted_0
* Pseudo action: ms_drbd_7788_post_notify_promoted_0
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-post_notify_promoted_0
* Pseudo action: grp_pgsql_mirror_start_0
* Resource action: fs_mirror start on arc-tkincaidlx.wsicorp.com
* Resource action: pgsql_5555 start on arc-tkincaidlx.wsicorp.com
* Resource action: pgsql_5555 monitor=30000 on arc-tkincaidlx.wsicorp.com
* Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com
* Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com
* Pseudo action: grp_pgsql_mirror_running_0
Revised cluster status:
Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
Masters: [ arc-tkincaidlx.wsicorp.com ]
Stopped: [ arc-dknightlx ]
Resource Group: grp_pgsql_mirror
fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com
pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com
IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com
diff --git a/pengine/test10/bug-1572-2.summary b/pengine/test10/bug-1572-2.summary
index 61740276c0..a4235a73a7 100644
--- a/pengine/test10/bug-1572-2.summary
+++ b/pengine/test10/bug-1572-2.summary
@@ -1,60 +1,61 @@
Current cluster status:
Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
Masters: [ arc-tkincaidlx.wsicorp.com ]
Slaves: [ arc-dknightlx ]
Resource Group: grp_pgsql_mirror
fs_mirror (ocf::heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com
pgsql_5555 (ocf::heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com
IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com
Transition Summary:
+ * Shutdown arc-dknightlx
* Stop rsc_drbd_7788:0 (arc-dknightlx)
* Demote rsc_drbd_7788:1 (Master -> Slave arc-tkincaidlx.wsicorp.com)
* Stop fs_mirror (arc-tkincaidlx.wsicorp.com)
* Stop pgsql_5555 (arc-tkincaidlx.wsicorp.com)
* Stop IPaddr_147_81_84_133 (arc-tkincaidlx.wsicorp.com)
Executing cluster transition:
* Pseudo action: ms_drbd_7788_pre_notify_demote_0
* Pseudo action: grp_pgsql_mirror_stop_0
* Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0
* Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com
* Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com
* Pseudo action: grp_pgsql_mirror_stopped_0
* Pseudo action: ms_drbd_7788_demote_0
* Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_demoted_0
* Pseudo action: ms_drbd_7788_post_notify_demoted_0
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0
* Pseudo action: ms_drbd_7788_pre_notify_stop_0
* Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0
* Pseudo action: ms_drbd_7788_stop_0
* Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
* Pseudo action: ms_drbd_7788_stopped_0
* Cluster action: do_shutdown on arc-dknightlx
* Pseudo action: ms_drbd_7788_post_notify_stopped_0
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
* Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
Slaves: [ arc-tkincaidlx.wsicorp.com ]
Stopped: [ arc-dknightlx ]
Resource Group: grp_pgsql_mirror
fs_mirror (ocf::heartbeat:Filesystem): Stopped
pgsql_5555 (ocf::heartbeat:pgsql): Stopped
IPaddr_147_81_84_133 (ocf::heartbeat:IPaddr): Stopped
diff --git a/pengine/test10/bug-1573.summary b/pengine/test10/bug-1573.summary
index 0c1fe3d3d8..8fb2820574 100644
--- a/pengine/test10/bug-1573.summary
+++ b/pengine/test10/bug-1573.summary
@@ -1,33 +1,34 @@
Current cluster status:
Online: [ xen-b ]
OFFLINE: [ xen-c ]
Resource Group: group_1
IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Stopped
apache_2 (ocf::heartbeat:apache): Stopped
Resource Group: group_11
IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started xen-b
apache_6 (ocf::heartbeat:apache): Stopped
Transition Summary:
+ * Shutdown xen-b
* Stop IPaddr_192_168_1_102 (xen-b)
Executing cluster transition:
* Pseudo action: group_11_stop_0
* Resource action: IPaddr_192_168_1_102 stop on xen-b
* Cluster action: do_shutdown on xen-b
* Pseudo action: all_stopped
* Pseudo action: group_11_stopped_0
Revised cluster status:
Online: [ xen-b ]
OFFLINE: [ xen-c ]
Resource Group: group_1
IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Stopped
apache_2 (ocf::heartbeat:apache): Stopped
Resource Group: group_11
IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Stopped
apache_6 (ocf::heartbeat:apache): Stopped
diff --git a/pengine/test10/bug-1820-1.summary b/pengine/test10/bug-1820-1.summary
index 5edcbcbfb8..db41ac4e44 100644
--- a/pengine/test10/bug-1820-1.summary
+++ b/pengine/test10/bug-1820-1.summary
@@ -1,43 +1,44 @@
Current cluster status:
Online: [ star world ]
p1 (ocf::heartbeat:Xen): Stopped
Resource Group: gr1
test1 (ocf::heartbeat:Xen): Started star
test2 (ocf::heartbeat:Xen): Started star
Transition Summary:
+ * Shutdown star
* Start p1 (world)
* Migrate test1 (Started star -> world)
* Migrate test2 (Started star -> world)
Executing cluster transition:
* Resource action: p1 monitor on world
* Resource action: p1 monitor on star
* Pseudo action: gr1_stop_0
* Resource action: test1 migrate_to on star
* Resource action: p1 start on world
* Resource action: test1 migrate_from on world
* Resource action: test2 migrate_to on star
* Resource action: test2 migrate_from on world
* Resource action: test2 stop on star
* Resource action: test1 stop on star
* Cluster action: do_shutdown on star
* Pseudo action: all_stopped
* Pseudo action: gr1_stopped_0
* Pseudo action: gr1_start_0
* Pseudo action: test1_start_0
* Pseudo action: test2_start_0
* Pseudo action: gr1_running_0
* Resource action: test1 monitor=10000 on world
* Resource action: test2 monitor=10000 on world
Revised cluster status:
Online: [ star world ]
p1 (ocf::heartbeat:Xen): Started world
Resource Group: gr1
test1 (ocf::heartbeat:Xen): Started world
test2 (ocf::heartbeat:Xen): Started world
diff --git a/pengine/test10/bug-1820.summary b/pengine/test10/bug-1820.summary
index 592fa67f8a..6d9c021c2f 100644
--- a/pengine/test10/bug-1820.summary
+++ b/pengine/test10/bug-1820.summary
@@ -1,37 +1,38 @@
Current cluster status:
Online: [ star world ]
Resource Group: gr1
test1 (ocf::heartbeat:Xen): Started star
test2 (ocf::heartbeat:Xen): Started star
Transition Summary:
+ * Shutdown star
* Migrate test1 (Started star -> world)
* Migrate test2 (Started star -> world)
Executing cluster transition:
* Pseudo action: gr1_stop_0
* Resource action: test1 migrate_to on star
* Resource action: test1 migrate_from on world
* Resource action: test2 migrate_to on star
* Resource action: test2 migrate_from on world
* Resource action: test2 stop on star
* Resource action: test1 stop on star
* Cluster action: do_shutdown on star
* Pseudo action: all_stopped
* Pseudo action: gr1_stopped_0
* Pseudo action: gr1_start_0
* Pseudo action: test1_start_0
* Pseudo action: test2_start_0
* Pseudo action: gr1_running_0
* Resource action: test1 monitor=10000 on world
* Resource action: test2 monitor=10000 on world
Revised cluster status:
Online: [ star world ]
Resource Group: gr1
test1 (ocf::heartbeat:Xen): Started world
test2 (ocf::heartbeat:Xen): Started world
diff --git a/pengine/test10/bug-1822.summary b/pengine/test10/bug-1822.summary
index afb9fd1bea..325e408231 100644
--- a/pengine/test10/bug-1822.summary
+++ b/pengine/test10/bug-1822.summary
@@ -1,43 +1,44 @@
Current cluster status:
Online: [ process1a process2b ]
Master/Slave Set: ms-sf [ms-sf_group] (unique)
Resource Group: ms-sf_group:0
master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b
master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped
Resource Group: ms-sf_group:1
master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Master process1a
master_slave_procdctl:1 (ocf::heartbeat:procdctl): Master process1a
Transition Summary:
+ * Shutdown process1a
* Demote master_slave_Stateful:1 (Master -> Stopped process1a)
* Demote master_slave_procdctl:1 (Master -> Stopped process1a)
Executing cluster transition:
* Pseudo action: ms-sf_demote_0
* Pseudo action: ms-sf_group:1_demote_0
* Resource action: master_slave_Stateful:1 demote on process1a
* Resource action: master_slave_procdctl:1 demote on process1a
* Pseudo action: ms-sf_group:1_demoted_0
* Pseudo action: ms-sf_demoted_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: ms-sf_group:1_stop_0
* Resource action: master_slave_Stateful:1 stop on process1a
* Resource action: master_slave_procdctl:1 stop on process1a
* Cluster action: do_shutdown on process1a
* Pseudo action: all_stopped
* Pseudo action: ms-sf_group:1_stopped_0
* Pseudo action: ms-sf_stopped_0
Revised cluster status:
Online: [ process1a process2b ]
Master/Slave Set: ms-sf [ms-sf_group] (unique)
Resource Group: ms-sf_group:0
master_slave_Stateful:0 (ocf::heartbeat:Dummy-statful): Slave process2b
master_slave_procdctl:0 (ocf::heartbeat:procdctl): Stopped
Resource Group: ms-sf_group:1
master_slave_Stateful:1 (ocf::heartbeat:Dummy-statful): Stopped
master_slave_procdctl:1 (ocf::heartbeat:procdctl): Stopped
diff --git a/pengine/test10/bug-5028-bottom.summary b/pengine/test10/bug-5028-bottom.summary
index fc5cf8f05a..b43ba4efde 100644
--- a/pengine/test10/bug-5028-bottom.summary
+++ b/pengine/test10/bug-5028-bottom.summary
@@ -1,23 +1,24 @@
Current cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): FAILED bl460g6a ( blocked )
dummy02 (ocf::heartbeat:Dummy-stop-NG): Started bl460g6a
Transition Summary:
+ * Shutdown bl460g6a
* Stop dummy02 (bl460g6a)
Executing cluster transition:
* Pseudo action: dummy-g_stop_0
* Resource action: dummy02 stop on bl460g6a
* Pseudo action: all_stopped
Revised cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): FAILED bl460g6a ( blocked )
dummy02 (ocf::heartbeat:Dummy-stop-NG): Stopped
diff --git a/pengine/test10/bug-5028-detach.summary b/pengine/test10/bug-5028-detach.summary
index 5e93b2a7e7..712bfa1b4a 100644
--- a/pengine/test10/bug-5028-detach.summary
+++ b/pengine/test10/bug-5028-detach.summary
@@ -1,23 +1,24 @@
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Current cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): Started bl460g6a (unmanaged)
dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked )
Transition Summary:
+ * Shutdown bl460g6a
Executing cluster transition:
* Cluster action: do_shutdown on bl460g6a
Revised cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): Started bl460g6a (unmanaged)
dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked )
diff --git a/pengine/test10/bug-5028.summary b/pengine/test10/bug-5028.summary
index ad7657c1dc..a85f75b403 100644
--- a/pengine/test10/bug-5028.summary
+++ b/pengine/test10/bug-5028.summary
@@ -1,22 +1,23 @@
Current cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): Started bl460g6a
dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked )
Transition Summary:
+ * Shutdown bl460g6a
* Stop dummy01 (Started bl460g6a - blocked)
Executing cluster transition:
* Pseudo action: dummy-g_stop_0
* Pseudo action: dummy-g_start_0
Revised cluster status:
Online: [ bl460g6a bl460g6b ]
Resource Group: dummy-g
dummy01 (ocf::heartbeat:Dummy): Started bl460g6a
dummy02 (ocf::heartbeat:Dummy-stop-NG): FAILED bl460g6a ( blocked )
diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary
index 4d0b10e5c7..63ed2d7565 100644
--- a/pengine/test10/bug-5186-partial-migrate.summary
+++ b/pengine/test10/bug-5186-partial-migrate.summary
@@ -1,90 +1,91 @@
Current cluster status:
Node bl460g1n7 (3232261593): UNCLEAN (offline)
Online: [ bl460g1n6 bl460g1n8 ]
prmDummy (ocf::pacemaker:Dummy): Started bl460g1n7 (UNCLEAN)
prmVM2 (ocf::heartbeat:VirtualDomain): Migrating bl460g1n7 (UNCLEAN)
Resource Group: grpStonith6
prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8
prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8
Resource Group: grpStonith7
prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6
Resource Group: grpStonith8
prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN)
prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN)
Clone Set: clnDiskd1 [prmDiskd1]
prmDiskd1 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Clone Set: clnDiskd2 [prmDiskd2]
prmDiskd2 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Clone Set: clnPing [prmPing]
prmPing (ocf::pacemaker:ping): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Transition Summary:
+ * Fence bl460g1n7
* Move prmDummy (Started bl460g1n7 -> bl460g1n6)
* Move prmVM2 (Started bl460g1n7 -> bl460g1n8)
* Move prmStonith8-1 (Started bl460g1n7 -> bl460g1n6)
* Move prmStonith8-2 (Started bl460g1n7 -> bl460g1n6)
* Stop prmDiskd1:0 (bl460g1n7)
* Stop prmDiskd2:0 (bl460g1n7)
* Stop prmPing:0 (bl460g1n7)
Executing cluster transition:
* Resource action: prmVM2 stop on bl460g1n6
* Pseudo action: grpStonith8_stop_0
* Pseudo action: prmStonith8-2_stop_0
* Fencing bl460g1n7 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: prmDummy_stop_0
* Pseudo action: prmVM2_stop_0
* Pseudo action: prmStonith8-1_stop_0
* Pseudo action: clnDiskd1_stop_0
* Pseudo action: clnDiskd2_stop_0
* Pseudo action: clnPing_stop_0
* Resource action: prmDummy start on bl460g1n6
* Resource action: prmVM2 start on bl460g1n8
* Pseudo action: grpStonith8_stopped_0
* Pseudo action: grpStonith8_start_0
* Resource action: prmStonith8-1 start on bl460g1n6
* Resource action: prmStonith8-2 start on bl460g1n6
* Pseudo action: prmDiskd1_stop_0
* Pseudo action: clnDiskd1_stopped_0
* Pseudo action: prmDiskd2_stop_0
* Pseudo action: clnDiskd2_stopped_0
* Pseudo action: prmPing_stop_0
* Pseudo action: clnPing_stopped_0
* Pseudo action: all_stopped
* Resource action: prmVM2 monitor=10000 on bl460g1n8
* Pseudo action: grpStonith8_running_0
* Resource action: prmStonith8-1 monitor=10000 on bl460g1n6
* Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6
Revised cluster status:
Online: [ bl460g1n6 bl460g1n8 ]
OFFLINE: [ bl460g1n7 ]
prmDummy (ocf::pacemaker:Dummy): Started bl460g1n6
prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n8
Resource Group: grpStonith6
prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8
prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8
Resource Group: grpStonith7
prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6
Resource Group: grpStonith8
prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6
Clone Set: clnDiskd1 [prmDiskd1]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
Clone Set: clnDiskd2 [prmDiskd2]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
Clone Set: clnPing [prmPing]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary
index a13754f979..8ea3ff015c 100644
--- a/pengine/test10/bug-cl-5247.summary
+++ b/pengine/test10/bug-cl-5247.summary
@@ -1,100 +1,101 @@
Using the original execution date of: 2015-08-12 02:53:40Z
Current cluster status:
Online: [ bl460g8n3 bl460g8n4 ]
Containers: [ pgsr01:prmDB1 ]
prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3
prmDB2 (ocf::heartbeat:VirtualDomain): FAILED bl460g8n4
Resource Group: grpStonith1
prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
Resource Group: grpStonith2
prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
Resource Group: master-group
vip-master (ocf::heartbeat:Dummy): FAILED pgsr02
vip-rep (ocf::heartbeat:Dummy): FAILED pgsr02
Master/Slave Set: msPostgresql [pgsql]
Masters: [ pgsr01 ]
Stopped: [ bl460g8n3 bl460g8n4 ]
Transition Summary:
+ * Fence pgsr02 (resource: prmDB2)
* Stop prmDB2 (bl460g8n4)
* Restart prmStonith1-2 (Started bl460g8n4)
* Restart prmStonith2-2 (Started bl460g8n3)
* Recover vip-master (Started pgsr02 -> pgsr01)
* Recover vip-rep (Started pgsr02 -> pgsr01)
* Demote pgsql:0 (Master -> Stopped pgsr02)
* Stop pgsr02 (bl460g8n4)
Executing cluster transition:
* Pseudo action: grpStonith1_stop_0
* Resource action: prmStonith1-2 stop on bl460g8n4
* Pseudo action: grpStonith2_stop_0
* Resource action: prmStonith2-2 stop on bl460g8n3
* Pseudo action: msPostgresql_pre_notify_demote_0
* Resource action: pgsr01 monitor on bl460g8n4
* Resource action: pgsr02 monitor on bl460g8n3
* Pseudo action: grpStonith1_stopped_0
* Pseudo action: grpStonith1_start_0
* Pseudo action: grpStonith2_stopped_0
* Pseudo action: grpStonith2_start_0
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-pre_notify_demote_0
* Pseudo action: msPostgresql_demote_0
* Resource action: pgsr02 stop on bl460g8n4
* Resource action: prmDB2 stop on bl460g8n4
* Pseudo action: stonith-pgsr02-off on pgsr02
* Pseudo action: stonith_complete
* Pseudo action: pgsql_post_notify_stop_0
* Pseudo action: pgsql_demote_0
* Pseudo action: msPostgresql_demoted_0
* Pseudo action: msPostgresql_post_notify_demoted_0
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-post_notify_demoted_0
* Pseudo action: msPostgresql_pre_notify_stop_0
* Pseudo action: master-group_stop_0
* Pseudo action: vip-rep_stop_0
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-pre_notify_stop_0
* Pseudo action: msPostgresql_stop_0
* Pseudo action: vip-master_stop_0
* Pseudo action: pgsql_stop_0
* Pseudo action: msPostgresql_stopped_0
* Pseudo action: master-group_stopped_0
* Pseudo action: master-group_start_0
* Resource action: vip-master start on pgsr01
* Resource action: vip-rep start on pgsr01
* Pseudo action: msPostgresql_post_notify_stopped_0
* Pseudo action: master-group_running_0
* Resource action: vip-master monitor=10000 on pgsr01
* Resource action: vip-rep monitor=10000 on pgsr01
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-post_notify_stopped_0
* Pseudo action: pgsql_notified_0
* Resource action: pgsql monitor=9000 on pgsr01
* Pseudo action: all_stopped
* Resource action: prmStonith1-2 start on bl460g8n4
* Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4
* Resource action: prmStonith2-2 start on bl460g8n3
* Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3
* Pseudo action: grpStonith1_running_0
* Pseudo action: grpStonith2_running_0
Using the original execution date of: 2015-08-12 02:53:40Z
Revised cluster status:
Online: [ bl460g8n3 bl460g8n4 ]
Containers: [ pgsr01:prmDB1 ]
prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3
prmDB2 (ocf::heartbeat:VirtualDomain): FAILED
Resource Group: grpStonith1
prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
Resource Group: grpStonith2
prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
Resource Group: master-group
vip-master (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ]
vip-rep (ocf::heartbeat:Dummy): FAILED[ pgsr01 pgsr02 ]
Master/Slave Set: msPostgresql [pgsql]
Masters: [ pgsr01 ]
Stopped: [ bl460g8n3 bl460g8n4 ]
diff --git a/pengine/test10/bug-lf-2508.summary b/pengine/test10/bug-lf-2508.summary
index af5e4e2492..7b436d4309 100644
--- a/pengine/test10/bug-lf-2508.summary
+++ b/pengine/test10/bug-lf-2508.summary
@@ -1,111 +1,112 @@
Current cluster status:
Node srv02 (71085d5e-1c63-49e0-8c8c-400d610b4182): UNCLEAN (offline)
Online: [ srv01 srv03 srv04 ]
Resource Group: Group01
Dummy01 (ocf::heartbeat:Dummy): Stopped
Resource Group: Group02
Dummy02 (ocf::heartbeat:Dummy): Started srv02 (UNCLEAN)
Resource Group: Group03
Dummy03 (ocf::heartbeat:Dummy): Started srv03
Clone Set: clnStonith1 [grpStonith1]
Resource Group: grpStonith1:1
prmStonith1-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN)
prmStonith1-3 (stonith:external/ssh): Started srv02 (UNCLEAN)
Started: [ srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnStonith2 [grpStonith2]
Started: [ srv01 srv03 srv04 ]
Stopped: [ srv02 ]
Clone Set: clnStonith3 [grpStonith3]
Resource Group: grpStonith3:0
prmStonith3-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN)
prmStonith3-3 (stonith:external/ssh): Started srv02 (UNCLEAN)
Resource Group: grpStonith3:1
prmStonith3-1 (stonith:external/stonith-helper): Started srv01
prmStonith3-3 (stonith:external/ssh): Stopped
Started: [ srv04 ]
Stopped: [ srv03 ]
Clone Set: clnStonith4 [grpStonith4]
Resource Group: grpStonith4:1
prmStonith4-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN)
prmStonith4-3 (stonith:external/ssh): Started srv02 (UNCLEAN)
Started: [ srv01 srv03 ]
Stopped: [ srv04 ]
Transition Summary:
+ * Fence srv02
* Start Dummy01 (srv01)
* Move Dummy02 (Started srv02 -> srv04)
* Stop prmStonith1-1:1 (srv02)
* Stop prmStonith1-3:1 (srv02)
* Stop prmStonith3-1:0 (srv02)
* Stop prmStonith3-3:0 (srv02)
* Start prmStonith3-3:1 (srv01)
* Stop prmStonith4-1:1 (srv02)
* Stop prmStonith4-3:1 (srv02)
Executing cluster transition:
* Pseudo action: Group01_start_0
* Resource action: prmStonith3-1:1 monitor=3600000 on srv01
* Fencing srv02 (reboot)
* Pseudo action: stonith_complete
* Resource action: Dummy01 start on srv01
* Pseudo action: Group02_stop_0
* Pseudo action: Dummy02_stop_0
* Pseudo action: clnStonith1_stop_0
* Pseudo action: clnStonith3_stop_0
* Pseudo action: clnStonith4_stop_0
* Pseudo action: Group01_running_0
* Resource action: Dummy01 monitor=10000 on srv01
* Pseudo action: Group02_stopped_0
* Pseudo action: Group02_start_0
* Resource action: Dummy02 start on srv04
* Pseudo action: grpStonith1:1_stop_0
* Pseudo action: prmStonith1-3:1_stop_0
* Pseudo action: grpStonith3:0_stop_0
* Pseudo action: prmStonith3-3:1_stop_0
* Pseudo action: grpStonith4:1_stop_0
* Pseudo action: prmStonith4-3:1_stop_0
* Pseudo action: Group02_running_0
* Resource action: Dummy02 monitor=10000 on srv04
* Pseudo action: prmStonith1-1:1_stop_0
* Pseudo action: prmStonith3-1:1_stop_0
* Pseudo action: prmStonith4-1:1_stop_0
* Pseudo action: all_stopped
* Pseudo action: grpStonith1:1_stopped_0
* Pseudo action: clnStonith1_stopped_0
* Pseudo action: grpStonith3:0_stopped_0
* Pseudo action: clnStonith3_stopped_0
* Pseudo action: clnStonith3_start_0
* Pseudo action: grpStonith4:1_stopped_0
* Pseudo action: clnStonith4_stopped_0
* Pseudo action: grpStonith3:1_start_0
* Resource action: prmStonith3-3:1 start on srv01
* Pseudo action: grpStonith3:1_running_0
* Resource action: prmStonith3-3:1 monitor=3600000 on srv01
* Pseudo action: clnStonith3_running_0
Revised cluster status:
Online: [ srv01 srv03 srv04 ]
OFFLINE: [ srv02 ]
Resource Group: Group01
Dummy01 (ocf::heartbeat:Dummy): Started srv01
Resource Group: Group02
Dummy02 (ocf::heartbeat:Dummy): Started srv04
Resource Group: Group03
Dummy03 (ocf::heartbeat:Dummy): Started srv03
Clone Set: clnStonith1 [grpStonith1]
Started: [ srv03 srv04 ]
Stopped: [ srv01 srv02 ]
Clone Set: clnStonith2 [grpStonith2]
Started: [ srv01 srv03 srv04 ]
Stopped: [ srv02 ]
Clone Set: clnStonith3 [grpStonith3]
Started: [ srv01 srv04 ]
Stopped: [ srv02 srv03 ]
Clone Set: clnStonith4 [grpStonith4]
Started: [ srv01 srv03 ]
Stopped: [ srv02 srv04 ]
diff --git a/pengine/test10/bug-lf-2551.summary b/pengine/test10/bug-lf-2551.summary
index 158eb73032..ffb7c6d933 100644
--- a/pengine/test10/bug-lf-2551.summary
+++ b/pengine/test10/bug-lf-2551.summary
@@ -1,225 +1,226 @@
Current cluster status:
Node hex-9: UNCLEAN (offline)
Online: [ hex-0 hex-7 hex-8 ]
vm-00 (ocf::heartbeat:Xen): Started hex-0
Clone Set: base-clone [base-group]
Resource Group: base-group:3
dlm (ocf::pacemaker:controld): Started hex-9 (UNCLEAN)
o2cb (ocf::ocfs2:o2cb): Started hex-9 (UNCLEAN)
clvm (ocf::lvm2:clvmd): Started hex-9 (UNCLEAN)
cmirrord (ocf::lvm2:cmirrord): Started hex-9 (UNCLEAN)
vg1 (ocf::heartbeat:LVM): Started hex-9 (UNCLEAN)
ocfs2-1 (ocf::heartbeat:Filesystem): Started hex-9 (UNCLEAN)
Started: [ hex-0 hex-7 hex-8 ]
vm-01 (ocf::heartbeat:Xen): Started hex-7
vm-02 (ocf::heartbeat:Xen): Started hex-8
vm-03 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-04 (ocf::heartbeat:Xen): Started hex-7
vm-05 (ocf::heartbeat:Xen): Started hex-8
fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN)
vm-06 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-07 (ocf::heartbeat:Xen): Started hex-7
vm-08 (ocf::heartbeat:Xen): Started hex-8
vm-09 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-10 (ocf::heartbeat:Xen): Started hex-0
vm-11 (ocf::heartbeat:Xen): Started hex-7
vm-12 (ocf::heartbeat:Xen): Started hex-8
vm-13 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-14 (ocf::heartbeat:Xen): Started hex-0
vm-15 (ocf::heartbeat:Xen): Started hex-7
vm-16 (ocf::heartbeat:Xen): Started hex-8
vm-17 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-18 (ocf::heartbeat:Xen): Started hex-0
vm-19 (ocf::heartbeat:Xen): Started hex-7
vm-20 (ocf::heartbeat:Xen): Started hex-8
vm-21 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-22 (ocf::heartbeat:Xen): Started hex-0
vm-23 (ocf::heartbeat:Xen): Started hex-7
vm-24 (ocf::heartbeat:Xen): Started hex-8
vm-25 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-26 (ocf::heartbeat:Xen): Started hex-0
vm-27 (ocf::heartbeat:Xen): Started hex-7
vm-28 (ocf::heartbeat:Xen): Started hex-8
vm-29 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-30 (ocf::heartbeat:Xen): Started hex-0
vm-31 (ocf::heartbeat:Xen): Started hex-7
vm-32 (ocf::heartbeat:Xen): Started hex-8
dummy1 (ocf::heartbeat:Dummy): Started hex-9 (UNCLEAN)
vm-33 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-34 (ocf::heartbeat:Xen): Started hex-0
vm-35 (ocf::heartbeat:Xen): Started hex-7
vm-36 (ocf::heartbeat:Xen): Started hex-8
vm-37 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-38 (ocf::heartbeat:Xen): Started hex-0
vm-39 (ocf::heartbeat:Xen): Started hex-7
vm-40 (ocf::heartbeat:Xen): Started hex-8
vm-41 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-42 (ocf::heartbeat:Xen): Started hex-0
vm-43 (ocf::heartbeat:Xen): Started hex-7
vm-44 (ocf::heartbeat:Xen): Started hex-8
vm-45 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-46 (ocf::heartbeat:Xen): Started hex-0
vm-47 (ocf::heartbeat:Xen): Started hex-7
vm-48 (ocf::heartbeat:Xen): Started hex-8
vm-49 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-50 (ocf::heartbeat:Xen): Started hex-0
vm-51 (ocf::heartbeat:Xen): Started hex-7
vm-52 (ocf::heartbeat:Xen): Started hex-8
vm-53 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-54 (ocf::heartbeat:Xen): Started hex-0
vm-55 (ocf::heartbeat:Xen): Started hex-7
vm-56 (ocf::heartbeat:Xen): Started hex-8
vm-57 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-58 (ocf::heartbeat:Xen): Started hex-0
vm-59 (ocf::heartbeat:Xen): Started hex-7
vm-60 (ocf::heartbeat:Xen): Started hex-8
vm-61 (ocf::heartbeat:Xen): Started hex-9 (UNCLEAN)
vm-62 (ocf::heartbeat:Xen): Stopped
vm-63 (ocf::heartbeat:Xen): Stopped
vm-64 (ocf::heartbeat:Xen): Stopped
Transition Summary:
+ * Fence hex-9
* Move fencing-sbd (Started hex-9 -> hex-0)
* Move dummy1 (Started hex-9 -> hex-0)
* Stop dlm:3 (hex-9)
* Stop o2cb:3 (hex-9)
* Stop clvm:3 (hex-9)
* Stop cmirrord:3 (hex-9)
* Stop vg1:3 (hex-9)
* Stop ocfs2-1:3 (hex-9)
* Stop vm-03 (hex-9)
* Stop vm-06 (hex-9)
* Stop vm-09 (hex-9)
* Stop vm-13 (hex-9)
* Stop vm-17 (hex-9)
* Stop vm-21 (hex-9)
* Stop vm-25 (hex-9)
* Stop vm-29 (hex-9)
* Stop vm-33 (hex-9)
* Stop vm-37 (hex-9)
* Stop vm-41 (hex-9)
* Stop vm-45 (hex-9)
* Stop vm-49 (hex-9)
* Stop vm-53 (hex-9)
* Stop vm-57 (hex-9)
* Stop vm-61 (hex-9)
Executing cluster transition:
* Pseudo action: fencing-sbd_stop_0
* Resource action: dummy1 monitor=300000 on hex-8
* Resource action: dummy1 monitor=300000 on hex-7
* Fencing hex-9 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: load_stopped_hex-8
* Pseudo action: load_stopped_hex-7
* Pseudo action: load_stopped_hex-0
* Resource action: fencing-sbd start on hex-0
* Pseudo action: dummy1_stop_0
* Pseudo action: vm-03_stop_0
* Pseudo action: vm-06_stop_0
* Pseudo action: vm-09_stop_0
* Pseudo action: vm-13_stop_0
* Pseudo action: vm-17_stop_0
* Pseudo action: vm-21_stop_0
* Pseudo action: vm-25_stop_0
* Pseudo action: vm-29_stop_0
* Pseudo action: vm-33_stop_0
* Pseudo action: vm-37_stop_0
* Pseudo action: vm-41_stop_0
* Pseudo action: vm-45_stop_0
* Pseudo action: vm-49_stop_0
* Pseudo action: vm-53_stop_0
* Pseudo action: vm-57_stop_0
* Pseudo action: vm-61_stop_0
* Pseudo action: load_stopped_hex-9
* Resource action: dummy1 start on hex-0
* Pseudo action: base-clone_stop_0
* Resource action: dummy1 monitor=30000 on hex-0
* Pseudo action: base-group:3_stop_0
* Pseudo action: ocfs2-1:3_stop_0
* Pseudo action: vg1:3_stop_0
* Pseudo action: cmirrord:3_stop_0
* Pseudo action: clvm:3_stop_0
* Pseudo action: o2cb:3_stop_0
* Pseudo action: dlm:3_stop_0
* Pseudo action: all_stopped
* Pseudo action: base-group:3_stopped_0
* Pseudo action: base-clone_stopped_0
Revised cluster status:
Online: [ hex-0 hex-7 hex-8 ]
OFFLINE: [ hex-9 ]
vm-00 (ocf::heartbeat:Xen): Started hex-0
Clone Set: base-clone [base-group]
Started: [ hex-0 hex-7 hex-8 ]
Stopped: [ hex-9 ]
vm-01 (ocf::heartbeat:Xen): Started hex-7
vm-02 (ocf::heartbeat:Xen): Started hex-8
vm-03 (ocf::heartbeat:Xen): Stopped
vm-04 (ocf::heartbeat:Xen): Started hex-7
vm-05 (ocf::heartbeat:Xen): Started hex-8
fencing-sbd (stonith:external/sbd): Started hex-0
vm-06 (ocf::heartbeat:Xen): Stopped
vm-07 (ocf::heartbeat:Xen): Started hex-7
vm-08 (ocf::heartbeat:Xen): Started hex-8
vm-09 (ocf::heartbeat:Xen): Stopped
vm-10 (ocf::heartbeat:Xen): Started hex-0
vm-11 (ocf::heartbeat:Xen): Started hex-7
vm-12 (ocf::heartbeat:Xen): Started hex-8
vm-13 (ocf::heartbeat:Xen): Stopped
vm-14 (ocf::heartbeat:Xen): Started hex-0
vm-15 (ocf::heartbeat:Xen): Started hex-7
vm-16 (ocf::heartbeat:Xen): Started hex-8
vm-17 (ocf::heartbeat:Xen): Stopped
vm-18 (ocf::heartbeat:Xen): Started hex-0
vm-19 (ocf::heartbeat:Xen): Started hex-7
vm-20 (ocf::heartbeat:Xen): Started hex-8
vm-21 (ocf::heartbeat:Xen): Stopped
vm-22 (ocf::heartbeat:Xen): Started hex-0
vm-23 (ocf::heartbeat:Xen): Started hex-7
vm-24 (ocf::heartbeat:Xen): Started hex-8
vm-25 (ocf::heartbeat:Xen): Stopped
vm-26 (ocf::heartbeat:Xen): Started hex-0
vm-27 (ocf::heartbeat:Xen): Started hex-7
vm-28 (ocf::heartbeat:Xen): Started hex-8
vm-29 (ocf::heartbeat:Xen): Stopped
vm-30 (ocf::heartbeat:Xen): Started hex-0
vm-31 (ocf::heartbeat:Xen): Started hex-7
vm-32 (ocf::heartbeat:Xen): Started hex-8
dummy1 (ocf::heartbeat:Dummy): Started hex-0
vm-33 (ocf::heartbeat:Xen): Stopped
vm-34 (ocf::heartbeat:Xen): Started hex-0
vm-35 (ocf::heartbeat:Xen): Started hex-7
vm-36 (ocf::heartbeat:Xen): Started hex-8
vm-37 (ocf::heartbeat:Xen): Stopped
vm-38 (ocf::heartbeat:Xen): Started hex-0
vm-39 (ocf::heartbeat:Xen): Started hex-7
vm-40 (ocf::heartbeat:Xen): Started hex-8
vm-41 (ocf::heartbeat:Xen): Stopped
vm-42 (ocf::heartbeat:Xen): Started hex-0
vm-43 (ocf::heartbeat:Xen): Started hex-7
vm-44 (ocf::heartbeat:Xen): Started hex-8
vm-45 (ocf::heartbeat:Xen): Stopped
vm-46 (ocf::heartbeat:Xen): Started hex-0
vm-47 (ocf::heartbeat:Xen): Started hex-7
vm-48 (ocf::heartbeat:Xen): Started hex-8
vm-49 (ocf::heartbeat:Xen): Stopped
vm-50 (ocf::heartbeat:Xen): Started hex-0
vm-51 (ocf::heartbeat:Xen): Started hex-7
vm-52 (ocf::heartbeat:Xen): Started hex-8
vm-53 (ocf::heartbeat:Xen): Stopped
vm-54 (ocf::heartbeat:Xen): Started hex-0
vm-55 (ocf::heartbeat:Xen): Started hex-7
vm-56 (ocf::heartbeat:Xen): Started hex-8
vm-57 (ocf::heartbeat:Xen): Stopped
vm-58 (ocf::heartbeat:Xen): Started hex-0
vm-59 (ocf::heartbeat:Xen): Started hex-7
vm-60 (ocf::heartbeat:Xen): Started hex-8
vm-61 (ocf::heartbeat:Xen): Stopped
vm-62 (ocf::heartbeat:Xen): Stopped
vm-63 (ocf::heartbeat:Xen): Stopped
vm-64 (ocf::heartbeat:Xen): Stopped
diff --git a/pengine/test10/bug-lf-2606.summary b/pengine/test10/bug-lf-2606.summary
index 3e74d4bcf5..ab93bb35bc 100644
--- a/pengine/test10/bug-lf-2606.summary
+++ b/pengine/test10/bug-lf-2606.summary
@@ -1,44 +1,45 @@
1 of 5 resources DISABLED and 0 BLOCKED from being started due to failures
Current cluster status:
Node node2: UNCLEAN (online)
Online: [ node1 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): FAILED node2 ( disabled )
rsc2 (ocf::pacemaker:Dummy): Started node2
Master/Slave Set: ms3 [rsc3]
Masters: [ node2 ]
Slaves: [ node1 ]
Transition Summary:
+ * Fence node2
* Stop rsc1 (node2)
* Move rsc2 (Started node2 -> node1)
* Demote rsc3:1 (Master -> Stopped node2)
Executing cluster transition:
* Pseudo action: ms3_demote_0
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc3:1_demote_0
* Pseudo action: ms3_demoted_0
* Pseudo action: ms3_stop_0
* Resource action: rsc2 start on node1
* Pseudo action: rsc3:1_stop_0
* Pseudo action: ms3_stopped_0
* Pseudo action: all_stopped
* Resource action: rsc2 monitor=10000 on node1
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): Stopped ( disabled )
rsc2 (ocf::pacemaker:Dummy): Started node1
Master/Slave Set: ms3 [rsc3]
Slaves: [ node1 ]
Stopped: [ node2 ]
diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary
index f8d2c1446d..8fe474a164 100644
--- a/pengine/test10/bug-rh-1097457.summary
+++ b/pengine/test10/bug-rh-1097457.summary
@@ -1,104 +1,105 @@
2 of 26 resources DISABLED and 0 BLOCKED from being started due to failures
Current cluster status:
Online: [ lama2 lama3 ]
Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ]
restofencelama2 (stonith:fence_ipmilan): Started lama3
restofencelama3 (stonith:fence_ipmilan): Started lama2
VM1 (ocf::heartbeat:VirtualDomain): Started lama2
FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1
FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1
VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3
VM3 (ocf::heartbeat:VirtualDomain): Started lama3
FSlun3 (ocf::heartbeat:Filesystem): FAILED lamaVM2
FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3
FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3
Resource Group: lamaVM1-G1
FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G2
FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G3
FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM2-G4
FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2
FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2
Clone Set: FAKE6-clone [FAKE6]
Started: [ lamaVM1 lamaVM2 lamaVM3 ]
Transition Summary:
+ * Fence lamaVM2 (resource: VM2)
* Recover VM2 (Started lama3)
* Recover FSlun3 (Started lamaVM2 -> lama2)
* Restart FAKE4 (Started lamaVM2)
* Restart FAKE4-IP (Started lamaVM2)
* Restart FAKE6:2 (Started lamaVM2)
* Restart lamaVM2 (Started lama3)
Executing cluster transition:
* Resource action: lamaVM2 stop on lama3
* Resource action: VM2 stop on lama3
* Pseudo action: stonith-lamaVM2-reboot on lamaVM2
* Pseudo action: stonith_complete
* Resource action: VM2 start on lama3
* Resource action: VM2 monitor=10000 on lama3
* Pseudo action: lamaVM2-G4_stop_0
* Pseudo action: FAKE4-IP_stop_0
* Pseudo action: FAKE6-clone_stop_0
* Resource action: lamaVM2 start on lama3
* Resource action: lamaVM2 monitor=30000 on lama3
* Resource action: FSlun3 monitor=10000 on lamaVM2
* Pseudo action: FAKE4_stop_0
* Pseudo action: FAKE6_stop_0
* Pseudo action: FAKE6-clone_stopped_0
* Pseudo action: FAKE6-clone_start_0
* Pseudo action: lamaVM2-G4_stopped_0
* Resource action: FAKE6 start on lamaVM2
* Resource action: FAKE6 monitor=30000 on lamaVM2
* Pseudo action: FAKE6-clone_running_0
* Pseudo action: FSlun3_stop_0
* Pseudo action: all_stopped
* Resource action: FSlun3 start on lama2
* Pseudo action: lamaVM2-G4_start_0
* Resource action: FAKE4 start on lamaVM2
* Resource action: FAKE4 monitor=30000 on lamaVM2
* Resource action: FAKE4-IP start on lamaVM2
* Resource action: FAKE4-IP monitor=30000 on lamaVM2
* Resource action: FSlun3 monitor=10000 on lama2
* Pseudo action: lamaVM2-G4_running_0
Revised cluster status:
Online: [ lama2 lama3 ]
Containers: [ lamaVM1:VM1 lamaVM2:VM2 lamaVM3:VM3 ]
restofencelama2 (stonith:fence_ipmilan): Started lama3
restofencelama3 (stonith:fence_ipmilan): Started lama2
VM1 (ocf::heartbeat:VirtualDomain): Started lama2
FSlun1 (ocf::heartbeat:Filesystem): Started lamaVM1
FSlun2 (ocf::heartbeat:Filesystem): Started lamaVM1
VM2 (ocf::heartbeat:VirtualDomain): FAILED lama3
VM3 (ocf::heartbeat:VirtualDomain): Started lama3
FSlun3 (ocf::heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ]
FSlun4 (ocf::heartbeat:Filesystem): Started lamaVM3
FAKE5-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE6-IP (ocf::heartbeat:IPaddr2): Stopped ( disabled )
FAKE5 (ocf::heartbeat:Dummy): Started lamaVM3
Resource Group: lamaVM1-G1
FAKE1 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE1-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G2
FAKE2 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE2-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM1-G3
FAKE3 (ocf::heartbeat:Dummy): Started lamaVM1
FAKE3-IP (ocf::heartbeat:IPaddr2): Started lamaVM1
Resource Group: lamaVM2-G4
FAKE4 (ocf::heartbeat:Dummy): Started lamaVM2
FAKE4-IP (ocf::heartbeat:IPaddr2): Started lamaVM2
Clone Set: FAKE6-clone [FAKE6]
Started: [ lamaVM1 lamaVM2 lamaVM3 ]
diff --git a/pengine/test10/colocate-primitive-with-clone.summary b/pengine/test10/colocate-primitive-with-clone.summary
index 5e4c511a97..e0f685fb4f 100644
--- a/pengine/test10/colocate-primitive-with-clone.summary
+++ b/pengine/test10/colocate-primitive-with-clone.summary
@@ -1,125 +1,126 @@
Current cluster status:
Online: [ srv01 srv02 srv03 srv04 ]
Resource Group: UMgroup01
UmVIPcheck (ocf::heartbeat:Dummy): Stopped
UmIPaddr (ocf::heartbeat:Dummy): Stopped
UmDummy01 (ocf::heartbeat:Dummy): Stopped
UmDummy02 (ocf::heartbeat:Dummy): Stopped
Resource Group: OVDBgroup02-1
prmExPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-1 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-2 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-3 (ocf::heartbeat:Dummy): Started srv04
prmIpPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
prmApPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
Resource Group: OVDBgroup02-2
prmExPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-1 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-2 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-3 (ocf::heartbeat:Dummy): Started srv02
prmIpPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
prmApPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
Resource Group: OVDBgroup02-3
prmExPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-1 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-2 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-3 (ocf::heartbeat:Dummy): Started srv03
prmIpPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
prmApPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
Resource Group: grpStonith1
prmStonithN1 (stonith:external/ssh): Started srv04
Resource Group: grpStonith2
prmStonithN2 (stonith:external/ssh): Started srv03
Resource Group: grpStonith3
prmStonithN3 (stonith:external/ssh): Started srv02
Resource Group: grpStonith4
prmStonithN4 (stonith:external/ssh): Started srv03
Clone Set: clnUMgroup01 [clnUmResource]
Started: [ srv04 ]
Stopped: [ srv01 srv02 srv03 ]
Clone Set: clnPingd [clnPrmPingd]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnDiskd1 [clnPrmDiskd1]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnG3dummy1 [clnG3dummy01]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnG3dummy2 [clnG3dummy02]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Transition Summary:
+ * Shutdown srv01
* Start UmVIPcheck (srv04)
* Start UmIPaddr (srv04)
* Start UmDummy01 (srv04)
* Start UmDummy02 (srv04)
Executing cluster transition:
* Pseudo action: UMgroup01_start_0
* Resource action: UmVIPcheck start on srv04
* Resource action: UmIPaddr start on srv04
* Resource action: UmDummy01 start on srv04
* Resource action: UmDummy02 start on srv04
* Cluster action: do_shutdown on srv01
* Pseudo action: UMgroup01_running_0
* Resource action: UmIPaddr monitor=10000 on srv04
* Resource action: UmDummy01 monitor=10000 on srv04
* Resource action: UmDummy02 monitor=10000 on srv04
Revised cluster status:
Online: [ srv01 srv02 srv03 srv04 ]
Resource Group: UMgroup01
UmVIPcheck (ocf::heartbeat:Dummy): Started srv04
UmIPaddr (ocf::heartbeat:Dummy): Started srv04
UmDummy01 (ocf::heartbeat:Dummy): Started srv04
UmDummy02 (ocf::heartbeat:Dummy): Started srv04
Resource Group: OVDBgroup02-1
prmExPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-1 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-2 (ocf::heartbeat:Dummy): Started srv04
prmFsPostgreSQLDB1-3 (ocf::heartbeat:Dummy): Started srv04
prmIpPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
prmApPostgreSQLDB1 (ocf::heartbeat:Dummy): Started srv04
Resource Group: OVDBgroup02-2
prmExPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-1 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-2 (ocf::heartbeat:Dummy): Started srv02
prmFsPostgreSQLDB2-3 (ocf::heartbeat:Dummy): Started srv02
prmIpPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
prmApPostgreSQLDB2 (ocf::heartbeat:Dummy): Started srv02
Resource Group: OVDBgroup02-3
prmExPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-1 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-2 (ocf::heartbeat:Dummy): Started srv03
prmFsPostgreSQLDB3-3 (ocf::heartbeat:Dummy): Started srv03
prmIpPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
prmApPostgreSQLDB3 (ocf::heartbeat:Dummy): Started srv03
Resource Group: grpStonith1
prmStonithN1 (stonith:external/ssh): Started srv04
Resource Group: grpStonith2
prmStonithN2 (stonith:external/ssh): Started srv03
Resource Group: grpStonith3
prmStonithN3 (stonith:external/ssh): Started srv02
Resource Group: grpStonith4
prmStonithN4 (stonith:external/ssh): Started srv03
Clone Set: clnUMgroup01 [clnUmResource]
Started: [ srv04 ]
Stopped: [ srv01 srv02 srv03 ]
Clone Set: clnPingd [clnPrmPingd]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnDiskd1 [clnPrmDiskd1]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnG3dummy1 [clnG3dummy01]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
Clone Set: clnG3dummy2 [clnG3dummy02]
Started: [ srv02 srv03 srv04 ]
Stopped: [ srv01 ]
diff --git a/pengine/test10/concurrent-fencing.summary b/pengine/test10/concurrent-fencing.summary
index 10b2fdef20..a274c3b614 100644
--- a/pengine/test10/concurrent-fencing.summary
+++ b/pengine/test10/concurrent-fencing.summary
@@ -1,24 +1,27 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (offline)
Node node2 (uuid2): UNCLEAN (offline)
Node node3 (uuid3): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
Transition Summary:
+ * Fence node3
+ * Fence node2
+ * Fence node1
Executing cluster transition:
* Fencing node3 (reboot)
* Fencing node1 (reboot)
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ node1 node2 node3 ]
stonith-1 (stonith:dummy): Stopped
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
diff --git a/pengine/test10/guest-node-host-dies.summary b/pengine/test10/guest-node-host-dies.summary
index c6180caae1..717c43d82b 100644
--- a/pengine/test10/guest-node-host-dies.summary
+++ b/pengine/test10/guest-node-host-dies.summary
@@ -1,79 +1,82 @@
Current cluster status:
Node rhel7-1 (1): UNCLEAN (offline)
Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
Fencing (stonith:fence_xvm): Started rhel7-4
rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-1 ( UNCLEAN )
container1 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN)
container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN)
Master/Slave Set: lxc-ms-master [lxc-ms]
Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
Transition Summary:
+ * Fence rhel7-1
+ * Fence lxc2 (resource: container2)
+ * Fence lxc1 (resource: container1)
* Restart Fencing (Started rhel7-4)
* Move rsc_rhel7-1 (Started rhel7-1 -> rhel7-5)
* Recover container1 (Started rhel7-1 -> rhel7-2)
* Recover container2 (Started rhel7-1 -> rhel7-3)
* Recover lxc-ms:0 (Master lxc1)
* Recover lxc-ms:1 (Slave lxc2)
* Move lxc1 (Started rhel7-1 -> rhel7-2)
* Move lxc2 (Started rhel7-1 -> rhel7-3)
Executing cluster transition:
* Resource action: Fencing stop on rhel7-4
* Pseudo action: lxc-ms-master_demote_0
* Resource action: lxc1 monitor on rhel7-5
* Resource action: lxc1 monitor on rhel7-4
* Resource action: lxc1 monitor on rhel7-3
* Resource action: lxc2 monitor on rhel7-5
* Resource action: lxc2 monitor on rhel7-4
* Resource action: lxc2 monitor on rhel7-2
* Fencing rhel7-1 (reboot)
* Pseudo action: rsc_rhel7-1_stop_0
* Pseudo action: lxc1_stop_0
* Pseudo action: lxc2_stop_0
* Pseudo action: container1_stop_0
* Pseudo action: container2_stop_0
* Pseudo action: stonith-lxc2-reboot on lxc2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Pseudo action: stonith_complete
* Resource action: rsc_rhel7-1 start on rhel7-5
* Resource action: container1 start on rhel7-2
* Resource action: container2 start on rhel7-3
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc1 start on rhel7-2
* Resource action: lxc2 start on rhel7-3
* Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc1 monitor=30000 on rhel7-2
* Resource action: lxc2 monitor=30000 on rhel7-3
* Pseudo action: all_stopped
* Resource action: Fencing start on rhel7-4
* Resource action: Fencing monitor=120000 on rhel7-4
* Resource action: lxc-ms start on lxc1
* Resource action: lxc-ms start on lxc2
* Pseudo action: lxc-ms-master_running_0
* Resource action: lxc-ms monitor=10000 on lxc2
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Revised cluster status:
Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
OFFLINE: [ rhel7-1 ]
Containers: [ lxc1:container1 lxc2:container2 ]
Fencing (stonith:fence_xvm): Started rhel7-4
rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Started rhel7-5
container1 (ocf::heartbeat:VirtualDomain): Started rhel7-2
container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3
Master/Slave Set: lxc-ms-master [lxc-ms]
Masters: [ lxc1 ]
Slaves: [ lxc2 ]
diff --git a/pengine/test10/inc12.summary b/pengine/test10/inc12.summary
index 3df5a59751..5068b7e95a 100644
--- a/pengine/test10/inc12.summary
+++ b/pengine/test10/inc12.summary
@@ -1,131 +1,137 @@
Current cluster status:
Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n05
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04
rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05
rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07
Clone Set: DoFencing [child_DoFencing]
Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ]
Stopped: [ c001n03 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave c001n04
ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave c001n04
ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave c001n05
ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave c001n05
ocf_msdummy:6 (ocf::heartbeat:Stateful): Slave c001n06
ocf_msdummy:7 (ocf::heartbeat:Stateful): Slave c001n06
ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave c001n07
ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave c001n07
ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave c001n02
ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave c001n02
Transition Summary:
+ * Shutdown c001n07
+ * Shutdown c001n06
+ * Shutdown c001n05
+ * Shutdown c001n04
+ * Shutdown c001n03
+ * Shutdown c001n02
* Stop ocf_192.168.100.181 (c001n02)
* Stop heartbeat_192.168.100.182 (c001n02)
* Stop ocf_192.168.100.183 (c001n02)
* Stop lsb_dummy (c001n04)
* Stop rsc_c001n03 (c001n05)
* Stop rsc_c001n02 (c001n02)
* Stop rsc_c001n04 (c001n04)
* Stop rsc_c001n05 (c001n05)
* Stop rsc_c001n06 (c001n06)
* Stop rsc_c001n07 (c001n07)
* Stop child_DoFencing:0 (c001n02)
* Stop child_DoFencing:1 (c001n04)
* Stop child_DoFencing:2 (c001n05)
* Stop child_DoFencing:3 (c001n06)
* Stop child_DoFencing:4 (c001n07)
* Stop ocf_msdummy:10 (c001n02)
* Stop ocf_msdummy:11 (c001n02)
* Stop ocf_msdummy:2 (c001n04)
* Stop ocf_msdummy:3 (c001n04)
* Stop ocf_msdummy:4 (c001n05)
* Stop ocf_msdummy:5 (c001n05)
* Stop ocf_msdummy:6 (c001n06)
* Stop ocf_msdummy:7 (c001n06)
* Stop ocf_msdummy:8 (c001n07)
* Stop ocf_msdummy:9 (c001n07)
Executing cluster transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n02
* Resource action: lsb_dummy stop on c001n04
* Resource action: rsc_c001n03 stop on c001n05
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: rsc_c001n04 stop on c001n04
* Resource action: rsc_c001n05 stop on c001n05
* Resource action: rsc_c001n06 stop on c001n06
* Resource action: rsc_c001n07 stop on c001n07
* Pseudo action: DoFencing_stop_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n02
* Resource action: child_DoFencing:1 stop on c001n02
* Resource action: child_DoFencing:2 stop on c001n04
* Resource action: child_DoFencing:3 stop on c001n05
* Resource action: child_DoFencing:4 stop on c001n06
* Resource action: child_DoFencing:5 stop on c001n07
* Pseudo action: DoFencing_stopped_0
* Resource action: ocf_msdummy:10 stop on c001n02
* Resource action: ocf_msdummy:11 stop on c001n02
* Resource action: ocf_msdummy:2 stop on c001n04
* Resource action: ocf_msdummy:3 stop on c001n04
* Resource action: ocf_msdummy:4 stop on c001n05
* Resource action: ocf_msdummy:5 stop on c001n05
* Resource action: ocf_msdummy:6 stop on c001n06
* Resource action: ocf_msdummy:7 stop on c001n06
* Resource action: ocf_msdummy:8 stop on c001n07
* Resource action: ocf_msdummy:9 stop on c001n07
* Pseudo action: master_rsc_1_stopped_0
* Cluster action: do_shutdown on c001n07
* Cluster action: do_shutdown on c001n06
* Cluster action: do_shutdown on c001n05
* Cluster action: do_shutdown on c001n04
* Resource action: ocf_192.168.100.181 stop on c001n02
* Cluster action: do_shutdown on c001n02
* Pseudo action: all_stopped
* Pseudo action: group-1_stopped_0
* Cluster action: do_shutdown on c001n03
Revised cluster status:
Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped
heartbeat_192.168.100.182 (heartbeat:IPaddr): Stopped
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing]
Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped
diff --git a/pengine/test10/interleave-pseudo-stop.summary b/pengine/test10/interleave-pseudo-stop.summary
index a682462837..cf30da07d7 100644
--- a/pengine/test10/interleave-pseudo-stop.summary
+++ b/pengine/test10/interleave-pseudo-stop.summary
@@ -1,82 +1,83 @@
Current cluster status:
Node node1 (f6d93040-a9ad-4745-a647-57ed32444ca8): UNCLEAN (offline)
Online: [ node2 ]
Clone Set: stonithcloneset [stonithclone]
stonithclone (stonith:external/ssh): Started node1 (UNCLEAN)
Started: [ node2 ]
Clone Set: evmscloneset [evmsclone]
evmsclone (ocf::heartbeat:EvmsSCC): Started node1 (UNCLEAN)
Started: [ node2 ]
Clone Set: imagestorecloneset [imagestoreclone]
imagestoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN)
Started: [ node2 ]
Clone Set: configstorecloneset [configstoreclone]
configstoreclone (ocf::heartbeat:Filesystem): Started node1 (UNCLEAN)
Started: [ node2 ]
Transition Summary:
+ * Fence node1
* Stop stonithclone:1 (node1)
* Stop evmsclone:1 (node1)
* Stop imagestoreclone:1 (node1)
* Stop configstoreclone:1 (node1)
Executing cluster transition:
* Pseudo action: evmscloneset_pre_notify_stop_0
* Pseudo action: imagestorecloneset_pre_notify_stop_0
* Pseudo action: configstorecloneset_pre_notify_stop_0
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: stonithcloneset_stop_0
* Resource action: evmsclone:1 notify on node2
* Pseudo action: evmsclone:0_post_notify_stop_0
* Pseudo action: evmscloneset_confirmed-pre_notify_stop_0
* Resource action: imagestoreclone:1 notify on node2
* Pseudo action: imagestoreclone:0_post_notify_stop_0
* Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0
* Pseudo action: imagestorecloneset_stop_0
* Resource action: configstoreclone:1 notify on node2
* Pseudo action: configstoreclone:0_post_notify_stop_0
* Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0
* Pseudo action: configstorecloneset_stop_0
* Pseudo action: stonithclone:0_stop_0
* Pseudo action: stonithcloneset_stopped_0
* Pseudo action: imagestoreclone:0_stop_0
* Pseudo action: imagestorecloneset_stopped_0
* Pseudo action: configstoreclone:0_stop_0
* Pseudo action: configstorecloneset_stopped_0
* Pseudo action: imagestorecloneset_post_notify_stopped_0
* Pseudo action: configstorecloneset_post_notify_stopped_0
* Resource action: imagestoreclone:1 notify on node2
* Pseudo action: imagestoreclone:0_notified_0
* Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0
* Resource action: configstoreclone:1 notify on node2
* Pseudo action: configstoreclone:0_notified_0
* Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0
* Pseudo action: evmscloneset_stop_0
* Pseudo action: evmsclone:0_stop_0
* Pseudo action: evmscloneset_stopped_0
* Pseudo action: evmscloneset_post_notify_stopped_0
* Resource action: evmsclone:1 notify on node2
* Pseudo action: evmsclone:0_notified_0
* Pseudo action: evmscloneset_confirmed-post_notify_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
Clone Set: stonithcloneset [stonithclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: evmscloneset [evmsclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: imagestorecloneset [imagestoreclone]
Started: [ node2 ]
Stopped (disabled): [ node1 ]
Clone Set: configstorecloneset [configstoreclone]
Started: [ node2 ]
Stopped: [ node1 ]
diff --git a/pengine/test10/master-7.summary b/pengine/test10/master-7.summary
index 747254246b..348b4eef39 100644
--- a/pengine/test10/master-7.summary
+++ b/pengine/test10/master-7.summary
@@ -1,120 +1,121 @@
Current cluster status:
Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
Online: [ c001n02 c001n03 c001n08 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n02
child_DoFencing:3 (stonith:ssh): Started c001n08
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN)
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 ( UNCLEAN )
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
Transition Summary:
+ * Fence c001n01
* Move DcIPaddr (Started c001n01 -> c001n03)
* Move ocf_192.168.100.181 (Started c001n03 -> c001n02)
* Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02)
* Move ocf_192.168.100.183 (Started c001n03 -> c001n02)
* Move lsb_dummy (Started c001n02 -> c001n08)
* Move rsc_c001n01 (Started c001n01 -> c001n03)
* Stop child_DoFencing:0 (c001n01)
* Demote ocf_msdummy:0 (Master -> Stopped c001n01)
* Stop ocf_msdummy:4 (c001n01)
Executing cluster transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n03
* Resource action: lsb_dummy stop on c001n02
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n03
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n02
* Pseudo action: master_rsc_1_demote_0
* Fencing c001n01 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: DcIPaddr_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n03
* Resource action: lsb_dummy start on c001n08
* Pseudo action: rsc_c001n01_stop_0
* Pseudo action: DoFencing_stop_0
* Pseudo action: ocf_msdummy:0_demote_0
* Pseudo action: master_rsc_1_demoted_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: DcIPaddr start on c001n03
* Resource action: ocf_192.168.100.181 stop on c001n03
* Resource action: lsb_dummy monitor=5000 on c001n08
* Resource action: rsc_c001n01 start on c001n03
* Pseudo action: child_DoFencing:0_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: ocf_msdummy:0_stop_0
* Pseudo action: ocf_msdummy:4_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: all_stopped
* Resource action: DcIPaddr monitor=5000 on c001n03
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Resource action: rsc_c001n01 monitor=5000 on c001n03
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
Revised cluster status:
Online: [ c001n02 c001n03 c001n08 ]
OFFLINE: [ c001n01 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n02
child_DoFencing:3 (stonith:ssh): Started c001n08
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
diff --git a/pengine/test10/master-8.summary b/pengine/test10/master-8.summary
index 450737b50e..b77c88465e 100644
--- a/pengine/test10/master-8.summary
+++ b/pengine/test10/master-8.summary
@@ -1,124 +1,125 @@
Current cluster status:
Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
Online: [ c001n02 c001n03 c001n08 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n03
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n03
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 (UNCLEAN)
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n02
child_DoFencing:3 (stonith:ssh): Started c001n08
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN)
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
Transition Summary:
+ * Fence c001n01
* Move DcIPaddr (Started c001n01 -> c001n03)
* Move ocf_192.168.100.181 (Started c001n03 -> c001n02)
* Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02)
* Move ocf_192.168.100.183 (Started c001n03 -> c001n02)
* Move lsb_dummy (Started c001n02 -> c001n08)
* Move rsc_c001n01 (Started c001n01 -> c001n03)
* Stop child_DoFencing:0 (c001n01)
* Demote ocf_msdummy:0 (Master -> Slave c001n01 - blocked)
* Move ocf_msdummy:0 (Slave c001n01 -> c001n03)
Executing cluster transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n03
* Resource action: lsb_dummy stop on c001n02
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n03
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n03
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n02
* Pseudo action: master_rsc_1_demote_0
* Fencing c001n01 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: DcIPaddr_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n03
* Resource action: lsb_dummy start on c001n08
* Pseudo action: rsc_c001n01_stop_0
* Pseudo action: DoFencing_stop_0
* Pseudo action: ocf_msdummy:0_demote_0
* Pseudo action: master_rsc_1_demoted_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: DcIPaddr start on c001n03
* Resource action: ocf_192.168.100.181 stop on c001n03
* Resource action: lsb_dummy monitor=5000 on c001n08
* Resource action: rsc_c001n01 start on c001n03
* Pseudo action: child_DoFencing:0_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: ocf_msdummy:0_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: master_rsc_1_start_0
* Pseudo action: all_stopped
* Resource action: DcIPaddr monitor=5000 on c001n03
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Resource action: rsc_c001n01 monitor=5000 on c001n03
* Resource action: ocf_msdummy:0 start on c001n03
* Pseudo action: master_rsc_1_running_0
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
* Resource action: ocf_msdummy:0 monitor=5000 on c001n03
Revised cluster status:
Online: [ c001n02 c001n03 c001n08 ]
OFFLINE: [ c001n01 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Started c001n03
child_DoFencing:2 (stonith:ssh): Started c001n02
child_DoFencing:3 (stonith:ssh): Started c001n08
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
diff --git a/pengine/test10/master-9.summary b/pengine/test10/master-9.summary
index 4f57a79bdc..c1cd4a1cbf 100644
--- a/pengine/test10/master-9.summary
+++ b/pengine/test10/master-9.summary
@@ -1,99 +1,100 @@
Current cluster status:
Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline)
Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline)
Online: [ ibm1 va1 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped
heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped
ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped
lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped
rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped
rsc_va1 (ocf::heartbeat:IPaddr): Stopped
rsc_test02 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started va1
child_DoFencing:1 (stonith:ssh): Started ibm1
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
Transition Summary:
+ * Shutdown ibm1
* Start DcIPaddr (va1 - blocked)
* Start ocf_127.0.0.11 (va1 - blocked)
* Start heartbeat_127.0.0.12 (va1 - blocked)
* Start ocf_127.0.0.13 (va1 - blocked)
* Start lsb_dummy (va1 - blocked)
* Start rsc_sgi2 (va1 - blocked)
* Start rsc_ibm1 (va1 - blocked)
* Start rsc_va1 (va1 - blocked)
* Start rsc_test02 (va1 - blocked)
* Stop child_DoFencing:1 (ibm1)
* Start ocf_msdummy:0 (va1 - blocked)
* Start ocf_msdummy:1 (va1 - blocked)
Executing cluster transition:
* Resource action: child_DoFencing:1 monitor on va1
* Resource action: child_DoFencing:2 monitor on va1
* Resource action: child_DoFencing:2 monitor on ibm1
* Resource action: child_DoFencing:3 monitor on va1
* Resource action: child_DoFencing:3 monitor on ibm1
* Pseudo action: DoFencing_stop_0
* Resource action: ocf_msdummy:2 monitor on va1
* Resource action: ocf_msdummy:2 monitor on ibm1
* Resource action: ocf_msdummy:3 monitor on va1
* Resource action: ocf_msdummy:3 monitor on ibm1
* Resource action: ocf_msdummy:4 monitor on va1
* Resource action: ocf_msdummy:4 monitor on ibm1
* Resource action: ocf_msdummy:5 monitor on va1
* Resource action: ocf_msdummy:5 monitor on ibm1
* Resource action: ocf_msdummy:6 monitor on va1
* Resource action: ocf_msdummy:6 monitor on ibm1
* Resource action: ocf_msdummy:7 monitor on va1
* Resource action: ocf_msdummy:7 monitor on ibm1
* Resource action: child_DoFencing:1 stop on ibm1
* Pseudo action: DoFencing_stopped_0
* Cluster action: do_shutdown on ibm1
* Pseudo action: all_stopped
Revised cluster status:
Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline)
Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline)
Online: [ ibm1 va1 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_127.0.0.11 (ocf::heartbeat:IPaddr): Stopped
heartbeat_127.0.0.12 (heartbeat:IPaddr): Stopped
ocf_127.0.0.13 (ocf::heartbeat:IPaddr): Stopped
lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped
rsc_ibm1 (ocf::heartbeat:IPaddr): Stopped
rsc_va1 (ocf::heartbeat:IPaddr): Stopped
rsc_test02 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started va1
child_DoFencing:1 (stonith:ssh): Stopped
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:1 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:2 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:3 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:4 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:5 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:7 (ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
diff --git a/pengine/test10/migrate-fencing.summary b/pengine/test10/migrate-fencing.summary
index e8bcd452ce..831e49acb2 100644
--- a/pengine/test10/migrate-fencing.summary
+++ b/pengine/test10/migrate-fencing.summary
@@ -1,107 +1,108 @@
Current cluster status:
Node pcmk-4: UNCLEAN (online)
Online: [ pcmk-1 pcmk-2 pcmk-3 ]
Clone Set: Fencing [FencingChild]
Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Resource Group: group-1
r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-4
r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-4
r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-4
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4
migrator (ocf::pacemaker:Dummy): Started pcmk-1
Clone Set: Connectivity [ping-1]
Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Master/Slave Set: master-1 [stateful-1]
Masters: [ pcmk-4 ]
Slaves: [ pcmk-1 pcmk-2 pcmk-3 ]
Transition Summary:
+ * Fence pcmk-4
* Stop FencingChild:0 (pcmk-4)
* Move r192.168.101.181 (Started pcmk-4 -> pcmk-1)
* Move r192.168.101.182 (Started pcmk-4 -> pcmk-1)
* Move r192.168.101.183 (Started pcmk-4 -> pcmk-1)
* Move rsc_pcmk-4 (Started pcmk-4 -> pcmk-2)
* Move lsb-dummy (Started pcmk-4 -> pcmk-1)
* Migrate migrator (Started pcmk-1 -> pcmk-3)
* Stop ping-1:0 (pcmk-4)
* Demote stateful-1:0 (Master -> Stopped pcmk-4)
* Promote stateful-1:1 (Slave -> Master pcmk-1)
Executing cluster transition:
* Resource action: stateful-1:3 monitor=15000 on pcmk-3
* Resource action: stateful-1:2 monitor=15000 on pcmk-2
* Fencing pcmk-4 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: Fencing_stop_0
* Pseudo action: rsc_pcmk-4_stop_0
* Pseudo action: lsb-dummy_stop_0
* Resource action: migrator migrate_to on pcmk-1
* Pseudo action: Connectivity_stop_0
* Pseudo action: FencingChild:0_stop_0
* Pseudo action: Fencing_stopped_0
* Pseudo action: group-1_stop_0
* Pseudo action: r192.168.101.183_stop_0
* Resource action: rsc_pcmk-4 start on pcmk-2
* Resource action: migrator migrate_from on pcmk-3
* Resource action: migrator stop on pcmk-1
* Pseudo action: ping-1:0_stop_0
* Pseudo action: Connectivity_stopped_0
* Pseudo action: r192.168.101.182_stop_0
* Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2
* Pseudo action: migrator_start_0
* Pseudo action: r192.168.101.181_stop_0
* Resource action: migrator monitor=10000 on pcmk-3
* Pseudo action: group-1_stopped_0
* Pseudo action: master-1_demote_0
* Pseudo action: stateful-1:0_demote_0
* Pseudo action: master-1_demoted_0
* Pseudo action: master-1_stop_0
* Pseudo action: stateful-1:0_stop_0
* Pseudo action: master-1_stopped_0
* Pseudo action: all_stopped
* Pseudo action: master-1_promote_0
* Resource action: stateful-1:1 promote on pcmk-1
* Pseudo action: master-1_promoted_0
* Pseudo action: group-1_start_0
* Resource action: r192.168.101.181 start on pcmk-1
* Resource action: r192.168.101.182 start on pcmk-1
* Resource action: r192.168.101.183 start on pcmk-1
* Resource action: stateful-1:1 monitor=16000 on pcmk-1
* Pseudo action: group-1_running_0
* Resource action: r192.168.101.181 monitor=5000 on pcmk-1
* Resource action: r192.168.101.182 monitor=5000 on pcmk-1
* Resource action: r192.168.101.183 monitor=5000 on pcmk-1
* Resource action: lsb-dummy start on pcmk-1
* Resource action: lsb-dummy monitor=5000 on pcmk-1
Revised cluster status:
Online: [ pcmk-1 pcmk-2 pcmk-3 ]
OFFLINE: [ pcmk-4 ]
Clone Set: Fencing [FencingChild]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
Stopped: [ pcmk-4 ]
Resource Group: group-1
r192.168.101.181 (ocf::heartbeat:IPaddr): Started pcmk-1
r192.168.101.182 (ocf::heartbeat:IPaddr): Started pcmk-1
r192.168.101.183 (ocf::heartbeat:IPaddr): Started pcmk-1
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-2
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1
migrator (ocf::pacemaker:Dummy): Started pcmk-3
Clone Set: Connectivity [ping-1]
Started: [ pcmk-1 pcmk-2 pcmk-3 ]
Stopped: [ pcmk-4 ]
Master/Slave Set: master-1 [stateful-1]
Masters: [ pcmk-1 ]
Slaves: [ pcmk-2 pcmk-3 ]
Stopped: [ pcmk-4 ]
diff --git a/pengine/test10/migrate-shutdown.summary b/pengine/test10/migrate-shutdown.summary
index e634a5c373..630d58da3c 100644
--- a/pengine/test10/migrate-shutdown.summary
+++ b/pengine/test10/migrate-shutdown.summary
@@ -1,91 +1,95 @@
Current cluster status:
Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Fencing (stonith:fence_xvm): Started pcmk-1
Resource Group: group-1
r192.168.122.105 (ocf::heartbeat:IPaddr): Started pcmk-2
r192.168.122.106 (ocf::heartbeat:IPaddr): Started pcmk-2
r192.168.122.107 (ocf::heartbeat:IPaddr): Started pcmk-2
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2
migrator (ocf::pacemaker:Dummy): Started pcmk-1
Clone Set: Connectivity [ping-1]
Started: [ pcmk-1 pcmk-2 pcmk-4 ]
Stopped: [ pcmk-3 ]
Master/Slave Set: master-1 [stateful-1]
Masters: [ pcmk-2 ]
Slaves: [ pcmk-1 pcmk-4 ]
Stopped: [ pcmk-3 ]
Transition Summary:
+ * Shutdown pcmk-4
+ * Shutdown pcmk-3
+ * Shutdown pcmk-2
+ * Shutdown pcmk-1
* Stop Fencing (pcmk-1)
* Stop r192.168.122.105 (pcmk-2)
* Stop r192.168.122.106 (pcmk-2)
* Stop r192.168.122.107 (pcmk-2)
* Stop rsc_pcmk-1 (pcmk-1)
* Stop rsc_pcmk-2 (pcmk-2)
* Stop rsc_pcmk-4 (pcmk-4)
* Stop lsb-dummy (pcmk-2)
* Stop migrator (pcmk-1)
* Stop ping-1:0 (pcmk-1)
* Stop ping-1:1 (pcmk-2)
* Stop ping-1:2 (pcmk-4)
* Stop stateful-1:0 (pcmk-1)
* Demote stateful-1:1 (Master -> Stopped pcmk-2)
* Stop stateful-1:2 (pcmk-4)
Executing cluster transition:
* Resource action: Fencing stop on pcmk-1
* Resource action: rsc_pcmk-1 stop on pcmk-1
* Resource action: rsc_pcmk-2 stop on pcmk-2
* Resource action: rsc_pcmk-4 stop on pcmk-4
* Resource action: lsb-dummy stop on pcmk-2
* Resource action: migrator stop on pcmk-1
* Resource action: migrator stop on pcmk-3
* Pseudo action: Connectivity_stop_0
* Cluster action: do_shutdown on pcmk-3
* Pseudo action: group-1_stop_0
* Resource action: r192.168.122.107 stop on pcmk-2
* Resource action: ping-1:0 stop on pcmk-1
* Resource action: ping-1:1 stop on pcmk-2
* Resource action: ping-1:3 stop on pcmk-4
* Pseudo action: Connectivity_stopped_0
* Resource action: r192.168.122.106 stop on pcmk-2
* Resource action: r192.168.122.105 stop on pcmk-2
* Pseudo action: group-1_stopped_0
* Pseudo action: master-1_demote_0
* Resource action: stateful-1:0 demote on pcmk-2
* Pseudo action: master-1_demoted_0
* Pseudo action: master-1_stop_0
* Resource action: stateful-1:2 stop on pcmk-1
* Resource action: stateful-1:0 stop on pcmk-2
* Resource action: stateful-1:3 stop on pcmk-4
* Pseudo action: master-1_stopped_0
* Cluster action: do_shutdown on pcmk-4
* Cluster action: do_shutdown on pcmk-2
* Cluster action: do_shutdown on pcmk-1
* Pseudo action: all_stopped
Revised cluster status:
Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Fencing (stonith:fence_xvm): Stopped
Resource Group: group-1
r192.168.122.105 (ocf::heartbeat:IPaddr): Stopped
r192.168.122.106 (ocf::heartbeat:IPaddr): Stopped
r192.168.122.107 (ocf::heartbeat:IPaddr): Stopped
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Stopped
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Stopped
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Stopped
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Stopped
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped
migrator (ocf::pacemaker:Dummy): Stopped
Clone Set: Connectivity [ping-1]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Master/Slave Set: master-1 [stateful-1]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
diff --git a/pengine/test10/novell-239082.summary b/pengine/test10/novell-239082.summary
index b2c28caf1a..b596de485b 100644
--- a/pengine/test10/novell-239082.summary
+++ b/pengine/test10/novell-239082.summary
@@ -1,58 +1,59 @@
Current cluster status:
Online: [ xen-1 xen-2 ]
fs_1 (ocf::heartbeat:Filesystem): Started xen-1
Master/Slave Set: ms-drbd0 [drbd0]
Masters: [ xen-1 ]
Slaves: [ xen-2 ]
Transition Summary:
+ * Shutdown xen-1
* Move fs_1 (Started xen-1 -> xen-2)
* Promote drbd0:0 (Slave -> Master xen-2)
* Demote drbd0:1 (Master -> Stopped xen-1)
Executing cluster transition:
* Resource action: fs_1 stop on xen-1
* Pseudo action: ms-drbd0_pre_notify_demote_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-pre_notify_demote_0
* Pseudo action: ms-drbd0_demote_0
* Resource action: drbd0:1 demote on xen-1
* Pseudo action: ms-drbd0_demoted_0
* Pseudo action: ms-drbd0_post_notify_demoted_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-post_notify_demoted_0
* Pseudo action: ms-drbd0_pre_notify_stop_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-pre_notify_stop_0
* Pseudo action: ms-drbd0_stop_0
* Resource action: drbd0:1 stop on xen-1
* Pseudo action: ms-drbd0_stopped_0
* Cluster action: do_shutdown on xen-1
* Pseudo action: ms-drbd0_post_notify_stopped_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_stopped_0
* Pseudo action: all_stopped
* Pseudo action: ms-drbd0_pre_notify_promote_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-pre_notify_promote_0
* Pseudo action: ms-drbd0_promote_0
* Resource action: drbd0:0 promote on xen-2
* Pseudo action: ms-drbd0_promoted_0
* Pseudo action: ms-drbd0_post_notify_promoted_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0
* Resource action: fs_1 start on xen-2
Revised cluster status:
Online: [ xen-1 xen-2 ]
fs_1 (ocf::heartbeat:Filesystem): Started xen-2
Master/Slave Set: ms-drbd0 [drbd0]
Masters: [ xen-2 ]
Stopped: [ xen-1 ]
diff --git a/pengine/test10/novell-252693.summary b/pengine/test10/novell-252693.summary
index 47bad6f25a..23f0632d43 100644
--- a/pengine/test10/novell-252693.summary
+++ b/pengine/test10/novell-252693.summary
@@ -1,89 +1,90 @@
Current cluster status:
Online: [ node1 node2 ]
Clone Set: stonithcloneset [stonithclone]
Started: [ node1 node2 ]
Clone Set: evmsdcloneset [evmsdclone]
Started: [ node1 node2 ]
Clone Set: evmscloneset [evmsclone]
Started: [ node1 node2 ]
Clone Set: imagestorecloneset [imagestoreclone]
Started: [ node1 node2 ]
Clone Set: configstorecloneset [configstoreclone]
Started: [ node1 node2 ]
sles10 (ocf::heartbeat:Xen): Started node1
Transition Summary:
+ * Shutdown node1
* Stop stonithclone:1 (node1)
* Stop evmsdclone:1 (node1)
* Stop evmsclone:1 (node1)
* Stop imagestoreclone:1 (node1)
* Stop configstoreclone:1 (node1)
* Migrate sles10 (Started node1 -> node2)
Executing cluster transition:
* Pseudo action: stonithcloneset_stop_0
* Pseudo action: evmscloneset_pre_notify_stop_0
* Pseudo action: imagestorecloneset_pre_notify_stop_0
* Pseudo action: configstorecloneset_pre_notify_stop_0
* Resource action: sles10 migrate_to on node1
* Resource action: stonithclone:1 stop on node1
* Pseudo action: stonithcloneset_stopped_0
* Resource action: evmsclone:0 notify on node2
* Resource action: evmsclone:1 notify on node1
* Pseudo action: evmscloneset_confirmed-pre_notify_stop_0
* Resource action: imagestoreclone:0 notify on node2
* Resource action: imagestoreclone:0 notify on node1
* Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0
* Pseudo action: imagestorecloneset_stop_0
* Resource action: configstoreclone:0 notify on node2
* Resource action: configstoreclone:0 notify on node1
* Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0
* Pseudo action: configstorecloneset_stop_0
* Resource action: sles10 migrate_from on node2
* Resource action: sles10 stop on node1
* Resource action: imagestoreclone:0 stop on node1
* Pseudo action: imagestorecloneset_stopped_0
* Resource action: configstoreclone:0 stop on node1
* Pseudo action: configstorecloneset_stopped_0
* Pseudo action: sles10_start_0
* Pseudo action: imagestorecloneset_post_notify_stopped_0
* Pseudo action: configstorecloneset_post_notify_stopped_0
* Resource action: sles10 monitor=10000 on node2
* Resource action: imagestoreclone:0 notify on node2
* Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0
* Resource action: configstoreclone:0 notify on node2
* Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0
* Pseudo action: evmscloneset_stop_0
* Resource action: evmsclone:1 stop on node1
* Pseudo action: evmscloneset_stopped_0
* Pseudo action: evmscloneset_post_notify_stopped_0
* Resource action: evmsclone:0 notify on node2
* Pseudo action: evmscloneset_confirmed-post_notify_stopped_0
* Pseudo action: evmsdcloneset_stop_0
* Resource action: evmsdclone:1 stop on node1
* Pseudo action: evmsdcloneset_stopped_0
* Cluster action: do_shutdown on node1
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node1 node2 ]
Clone Set: stonithcloneset [stonithclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: evmsdcloneset [evmsdclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: evmscloneset [evmsclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: imagestorecloneset [imagestoreclone]
Started: [ node2 ]
Stopped: [ node1 ]
Clone Set: configstorecloneset [configstoreclone]
Started: [ node2 ]
Stopped: [ node1 ]
sles10 (ocf::heartbeat:Xen): Started node2
diff --git a/pengine/test10/params-2.summary b/pengine/test10/params-2.summary
index 891ea33a5b..f4169f2d80 100644
--- a/pengine/test10/params-2.summary
+++ b/pengine/test10/params-2.summary
@@ -1,36 +1,37 @@
Current cluster status:
Online: [ node1 node2 node3 ]
rsc1 (lsb:apache): Started node1
rsc2 (lsb:apache): Started node2
rsc3 (lsb:apache): Stopped
Transition Summary:
+ * Shutdown node1
* Stop rsc1 (node1)
* Restart rsc2 (Started node2)
* Start rsc3 (node3)
Executing cluster transition:
* Resource action: rsc1 monitor on node3
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node3
* Resource action: rsc2 monitor on node1
* Resource action: rsc2 stop on node2
* Resource action: rsc3 monitor on node2
* Resource action: rsc3 monitor on node1
* Resource action: rsc3 delete on node3
* Resource action: rsc1 stop on node1
* Resource action: rsc2 delete on node2
* Resource action: rsc3 start on node3
* Cluster action: do_shutdown on node1
* Pseudo action: all_stopped
* Resource action: rsc2 start on node2
Revised cluster status:
Online: [ node1 node2 node3 ]
rsc1 (lsb:apache): Stopped
rsc2 (lsb:apache): Started node2
rsc3 (lsb:apache): Started node3
diff --git a/pengine/test10/per-op-failcount.summary b/pengine/test10/per-op-failcount.summary
index 81d761b4cc..f050d59243 100644
--- a/pengine/test10/per-op-failcount.summary
+++ b/pengine/test10/per-op-failcount.summary
@@ -1,33 +1,34 @@
Using the original execution date of: 2017-04-06 09:04:22Z
Current cluster status:
Node rh73-01-snmp (3232238265): UNCLEAN (online)
Online: [ rh73-02-snmp ]
prmDummy (ocf::pacemaker:Dummy): FAILED rh73-01-snmp
prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp
prmStonith2-1 (stonith:external/ssh): Started rh73-01-snmp
Transition Summary:
+ * Fence rh73-01-snmp
* Recover prmDummy (Started rh73-01-snmp -> rh73-02-snmp)
* Move prmStonith2-1 (Started rh73-01-snmp -> rh73-02-snmp)
Executing cluster transition:
* Pseudo action: prmStonith2-1_stop_0
* Fencing rh73-01-snmp (reboot)
* Pseudo action: stonith_complete
* Pseudo action: prmDummy_stop_0
* Resource action: prmStonith2-1 start on rh73-02-snmp
* Pseudo action: all_stopped
* Resource action: prmDummy start on rh73-02-snmp
* Resource action: prmDummy monitor=10000 on rh73-02-snmp
Using the original execution date of: 2017-04-06 09:04:22Z
Revised cluster status:
Online: [ rh73-02-snmp ]
OFFLINE: [ rh73-01-snmp ]
prmDummy (ocf::pacemaker:Dummy): Started rh73-02-snmp
prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp
prmStonith2-1 (stonith:external/ssh): Started rh73-02-snmp
diff --git a/pengine/test10/rec-node-11.summary b/pengine/test10/rec-node-11.summary
index 1ed0eab9fc..eb967ddedf 100644
--- a/pengine/test10/rec-node-11.summary
+++ b/pengine/test10/rec-node-11.summary
@@ -1,46 +1,47 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (online)
Online: [ node2 ]
stonith-1 (stonith:dummy): Stopped
Resource Group: group1
rsc1 (heartbeat:apache): Started node1
rsc2 (heartbeat:apache): Started node1
rsc3 (heartbeat:apache): Started node2
Transition Summary:
+ * Fence node1
* Start stonith-1 (node2)
* Move rsc1 (Started node1 -> node2)
* Move rsc2 (Started node1 -> node2)
* Restart rsc3 (Started node2)
Executing cluster transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: group1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc1_stop_0
* Pseudo action: group1_stopped_0
* Resource action: rsc3 stop on node2
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node2
* Resource action: rsc3 start on node2
* Pseudo action: group1_start_0
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
* Pseudo action: group1_running_0
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
stonith-1 (stonith:dummy): Started node2
Resource Group: group1
rsc1 (heartbeat:apache): Started node2
rsc2 (heartbeat:apache): Started node2
rsc3 (heartbeat:apache): Started node2
diff --git a/pengine/test10/rec-node-12.summary b/pengine/test10/rec-node-12.summary
index 82a03ace55..26f9b1309d 100644
--- a/pengine/test10/rec-node-12.summary
+++ b/pengine/test10/rec-node-12.summary
@@ -1,91 +1,92 @@
Current cluster status:
Node c001n02 (e9bdfde9-01b0-421f-acd8-8a65a53e775f): UNCLEAN (offline)
Online: [ c001n01 c001n03 c001n08 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Stopped
child_DoFencing:1 (stonith:ssh): Stopped
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
Transition Summary:
+ * Fence c001n02
* Start DcIPaddr (c001n08)
* Start rsc_c001n08 (c001n08)
* Start rsc_c001n02 (c001n01)
* Start rsc_c001n03 (c001n03)
* Start rsc_c001n01 (c001n01)
* Start child_DoFencing:0 (c001n03)
* Start child_DoFencing:1 (c001n01)
* Start child_DoFencing:2 (c001n08)
Executing cluster transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n01
* Resource action: rsc_c001n08 monitor on c001n08
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n03
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n01
* Resource action: child_DoFencing:0 monitor on c001n08
* Resource action: child_DoFencing:0 monitor on c001n03
* Resource action: child_DoFencing:0 monitor on c001n01
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n03
* Resource action: child_DoFencing:1 monitor on c001n01
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n01
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n01
* Pseudo action: DoFencing_start_0
* Fencing c001n02 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: DcIPaddr start on c001n08
* Resource action: rsc_c001n08 start on c001n08
* Resource action: rsc_c001n02 start on c001n01
* Resource action: rsc_c001n03 start on c001n03
* Resource action: rsc_c001n01 start on c001n01
* Resource action: child_DoFencing:0 start on c001n03
* Resource action: child_DoFencing:1 start on c001n01
* Resource action: child_DoFencing:2 start on c001n08
* Pseudo action: DoFencing_running_0
* Resource action: DcIPaddr monitor=5000 on c001n08
* Resource action: rsc_c001n08 monitor=5000 on c001n08
* Resource action: rsc_c001n02 monitor=5000 on c001n01
* Resource action: rsc_c001n03 monitor=5000 on c001n03
* Resource action: rsc_c001n01 monitor=5000 on c001n01
* Resource action: child_DoFencing:0 monitor=5000 on c001n03
* Resource action: child_DoFencing:1 monitor=5000 on c001n01
* Resource action: child_DoFencing:2 monitor=5000 on c001n08
Revised cluster status:
Online: [ c001n01 c001n03 c001n08 ]
OFFLINE: [ c001n02 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n01
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n03
child_DoFencing:1 (stonith:ssh): Started c001n01
child_DoFencing:2 (stonith:ssh): Started c001n08
child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/pengine/test10/rec-node-13.summary b/pengine/test10/rec-node-13.summary
index 7c6f52b0b1..e273a79769 100644
--- a/pengine/test10/rec-node-13.summary
+++ b/pengine/test10/rec-node-13.summary
@@ -1,79 +1,80 @@
Current cluster status:
Node c001n04 (9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online)
Online: [ c001n02 c001n06 c001n07 ]
OFFLINE: [ c001n03 c001n05 ]
Clone Set: DoFencing [child_DoFencing]
Started: [ c001n02 c001n06 c001n07 ]
Stopped: [ c001n03 c001n04 c001n05 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06
rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
Transition Summary:
+ * Fence c001n04
* Stop ocf_msdummy:6 (c001n04)
Executing cluster transition:
* Fencing c001n04 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: master_rsc_1_stop_0
* Pseudo action: ocf_msdummy:6_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ c001n02 c001n06 c001n07 ]
OFFLINE: [ c001n03 c001n04 c001n05 ]
Clone Set: DoFencing [child_DoFencing]
Started: [ c001n02 c001n06 c001n07 ]
Stopped: [ c001n03 c001n04 c001n05 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06
rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
diff --git a/pengine/test10/rec-node-14.summary b/pengine/test10/rec-node-14.summary
index d97d943eb7..532f3d9c30 100644
--- a/pengine/test10/rec-node-14.summary
+++ b/pengine/test10/rec-node-14.summary
@@ -1,24 +1,27 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (offline)
Node node2 (uuid2): UNCLEAN (offline)
Node node3 (uuid3): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
Transition Summary:
+ * Fence node3
+ * Fence node2
+ * Fence node1
Executing cluster transition:
* Fencing node1 (reboot)
* Fencing node3 (reboot)
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ node1 node2 node3 ]
stonith-1 (stonith:dummy): Stopped
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
diff --git a/pengine/test10/rec-node-15.summary b/pengine/test10/rec-node-15.summary
index 1c3e8eab37..8d886805c7 100644
--- a/pengine/test10/rec-node-15.summary
+++ b/pengine/test10/rec-node-15.summary
@@ -1,87 +1,88 @@
Current cluster status:
Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby
Node sapcl03 (0bfb78a2-fcd2-4f52-8a06-2d17437a6750): UNCLEAN (offline)
Online: [ sapcl01 ]
stonith-1 (stonith:dummy): Stopped
Resource Group: app01
IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01
LVM_2 (ocf::heartbeat:LVM): Started sapcl01
Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01
Resource Group: app02
IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl02
LVM_12 (ocf::heartbeat:LVM): Started sapcl02
Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl02
Resource Group: oracle
IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Stopped
LVM_22 (ocf::heartbeat:LVM): Stopped
Filesystem_23 (ocf::heartbeat:Filesystem): Stopped
oracle_24 (ocf::heartbeat:oracle): Stopped
oralsnr_25 (ocf::heartbeat:oralsnr): Stopped
Transition Summary:
+ * Fence sapcl03
* Start stonith-1 (sapcl01)
* Move IPaddr_192_168_1_102 (Started sapcl02 -> sapcl01)
* Move LVM_12 (Started sapcl02 -> sapcl01)
* Move Filesystem_13 (Started sapcl02 -> sapcl01)
* Start IPaddr_192_168_1_104 (sapcl01)
* Start LVM_22 (sapcl01)
* Start Filesystem_23 (sapcl01)
* Start oracle_24 (sapcl01)
* Start oralsnr_25 (sapcl01)
Executing cluster transition:
* Resource action: stonith-1 monitor on sapcl02
* Resource action: stonith-1 monitor on sapcl01
* Pseudo action: app02_stop_0
* Resource action: Filesystem_13 stop on sapcl02
* Pseudo action: oracle_start_0
* Fencing sapcl03 (reboot)
* Pseudo action: stonith_complete
* Resource action: LVM_12 stop on sapcl02
* Resource action: IPaddr_192_168_1_104 start on sapcl01
* Resource action: LVM_22 start on sapcl01
* Resource action: Filesystem_23 start on sapcl01
* Resource action: oracle_24 start on sapcl01
* Resource action: oralsnr_25 start on sapcl01
* Resource action: IPaddr_192_168_1_102 stop on sapcl02
* Pseudo action: oracle_running_0
* Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01
* Resource action: LVM_22 monitor=120000 on sapcl01
* Resource action: Filesystem_23 monitor=120000 on sapcl01
* Resource action: oracle_24 monitor=120000 on sapcl01
* Resource action: oralsnr_25 monitor=120000 on sapcl01
* Pseudo action: all_stopped
* Resource action: stonith-1 start on sapcl01
* Pseudo action: app02_stopped_0
* Pseudo action: app02_start_0
* Resource action: IPaddr_192_168_1_102 start on sapcl01
* Resource action: LVM_12 start on sapcl01
* Resource action: Filesystem_13 start on sapcl01
* Pseudo action: app02_running_0
* Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01
* Resource action: LVM_12 monitor=120000 on sapcl01
* Resource action: Filesystem_13 monitor=120000 on sapcl01
Revised cluster status:
Node sapcl02 (09fa194c-d7e1-41fa-a0d0-afd79a139181): standby
Online: [ sapcl01 ]
OFFLINE: [ sapcl03 ]
stonith-1 (stonith:dummy): Started sapcl01
Resource Group: app01
IPaddr_192_168_1_101 (ocf::heartbeat:IPaddr): Started sapcl01
LVM_2 (ocf::heartbeat:LVM): Started sapcl01
Filesystem_3 (ocf::heartbeat:Filesystem): Started sapcl01
Resource Group: app02
IPaddr_192_168_1_102 (ocf::heartbeat:IPaddr): Started sapcl01
LVM_12 (ocf::heartbeat:LVM): Started sapcl01
Filesystem_13 (ocf::heartbeat:Filesystem): Started sapcl01
Resource Group: oracle
IPaddr_192_168_1_104 (ocf::heartbeat:IPaddr): Started sapcl01
LVM_22 (ocf::heartbeat:LVM): Started sapcl01
Filesystem_23 (ocf::heartbeat:Filesystem): Started sapcl01
oracle_24 (ocf::heartbeat:oracle): Started sapcl01
oralsnr_25 (ocf::heartbeat:oralsnr): Started sapcl01
diff --git a/pengine/test10/rec-node-2.summary b/pengine/test10/rec-node-2.summary
index 6dc5098331..5c8db02943 100644
--- a/pengine/test10/rec-node-2.summary
+++ b/pengine/test10/rec-node-2.summary
@@ -1,61 +1,62 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (offline)
Online: [ node2 ]
stonith-1 (stonith:dummy): Stopped
rsc1 (heartbeat:apache): Stopped
rsc2 (heartbeat:apache): Stopped
Resource Group: group1
rsc3 (heartbeat:apache): Stopped
rsc4 (heartbeat:apache): Stopped
Resource Group: group2
rsc5 (heartbeat:apache): Stopped
rsc6 (heartbeat:apache): Stopped
Transition Summary:
+ * Fence node1
* Start stonith-1 (node2)
* Start rsc1 (node2)
* Start rsc2 (node2)
* Start rsc3 (node2)
* Start rsc4 (node2)
* Start rsc5 (node2)
* Start rsc6 (node2)
Executing cluster transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Pseudo action: group1_start_0
* Resource action: rsc3 monitor on node2
* Resource action: rsc4 monitor on node2
* Pseudo action: group2_start_0
* Resource action: rsc5 monitor on node2
* Resource action: rsc6 monitor on node2
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node2
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
* Resource action: rsc3 start on node2
* Resource action: rsc4 start on node2
* Resource action: rsc5 start on node2
* Resource action: rsc6 start on node2
* Pseudo action: group1_running_0
* Pseudo action: group2_running_0
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
stonith-1 (stonith:dummy): Started node2
rsc1 (heartbeat:apache): Started node2
rsc2 (heartbeat:apache): Started node2
Resource Group: group1
rsc3 (heartbeat:apache): Started node2
rsc4 (heartbeat:apache): Started node2
Resource Group: group2
rsc5 (heartbeat:apache): Started node2
rsc6 (heartbeat:apache): Started node2
diff --git a/pengine/test10/rec-node-4.summary b/pengine/test10/rec-node-4.summary
index a64580c115..761573f0cb 100644
--- a/pengine/test10/rec-node-4.summary
+++ b/pengine/test10/rec-node-4.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (offline)
Online: [ node2 ]
stonith-1 (stonith:dummy): Stopped
rsc1 (heartbeat:apache): Started node1 (UNCLEAN)
rsc2 (heartbeat:apache): Started node1 (UNCLEAN)
Transition Summary:
+ * Fence node1
* Start stonith-1 (node2)
* Move rsc1 (Started node1 -> node2)
* Move rsc2 (Started node1 -> node2)
Executing cluster transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node2
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
stonith-1 (stonith:dummy): Started node2
rsc1 (heartbeat:apache): Started node2
rsc2 (heartbeat:apache): Started node2
diff --git a/pengine/test10/rec-node-6.summary b/pengine/test10/rec-node-6.summary
index cf02414f12..fb294fbcff 100644
--- a/pengine/test10/rec-node-6.summary
+++ b/pengine/test10/rec-node-6.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (online)
Online: [ node2 ]
stonith-1 (stonith:dummy): Stopped
rsc1 (heartbeat:apache): Started node1
rsc2 (heartbeat:apache): Started node1
Transition Summary:
+ * Fence node1
* Start stonith-1 (node2)
* Move rsc1 (Started node1 -> node2)
* Move rsc2 (Started node1 -> node2)
Executing cluster transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node2
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
stonith-1 (stonith:dummy): Started node2
rsc1 (heartbeat:apache): Started node2
rsc2 (heartbeat:apache): Started node2
diff --git a/pengine/test10/rec-node-7.summary b/pengine/test10/rec-node-7.summary
index a64580c115..761573f0cb 100644
--- a/pengine/test10/rec-node-7.summary
+++ b/pengine/test10/rec-node-7.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node node1 (uuid1): UNCLEAN (offline)
Online: [ node2 ]
stonith-1 (stonith:dummy): Stopped
rsc1 (heartbeat:apache): Started node1 (UNCLEAN)
rsc2 (heartbeat:apache): Started node1 (UNCLEAN)
Transition Summary:
+ * Fence node1
* Start stonith-1 (node2)
* Move rsc1 (Started node1 -> node2)
* Move rsc2 (Started node1 -> node2)
Executing cluster transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node2
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
stonith-1 (stonith:dummy): Started node2
rsc1 (heartbeat:apache): Started node2
rsc2 (heartbeat:apache): Started node2
diff --git a/pengine/test10/rec-rsc-5.summary b/pengine/test10/rec-rsc-5.summary
index bfb9a3cfd7..7bcb1a3f41 100644
--- a/pengine/test10/rec-rsc-5.summary
+++ b/pengine/test10/rec-rsc-5.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node node2 (uuid2): UNCLEAN (online)
Online: [ node1 ]
stonith-1 (stonith:dummy): Stopped
rsc1 (heartbeat:apache): FAILED node2
rsc2 (heartbeat:apache): Started node2
Transition Summary:
+ * Fence node2
* Start stonith-1 (node1)
* Recover rsc1 (Started node2 -> node1)
* Move rsc2 (Started node2 -> node1)
Executing cluster transition:
* Resource action: stonith-1 monitor on node1
* Resource action: rsc1 monitor on node1
* Resource action: rsc2 monitor on node1
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: all_stopped
* Resource action: stonith-1 start on node1
* Resource action: rsc1 start on node1
* Resource action: rsc2 start on node1
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
stonith-1 (stonith:dummy): Started node1
rsc1 (heartbeat:apache): Started node1
rsc2 (heartbeat:apache): Started node1
diff --git a/pengine/test10/remote-fence-before-reconnect.summary b/pengine/test10/remote-fence-before-reconnect.summary
index 88ca48cfbf..520f5cfbb9 100644
--- a/pengine/test10/remote-fence-before-reconnect.summary
+++ b/pengine/test10/remote-fence-before-reconnect.summary
@@ -1,38 +1,39 @@
Current cluster status:
RemoteNode c7auto4: UNCLEAN (offline)
Online: [ c7auto1 c7auto2 c7auto3 ]
shooter (stonith:fence_phd_kvm): Started c7auto2
c7auto4 (ocf::pacemaker:remote): FAILED c7auto1
fake1 (ocf::heartbeat:Dummy): Started c7auto3
fake2 (ocf::heartbeat:Dummy): Started c7auto4 (UNCLEAN)
fake3 (ocf::heartbeat:Dummy): Started c7auto1
fake4 (ocf::heartbeat:Dummy): Started c7auto2
fake5 (ocf::heartbeat:Dummy): Started c7auto3
Transition Summary:
+ * Fence c7auto4
* Stop c7auto4 (c7auto1)
* Move fake2 (Started c7auto4 -> c7auto1)
Executing cluster transition:
* Fencing c7auto4 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: fake2_stop_0
* Resource action: c7auto4 stop on c7auto1
* Resource action: fake2 start on c7auto1
* Pseudo action: all_stopped
* Resource action: fake2 monitor=10000 on c7auto1
Revised cluster status:
RemoteNode c7auto4: UNCLEAN (offline)
Online: [ c7auto1 c7auto2 c7auto3 ]
shooter (stonith:fence_phd_kvm): Started c7auto2
c7auto4 (ocf::pacemaker:remote): FAILED
fake1 (ocf::heartbeat:Dummy): Started c7auto3
fake2 (ocf::heartbeat:Dummy): Started c7auto1
fake3 (ocf::heartbeat:Dummy): Started c7auto1
fake4 (ocf::heartbeat:Dummy): Started c7auto2
fake5 (ocf::heartbeat:Dummy): Started c7auto3
diff --git a/pengine/test10/remote-fence-unclean.summary b/pengine/test10/remote-fence-unclean.summary
index cd246e4c0a..06940fc1d7 100644
--- a/pengine/test10/remote-fence-unclean.summary
+++ b/pengine/test10/remote-fence-unclean.summary
@@ -1,46 +1,47 @@
Current cluster status:
RemoteNode remote1: UNCLEAN (offline)
Online: [ 18builder 18node1 18node2 ]
shooter (stonith:fence_xvm): Started 18builder
remote1 (ocf::pacemaker:remote): FAILED 18node1
FAKE1 (ocf::heartbeat:Dummy): Started 18node2
FAKE2 (ocf::heartbeat:Dummy): Started remote1 (UNCLEAN)
FAKE3 (ocf::heartbeat:Dummy): Started 18builder
FAKE4 (ocf::heartbeat:Dummy): Started 18node1
Transition Summary:
+ * Fence remote1
* Recover remote1 (Started 18node1)
* Move FAKE2 (Started remote1 -> 18builder)
* Move FAKE3 (Started 18builder -> 18node1)
* Move FAKE4 (Started 18node1 -> 18node2)
Executing cluster transition:
* Resource action: FAKE3 stop on 18builder
* Resource action: FAKE4 stop on 18node1
* Fencing remote1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: FAKE2_stop_0
* Resource action: FAKE3 start on 18node1
* Resource action: FAKE4 start on 18node2
* Resource action: remote1 stop on 18node1
* Resource action: FAKE2 start on 18builder
* Resource action: FAKE3 monitor=60000 on 18node1
* Resource action: FAKE4 monitor=60000 on 18node2
* Pseudo action: all_stopped
* Resource action: remote1 start on 18node1
* Resource action: remote1 monitor=60000 on 18node1
* Resource action: FAKE2 monitor=60000 on 18builder
Revised cluster status:
Online: [ 18builder 18node1 18node2 ]
RemoteOnline: [ remote1 ]
shooter (stonith:fence_xvm): Started 18builder
remote1 (ocf::pacemaker:remote): Started 18node1
FAKE1 (ocf::heartbeat:Dummy): Started 18node2
FAKE2 (ocf::heartbeat:Dummy): Started 18builder
FAKE3 (ocf::heartbeat:Dummy): Started 18node1
FAKE4 (ocf::heartbeat:Dummy): Started 18node2
diff --git a/pengine/test10/remote-fence-unclean2.summary b/pengine/test10/remote-fence-unclean2.summary
index 78ff784076..96f5d69076 100644
--- a/pengine/test10/remote-fence-unclean2.summary
+++ b/pengine/test10/remote-fence-unclean2.summary
@@ -1,30 +1,31 @@
Current cluster status:
Node rhel7-alt1 (1): standby
Node rhel7-alt2 (2): standby
RemoteNode rhel7-alt4: UNCLEAN (offline)
OFFLINE: [ rhel7-alt3 ]
shooter (stonith:fence_xvm): Stopped
rhel7-alt4 (ocf::pacemaker:remote): Stopped
fake (ocf::heartbeat:Dummy): Started rhel7-alt4 (UNCLEAN)
Transition Summary:
+ * Fence rhel7-alt4
* Stop fake (rhel7-alt4)
Executing cluster transition:
* Fencing rhel7-alt4 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: fake_stop_0
* Pseudo action: all_stopped
Revised cluster status:
Node rhel7-alt1 (1): standby
Node rhel7-alt2 (2): standby
OFFLINE: [ rhel7-alt3 ]
RemoteOFFLINE: [ rhel7-alt4 ]
shooter (stonith:fence_xvm): Stopped
rhel7-alt4 (ocf::pacemaker:remote): Stopped
fake (ocf::heartbeat:Dummy): Stopped
diff --git a/pengine/test10/remote-partial-migrate2.summary b/pengine/test10/remote-partial-migrate2.summary
index 6a65975c50..197bd8c7fe 100644
--- a/pengine/test10/remote-partial-migrate2.summary
+++ b/pengine/test10/remote-partial-migrate2.summary
@@ -1,207 +1,208 @@
Current cluster status:
Node pcmk4 (4): UNCLEAN (offline)
Online: [ pcmk1 pcmk2 pcmk3 ]
RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote5 ]
RemoteOFFLINE: [ pcmk_remote4 ]
shooter (stonith:fence_docker_cts): Started pcmk3
pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1
pcmk_remote2 (ocf::pacemaker:remote): Started [ pcmk1 pcmk3 ]
pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3
pcmk_remote4 (ocf::pacemaker:remote): Stopped
pcmk_remote5 (ocf::pacemaker:remote): Started pcmk1
FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE5 (ocf::heartbeat:Dummy): Started pcmk1
FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE9 (ocf::heartbeat:Dummy): Started pcmk2
FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE11 (ocf::heartbeat:Dummy): Started pcmk1
FAKE12 (ocf::heartbeat:Dummy): Started pcmk1
FAKE13 (ocf::heartbeat:Dummy): Started pcmk3
FAKE14 (ocf::heartbeat:Dummy): Started pcmk2
FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE19 (ocf::heartbeat:Dummy): Started pcmk3
FAKE20 (ocf::heartbeat:Dummy): Started pcmk2
FAKE21 (ocf::heartbeat:Dummy): Started pcmk1
FAKE22 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE27 (ocf::heartbeat:Dummy): Started pcmk3
FAKE28 (ocf::heartbeat:Dummy): Started pcmk3
FAKE29 (ocf::heartbeat:Dummy): Started pcmk2
FAKE30 (ocf::heartbeat:Dummy): Started pcmk1
FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE35 (ocf::heartbeat:Dummy): Started pcmk1
FAKE36 (ocf::heartbeat:Dummy): Started pcmk3
FAKE37 (ocf::heartbeat:Dummy): Started pcmk2
FAKE38 (ocf::heartbeat:Dummy): Started pcmk2
FAKE39 (ocf::heartbeat:Dummy): Started pcmk1
FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE44 (ocf::heartbeat:Dummy): Started pcmk2
FAKE45 (ocf::heartbeat:Dummy): Started pcmk3
FAKE46 (ocf::heartbeat:Dummy): Started pcmk1
FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE48 (ocf::heartbeat:Dummy): Started pcmk1
FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5
Transition Summary:
+ * Fence pcmk4
* Migrate pcmk_remote2 (Started pcmk3 -> pcmk1)
* Start pcmk_remote4 (pcmk2)
* Migrate pcmk_remote5 (Started pcmk1 -> pcmk2)
* Move FAKE5 (Started pcmk1 -> pcmk_remote4)
* Move FAKE9 (Started pcmk2 -> pcmk_remote4)
* Move FAKE12 (Started pcmk1 -> pcmk2)
* Move FAKE14 (Started pcmk2 -> pcmk_remote1)
* Move FAKE17 (Started pcmk_remote1 -> pcmk_remote4)
* Move FAKE25 (Started pcmk_remote1 -> pcmk_remote4)
* Move FAKE28 (Started pcmk3 -> pcmk1)
* Move FAKE30 (Started pcmk1 -> pcmk_remote1)
* Move FAKE33 (Started pcmk_remote1 -> pcmk_remote4)
* Move FAKE38 (Started pcmk2 -> pcmk_remote1)
* Move FAKE39 (Started pcmk1 -> pcmk_remote2)
* Move FAKE41 (Started pcmk_remote2 -> pcmk_remote4)
* Move FAKE47 (Started pcmk_remote1 -> pcmk_remote2)
* Move FAKE48 (Started pcmk1 -> pcmk_remote3)
* Move FAKE49 (Started pcmk_remote3 -> pcmk_remote4)
Executing cluster transition:
* Resource action: FAKE5 stop on pcmk1
* Resource action: FAKE9 stop on pcmk2
* Resource action: FAKE12 stop on pcmk1
* Resource action: FAKE14 stop on pcmk2
* Resource action: FAKE17 stop on pcmk_remote1
* Resource action: FAKE25 stop on pcmk_remote1
* Resource action: FAKE28 stop on pcmk3
* Resource action: FAKE30 stop on pcmk1
* Resource action: FAKE33 stop on pcmk_remote1
* Resource action: FAKE38 stop on pcmk2
* Resource action: FAKE39 stop on pcmk1
* Resource action: FAKE47 stop on pcmk_remote1
* Resource action: FAKE48 stop on pcmk1
* Resource action: FAKE49 stop on pcmk_remote3
* Fencing pcmk4 (reboot)
* Pseudo action: stonith_complete
* Resource action: pcmk_remote2 migrate_from on pcmk1
* Resource action: pcmk_remote2 stop on pcmk3
* Resource action: pcmk_remote4 start on pcmk2
* Resource action: pcmk_remote5 migrate_to on pcmk1
* Resource action: FAKE5 start on pcmk_remote4
* Resource action: FAKE9 start on pcmk_remote4
* Resource action: FAKE12 start on pcmk2
* Resource action: FAKE14 start on pcmk_remote1
* Resource action: FAKE17 start on pcmk_remote4
* Resource action: FAKE25 start on pcmk_remote4
* Resource action: FAKE28 start on pcmk1
* Resource action: FAKE30 start on pcmk_remote1
* Resource action: FAKE33 start on pcmk_remote4
* Resource action: FAKE38 start on pcmk_remote1
* Resource action: FAKE48 start on pcmk_remote3
* Resource action: FAKE49 start on pcmk_remote4
* Pseudo action: pcmk_remote2_start_0
* Resource action: pcmk_remote4 monitor=60000 on pcmk2
* Resource action: pcmk_remote5 migrate_from on pcmk2
* Resource action: pcmk_remote5 stop on pcmk1
* Resource action: FAKE5 monitor=10000 on pcmk_remote4
* Resource action: FAKE9 monitor=10000 on pcmk_remote4
* Resource action: FAKE12 monitor=10000 on pcmk2
* Resource action: FAKE14 monitor=10000 on pcmk_remote1
* Resource action: FAKE17 monitor=10000 on pcmk_remote4
* Resource action: FAKE25 monitor=10000 on pcmk_remote4
* Resource action: FAKE28 monitor=10000 on pcmk1
* Resource action: FAKE30 monitor=10000 on pcmk_remote1
* Resource action: FAKE33 monitor=10000 on pcmk_remote4
* Resource action: FAKE38 monitor=10000 on pcmk_remote1
* Resource action: FAKE39 start on pcmk_remote2
* Resource action: FAKE41 stop on pcmk_remote2
* Resource action: FAKE47 start on pcmk_remote2
* Resource action: FAKE48 monitor=10000 on pcmk_remote3
* Resource action: FAKE49 monitor=10000 on pcmk_remote4
* Pseudo action: all_stopped
* Resource action: pcmk_remote2 monitor=60000 on pcmk1
* Pseudo action: pcmk_remote5_start_0
* Resource action: FAKE39 monitor=10000 on pcmk_remote2
* Resource action: FAKE41 start on pcmk_remote4
* Resource action: FAKE47 monitor=10000 on pcmk_remote2
* Resource action: pcmk_remote5 monitor=60000 on pcmk2
* Resource action: FAKE41 monitor=10000 on pcmk_remote4
Revised cluster status:
Online: [ pcmk1 pcmk2 pcmk3 ]
OFFLINE: [ pcmk4 ]
RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 pcmk_remote5 ]
shooter (stonith:fence_docker_cts): Started pcmk3
pcmk_remote1 (ocf::pacemaker:remote): Started pcmk1
pcmk_remote2 (ocf::pacemaker:remote): Started pcmk1
pcmk_remote3 (ocf::pacemaker:remote): Started pcmk3
pcmk_remote4 (ocf::pacemaker:remote): Started pcmk2
pcmk_remote5 (ocf::pacemaker:remote): Started pcmk2
FAKE1 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE2 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE3 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE4 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE5 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE6 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE7 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE8 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE9 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE10 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE11 (ocf::heartbeat:Dummy): Started pcmk1
FAKE12 (ocf::heartbeat:Dummy): Started pcmk2
FAKE13 (ocf::heartbeat:Dummy): Started pcmk3
FAKE14 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE15 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE16 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE17 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE18 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE19 (ocf::heartbeat:Dummy): Started pcmk3
FAKE20 (ocf::heartbeat:Dummy): Started pcmk2
FAKE21 (ocf::heartbeat:Dummy): Started pcmk1
FAKE22 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE23 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE24 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE25 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE26 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE27 (ocf::heartbeat:Dummy): Started pcmk3
FAKE28 (ocf::heartbeat:Dummy): Started pcmk1
FAKE29 (ocf::heartbeat:Dummy): Started pcmk2
FAKE30 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE31 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE32 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE33 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE34 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE35 (ocf::heartbeat:Dummy): Started pcmk1
FAKE36 (ocf::heartbeat:Dummy): Started pcmk3
FAKE37 (ocf::heartbeat:Dummy): Started pcmk2
FAKE38 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE39 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE40 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE41 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE42 (ocf::heartbeat:Dummy): Started pcmk_remote5
FAKE43 (ocf::heartbeat:Dummy): Started pcmk_remote1
FAKE44 (ocf::heartbeat:Dummy): Started pcmk2
FAKE45 (ocf::heartbeat:Dummy): Started pcmk3
FAKE46 (ocf::heartbeat:Dummy): Started pcmk1
FAKE47 (ocf::heartbeat:Dummy): Started pcmk_remote2
FAKE48 (ocf::heartbeat:Dummy): Started pcmk_remote3
FAKE49 (ocf::heartbeat:Dummy): Started pcmk_remote4
FAKE50 (ocf::heartbeat:Dummy): Started pcmk_remote5
diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary
index 881f4490a8..3f29f70dc6 100644
--- a/pengine/test10/remote-recover-all.summary
+++ b/pengine/test10/remote-recover-all.summary
@@ -1,151 +1,154 @@
Using the original execution date of: 2017-05-03 13:33:24Z
Current cluster status:
Node controller-1 (2): UNCLEAN (offline)
Online: [ controller-0 controller-2 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 galera-2 ]
Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN)
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
Clone Set: haproxy-clone [haproxy]
haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
Started: [ controller-0 controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
Transition Summary:
+ * Fence messaging-1
+ * Fence galera-2
+ * Fence controller-1
* Stop messaging-1 (controller-1)
* Move galera-0 (Started controller-1 -> controller-2)
* Stop galera-2 (controller-1)
* Stop rabbitmq:2 (messaging-1)
* Demote galera:1 (Master -> Stopped galera-2)
* Stop redis:0 (controller-1)
* Move ip-172.17.1.14 (Started controller-1 -> controller-2)
* Move ip-172.17.1.17 (Started controller-1 -> controller-2)
* Move ip-172.17.4.11 (Started controller-1 -> controller-2)
* Stop haproxy:0 (controller-1)
* Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0)
* Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0)
* Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2)
Executing cluster transition:
* Pseudo action: galera-master_demote_0
* Pseudo action: redis-master_pre_notify_stop_0
* Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
* Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
* Fencing controller-1 (reboot)
* Pseudo action: galera-0_stop_0
* Pseudo action: redis_post_notify_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-master_stop_0
* Pseudo action: haproxy-clone_stop_0
* Fencing galera-2 (reboot)
* Pseudo action: galera_demote_0
* Pseudo action: galera-master_demoted_0
* Pseudo action: galera-master_stop_0
* Pseudo action: redis_stop_0
* Pseudo action: redis-master_stopped_0
* Pseudo action: haproxy_stop_0
* Pseudo action: haproxy-clone_stopped_0
* Fencing messaging-1 (reboot)
* Pseudo action: stonith_complete
* Resource action: galera-0 start on controller-2
* Pseudo action: rabbitmq_post_notify_stop_0
* Pseudo action: rabbitmq-clone_stop_0
* Pseudo action: galera_stop_0
* Resource action: galera monitor=10000 on galera-0
* Pseudo action: galera-master_stopped_0
* Pseudo action: redis-master_post_notify_stopped_0
* Pseudo action: ip-172.17.1.14_stop_0
* Pseudo action: ip-172.17.1.17_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Resource action: galera-0 monitor=20000 on controller-2
* Pseudo action: galera-2_stop_0
* Resource action: rabbitmq notify on messaging-2
* Resource action: rabbitmq notify on messaging-0
* Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq_stop_0
* Pseudo action: rabbitmq-clone_stopped_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-post_notify_stopped_0
* Resource action: ip-172.17.1.14 start on controller-2
* Resource action: ip-172.17.1.17 start on controller-2
* Resource action: ip-172.17.4.11 start on controller-2
* Pseudo action: messaging-1_stop_0
* Pseudo action: redis_notified_0
* Resource action: ip-172.17.1.14 monitor=10000 on controller-2
* Resource action: ip-172.17.1.17 monitor=10000 on controller-2
* Resource action: ip-172.17.4.11 monitor=10000 on controller-2
* Pseudo action: all_stopped
* Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
* Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
Using the original execution date of: 2017-05-03 13:33:24Z
Revised cluster status:
Online: [ controller-0 controller-2 ]
OFFLINE: [ controller-1 ]
RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
RemoteOFFLINE: [ galera-2 messaging-1 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Stopped
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-2
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Stopped
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 ]
Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2
Clone Set: haproxy-clone [haproxy]
Started: [ controller-0 controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
diff --git a/pengine/test10/remote-recover-connection.summary b/pengine/test10/remote-recover-connection.summary
index 57b5e01da6..43507af6f5 100644
--- a/pengine/test10/remote-recover-connection.summary
+++ b/pengine/test10/remote-recover-connection.summary
@@ -1,139 +1,140 @@
Using the original execution date of: 2017-05-03 13:33:24Z
Current cluster status:
Node controller-1 (2): UNCLEAN (offline)
Online: [ controller-0 controller-2 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 galera-2 ]
Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN)
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
Clone Set: haproxy-clone [haproxy]
haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
Started: [ controller-0 controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
Transition Summary:
+ * Fence controller-1
* Move messaging-1 (Started controller-1 -> controller-2)
* Move galera-0 (Started controller-1 -> controller-2)
* Move galera-2 (Started controller-1 -> controller-2)
* Stop redis:0 (controller-1)
* Move ip-172.17.1.14 (Started controller-1 -> controller-2)
* Move ip-172.17.1.17 (Started controller-1 -> controller-2)
* Move ip-172.17.4.11 (Started controller-1 -> controller-2)
* Stop haproxy:0 (controller-1)
* Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0)
* Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0)
* Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2)
Executing cluster transition:
* Pseudo action: redis-master_pre_notify_stop_0
* Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
* Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
* Fencing controller-1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: messaging-1_stop_0
* Pseudo action: galera-0_stop_0
* Pseudo action: galera-2_stop_0
* Pseudo action: redis_post_notify_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-master_stop_0
* Pseudo action: haproxy-clone_stop_0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
* Resource action: messaging-1 start on controller-2
* Resource action: galera-0 start on controller-2
* Resource action: galera-2 start on controller-2
* Resource action: rabbitmq monitor=10000 on messaging-1
* Resource action: galera monitor=10000 on galera-2
* Resource action: galera monitor=10000 on galera-0
* Pseudo action: redis_stop_0
* Pseudo action: redis-master_stopped_0
* Pseudo action: haproxy_stop_0
* Pseudo action: haproxy-clone_stopped_0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
* Resource action: messaging-1 monitor=20000 on controller-2
* Resource action: galera-0 monitor=20000 on controller-2
* Resource action: galera-2 monitor=20000 on controller-2
* Pseudo action: redis-master_post_notify_stopped_0
* Pseudo action: ip-172.17.1.14_stop_0
* Pseudo action: ip-172.17.1.17_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-post_notify_stopped_0
* Resource action: ip-172.17.1.14 start on controller-2
* Resource action: ip-172.17.1.17 start on controller-2
* Resource action: ip-172.17.4.11 start on controller-2
* Pseudo action: redis_notified_0
* Resource action: ip-172.17.1.14 monitor=10000 on controller-2
* Resource action: ip-172.17.1.17 monitor=10000 on controller-2
* Resource action: ip-172.17.4.11 monitor=10000 on controller-2
* Pseudo action: all_stopped
Using the original execution date of: 2017-05-03 13:33:24Z
Revised cluster status:
Online: [ controller-0 controller-2 ]
OFFLINE: [ controller-1 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-2
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-2
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-2
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 galera-2 ]
Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2
Clone Set: haproxy-clone [haproxy]
Started: [ controller-0 controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
diff --git a/pengine/test10/remote-recover-fail.summary b/pengine/test10/remote-recover-fail.summary
index 5953e34592..ec2d70117b 100644
--- a/pengine/test10/remote-recover-fail.summary
+++ b/pengine/test10/remote-recover-fail.summary
@@ -1,53 +1,54 @@
Current cluster status:
RemoteNode rhel7-auto4: UNCLEAN (offline)
Online: [ rhel7-auto2 rhel7-auto3 ]
OFFLINE: [ rhel7-auto1 ]
shooter (stonith:fence_xvm): Started rhel7-auto3
rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto2
FAKE1 (ocf::heartbeat:Dummy): Stopped
FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2
FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3
FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3
FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
Transition Summary:
+ * Fence rhel7-auto4
* Recover rhel7-auto4 (Started rhel7-auto2)
* Start FAKE1 (rhel7-auto2)
* Move FAKE2 (Started rhel7-auto4 -> rhel7-auto3)
* Move FAKE6 (Started rhel7-auto4 -> rhel7-auto2)
Executing cluster transition:
* Resource action: FAKE3 monitor=10000 on rhel7-auto2
* Resource action: FAKE4 monitor=10000 on rhel7-auto3
* Fencing rhel7-auto4 (reboot)
* Pseudo action: stonith_complete
* Resource action: FAKE1 start on rhel7-auto2
* Pseudo action: FAKE2_stop_0
* Pseudo action: FAKE6_stop_0
* Resource action: rhel7-auto4 stop on rhel7-auto2
* Resource action: FAKE1 monitor=10000 on rhel7-auto2
* Resource action: FAKE2 start on rhel7-auto3
* Resource action: FAKE6 start on rhel7-auto2
* Pseudo action: all_stopped
* Resource action: rhel7-auto4 start on rhel7-auto2
* Resource action: FAKE2 monitor=10000 on rhel7-auto3
* Resource action: FAKE6 monitor=10000 on rhel7-auto2
* Resource action: rhel7-auto4 monitor=60000 on rhel7-auto2
Revised cluster status:
Online: [ rhel7-auto2 rhel7-auto3 ]
OFFLINE: [ rhel7-auto1 ]
RemoteOnline: [ rhel7-auto4 ]
shooter (stonith:fence_xvm): Started rhel7-auto3
rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto2
FAKE1 (ocf::heartbeat:Dummy): Started rhel7-auto2
FAKE2 (ocf::heartbeat:Dummy): Started rhel7-auto3
FAKE3 (ocf::heartbeat:Dummy): Started rhel7-auto2
FAKE4 (ocf::heartbeat:Dummy): Started rhel7-auto3
FAKE5 (ocf::heartbeat:Dummy): Started rhel7-auto3
FAKE6 (ocf::heartbeat:Dummy): Started rhel7-auto2
diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary
index 8bfeb437be..48355a4c79 100644
--- a/pengine/test10/remote-recover-no-resources.summary
+++ b/pengine/test10/remote-recover-no-resources.summary
@@ -1,143 +1,145 @@
Using the original execution date of: 2017-05-03 13:33:24Z
Current cluster status:
Node controller-1 (2): UNCLEAN (offline)
Online: [ controller-0 controller-2 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 ]
Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN)
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
Clone Set: haproxy-clone [haproxy]
haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
Started: [ controller-0 controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
Transition Summary:
+ * Fence messaging-1
+ * Fence controller-1
* Stop messaging-1 (controller-1)
* Move galera-0 (Started controller-1 -> controller-2)
* Stop galera-2 (controller-1)
* Stop rabbitmq:2 (messaging-1)
* Stop redis:0 (controller-1)
* Move ip-172.17.1.14 (Started controller-1 -> controller-2)
* Move ip-172.17.1.17 (Started controller-1 -> controller-2)
* Move ip-172.17.4.11 (Started controller-1 -> controller-2)
* Stop haproxy:0 (controller-1)
* Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0)
* Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0)
* Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2)
Executing cluster transition:
* Pseudo action: redis-master_pre_notify_stop_0
* Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
* Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
* Fencing controller-1 (reboot)
* Pseudo action: galera-0_stop_0
* Pseudo action: galera-2_stop_0
* Pseudo action: redis_post_notify_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-master_stop_0
* Pseudo action: haproxy-clone_stop_0
* Fencing messaging-1 (reboot)
* Pseudo action: stonith_complete
* Resource action: galera-0 start on controller-2
* Pseudo action: rabbitmq_post_notify_stop_0
* Pseudo action: rabbitmq-clone_stop_0
* Resource action: galera monitor=10000 on galera-0
* Pseudo action: redis_stop_0
* Pseudo action: redis-master_stopped_0
* Pseudo action: haproxy_stop_0
* Pseudo action: haproxy-clone_stopped_0
* Resource action: galera-0 monitor=20000 on controller-2
* Resource action: rabbitmq notify on messaging-2
* Resource action: rabbitmq notify on messaging-0
* Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq_stop_0
* Pseudo action: rabbitmq-clone_stopped_0
* Pseudo action: redis-master_post_notify_stopped_0
* Pseudo action: ip-172.17.1.14_stop_0
* Pseudo action: ip-172.17.1.17_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Pseudo action: messaging-1_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-post_notify_stopped_0
* Resource action: ip-172.17.1.14 start on controller-2
* Resource action: ip-172.17.1.17 start on controller-2
* Resource action: ip-172.17.4.11 start on controller-2
* Pseudo action: redis_notified_0
* Resource action: ip-172.17.1.14 monitor=10000 on controller-2
* Resource action: ip-172.17.1.17 monitor=10000 on controller-2
* Resource action: ip-172.17.4.11 monitor=10000 on controller-2
* Pseudo action: all_stopped
* Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
* Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
Using the original execution date of: 2017-05-03 13:33:24Z
Revised cluster status:
Online: [ controller-0 controller-2 ]
OFFLINE: [ controller-1 ]
RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
RemoteOFFLINE: [ galera-2 messaging-1 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Stopped
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-2
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Stopped
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 ]
Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2
Clone Set: haproxy-clone [haproxy]
Started: [ controller-0 controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary
index 7562f129ff..212af4ed16 100644
--- a/pengine/test10/remote-recover-unknown.summary
+++ b/pengine/test10/remote-recover-unknown.summary
@@ -1,144 +1,147 @@
Using the original execution date of: 2017-05-03 13:33:24Z
Current cluster status:
Node controller-1 (2): UNCLEAN (offline)
Online: [ controller-0 controller-2 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 ]
Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN)
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
Clone Set: haproxy-clone [haproxy]
haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
Started: [ controller-0 controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
Transition Summary:
+ * Fence messaging-1
+ * Fence galera-2
+ * Fence controller-1
* Stop messaging-1 (controller-1)
* Move galera-0 (Started controller-1 -> controller-2)
* Stop galera-2 (controller-1)
* Stop rabbitmq:2 (messaging-1)
* Stop redis:0 (controller-1)
* Move ip-172.17.1.14 (Started controller-1 -> controller-2)
* Move ip-172.17.1.17 (Started controller-1 -> controller-2)
* Move ip-172.17.4.11 (Started controller-1 -> controller-2)
* Stop haproxy:0 (controller-1)
* Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0)
* Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0)
* Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2)
Executing cluster transition:
* Pseudo action: redis-master_pre_notify_stop_0
* Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
* Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
* Fencing controller-1 (reboot)
* Pseudo action: galera-0_stop_0
* Pseudo action: galera-2_stop_0
* Pseudo action: redis_post_notify_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-master_stop_0
* Pseudo action: haproxy-clone_stop_0
* Fencing galera-2 (reboot)
* Pseudo action: redis_stop_0
* Pseudo action: redis-master_stopped_0
* Pseudo action: haproxy_stop_0
* Pseudo action: haproxy-clone_stopped_0
* Fencing messaging-1 (reboot)
* Pseudo action: stonith_complete
* Resource action: galera-0 start on controller-2
* Pseudo action: rabbitmq_post_notify_stop_0
* Pseudo action: rabbitmq-clone_stop_0
* Resource action: galera monitor=10000 on galera-0
* Pseudo action: redis-master_post_notify_stopped_0
* Pseudo action: ip-172.17.1.14_stop_0
* Pseudo action: ip-172.17.1.17_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Resource action: galera-0 monitor=20000 on controller-2
* Resource action: rabbitmq notify on messaging-2
* Resource action: rabbitmq notify on messaging-0
* Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq_stop_0
* Pseudo action: rabbitmq-clone_stopped_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-post_notify_stopped_0
* Resource action: ip-172.17.1.14 start on controller-2
* Resource action: ip-172.17.1.17 start on controller-2
* Resource action: ip-172.17.4.11 start on controller-2
* Pseudo action: messaging-1_stop_0
* Pseudo action: redis_notified_0
* Resource action: ip-172.17.1.14 monitor=10000 on controller-2
* Resource action: ip-172.17.1.17 monitor=10000 on controller-2
* Resource action: ip-172.17.4.11 monitor=10000 on controller-2
* Pseudo action: all_stopped
* Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
* Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
Using the original execution date of: 2017-05-03 13:33:24Z
Revised cluster status:
Online: [ controller-0 controller-2 ]
OFFLINE: [ controller-1 ]
RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
RemoteOFFLINE: [ galera-2 messaging-1 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Stopped
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-2
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Stopped
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 ]
Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2
Clone Set: haproxy-clone [haproxy]
Started: [ controller-0 controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
diff --git a/pengine/test10/remote-recovery.summary b/pengine/test10/remote-recovery.summary
index 57b5e01da6..43507af6f5 100644
--- a/pengine/test10/remote-recovery.summary
+++ b/pengine/test10/remote-recovery.summary
@@ -1,139 +1,140 @@
Using the original execution date of: 2017-05-03 13:33:24Z
Current cluster status:
Node controller-1 (2): UNCLEAN (offline)
Online: [ controller-0 controller-2 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-1 (UNCLEAN)
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 galera-2 ]
Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
redis (ocf::heartbeat:redis): Slave controller-1 (UNCLEAN)
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
Clone Set: haproxy-clone [haproxy]
haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
Started: [ controller-0 controller-2 ]
Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
Transition Summary:
+ * Fence controller-1
* Move messaging-1 (Started controller-1 -> controller-2)
* Move galera-0 (Started controller-1 -> controller-2)
* Move galera-2 (Started controller-1 -> controller-2)
* Stop redis:0 (controller-1)
* Move ip-172.17.1.14 (Started controller-1 -> controller-2)
* Move ip-172.17.1.17 (Started controller-1 -> controller-2)
* Move ip-172.17.4.11 (Started controller-1 -> controller-2)
* Stop haproxy:0 (controller-1)
* Restart stonith-fence_ipmilan-525400bbf613 (Started controller-0)
* Restart stonith-fence_ipmilan-525400b4f6bd (Started controller-0)
* Move stonith-fence_ipmilan-5254005bdbb5 (Started controller-1 -> controller-2)
Executing cluster transition:
* Pseudo action: redis-master_pre_notify_stop_0
* Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
* Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
* Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
* Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
* Fencing controller-1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: messaging-1_stop_0
* Pseudo action: galera-0_stop_0
* Pseudo action: galera-2_stop_0
* Pseudo action: redis_post_notify_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-master_stop_0
* Pseudo action: haproxy-clone_stop_0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
* Resource action: messaging-1 start on controller-2
* Resource action: galera-0 start on controller-2
* Resource action: galera-2 start on controller-2
* Resource action: rabbitmq monitor=10000 on messaging-1
* Resource action: galera monitor=10000 on galera-2
* Resource action: galera monitor=10000 on galera-0
* Pseudo action: redis_stop_0
* Pseudo action: redis-master_stopped_0
* Pseudo action: haproxy_stop_0
* Pseudo action: haproxy-clone_stopped_0
* Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
* Resource action: messaging-1 monitor=20000 on controller-2
* Resource action: galera-0 monitor=20000 on controller-2
* Resource action: galera-2 monitor=20000 on controller-2
* Pseudo action: redis-master_post_notify_stopped_0
* Pseudo action: ip-172.17.1.14_stop_0
* Pseudo action: ip-172.17.1.17_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Resource action: redis notify on controller-0
* Resource action: redis notify on controller-2
* Pseudo action: redis-master_confirmed-post_notify_stopped_0
* Resource action: ip-172.17.1.14 start on controller-2
* Resource action: ip-172.17.1.17 start on controller-2
* Resource action: ip-172.17.4.11 start on controller-2
* Pseudo action: redis_notified_0
* Resource action: ip-172.17.1.14 monitor=10000 on controller-2
* Resource action: ip-172.17.1.17 monitor=10000 on controller-2
* Resource action: ip-172.17.4.11 monitor=10000 on controller-2
* Pseudo action: all_stopped
Using the original execution date of: 2017-05-03 13:33:24Z
Revised cluster status:
Online: [ controller-0 controller-2 ]
OFFLINE: [ controller-1 ]
RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
messaging-0 (ocf::pacemaker:remote): Started controller-0
messaging-1 (ocf::pacemaker:remote): Started controller-2
messaging-2 (ocf::pacemaker:remote): Started controller-0
galera-0 (ocf::pacemaker:remote): Started controller-2
galera-1 (ocf::pacemaker:remote): Started controller-0
galera-2 (ocf::pacemaker:remote): Started controller-2
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ messaging-0 messaging-1 messaging-2 ]
Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ galera-0 galera-1 galera-2 ]
Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ controller-0 ]
Slaves: [ controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
ip-192.168.24.6 (ocf::heartbeat:IPaddr2): Started controller-0
ip-10.0.0.102 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2
ip-172.17.3.15 (ocf::heartbeat:IPaddr2): Started controller-0
ip-172.17.4.11 (ocf::heartbeat:IPaddr2): Started controller-2
Clone Set: haproxy-clone [haproxy]
Started: [ controller-0 controller-2 ]
Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
diff --git a/pengine/test10/remote-unclean2.summary b/pengine/test10/remote-unclean2.summary
index 0a73cbd4d0..ad4af90c63 100644
--- a/pengine/test10/remote-unclean2.summary
+++ b/pengine/test10/remote-unclean2.summary
@@ -1,26 +1,27 @@
Current cluster status:
RemoteNode rhel7-auto4: UNCLEAN (offline)
Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
shooter (stonith:fence_xvm): Started rhel7-auto2
rhel7-auto4 (ocf::pacemaker:remote): FAILED rhel7-auto1
Transition Summary:
+ * Fence rhel7-auto4
* Recover rhel7-auto4 (Started rhel7-auto1)
Executing cluster transition:
* Resource action: rhel7-auto4 stop on rhel7-auto1
* Fencing rhel7-auto4 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: rhel7-auto4 start on rhel7-auto1
* Resource action: rhel7-auto4 monitor=60000 on rhel7-auto1
Revised cluster status:
Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
RemoteOnline: [ rhel7-auto4 ]
shooter (stonith:fence_xvm): Started rhel7-auto2
rhel7-auto4 (ocf::pacemaker:remote): Started rhel7-auto1
diff --git a/pengine/test10/simple7.summary b/pengine/test10/simple7.summary
index 2c2818fffb..021c15ffba 100644
--- a/pengine/test10/simple7.summary
+++ b/pengine/test10/simple7.summary
@@ -1,19 +1,20 @@
Current cluster status:
Online: [ node1 ]
rsc1 (heartbeat:apache): Started node1
Transition Summary:
+ * Shutdown node1
* Stop rsc1 (node1)
Executing cluster transition:
* Resource action: rsc1 stop on node1
* Cluster action: do_shutdown on node1
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node1 ]
rsc1 (heartbeat:apache): Stopped
diff --git a/pengine/test10/start-then-stop-with-unfence.summary b/pengine/test10/start-then-stop-with-unfence.summary
index df7d9e3c7d..4430cc09d7 100644
--- a/pengine/test10/start-then-stop-with-unfence.summary
+++ b/pengine/test10/start-then-stop-with-unfence.summary
@@ -1,42 +1,44 @@
Current cluster status:
Online: [ rhel7-node1.example.com rhel7-node2.example.com ]
mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com
mpath-node1 (stonith:fence_mpath): Stopped
ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com
ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com
Clone Set: jrummy-clone [jrummy]
Started: [ rhel7-node2.example.com ]
Stopped: [ rhel7-node1.example.com ]
Transition Summary:
+ * Fence rhel7-node2.example.com
+ * Fence rhel7-node1.example.com
* Start mpath-node1 (rhel7-node1.example.com)
* Move ip1 (Started rhel7-node2.example.com -> rhel7-node1.example.com)
* Start jrummy:1 (rhel7-node1.example.com)
Executing cluster transition:
* Pseudo action: jrummy-clone_start_0
* Fencing rhel7-node1.example.com (on)
* Resource action: mpath-node2 monitor on rhel7-node1.example.com
* Resource action: mpath-node1 monitor on rhel7-node1.example.com
* Resource action: jrummy start on rhel7-node1.example.com
* Pseudo action: jrummy-clone_running_0
* Resource action: mpath-node1 start on rhel7-node1.example.com
* Resource action: ip1 stop on rhel7-node2.example.com
* Resource action: jrummy monitor=10000 on rhel7-node1.example.com
* Pseudo action: all_stopped
* Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com
* Resource action: ip1 start on rhel7-node1.example.com
* Resource action: ip1 monitor=10000 on rhel7-node1.example.com
Revised cluster status:
Online: [ rhel7-node1.example.com rhel7-node2.example.com ]
mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com
mpath-node1 (stonith:fence_mpath): Started rhel7-node1.example.com
ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node1.example.com
ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com
Clone Set: jrummy-clone [jrummy]
Started: [ rhel7-node1.example.com rhel7-node2.example.com ]
diff --git a/pengine/test10/stonith-0.summary b/pengine/test10/stonith-0.summary
index 0fcaf80cbe..24008a1ab4 100644
--- a/pengine/test10/stonith-0.summary
+++ b/pengine/test10/stonith-0.summary
@@ -1,109 +1,111 @@
Current cluster status:
Node c001n03 (f5e1d2de-73da-432a-9d5c-37472253c2ee): UNCLEAN (online)
Node c001n05 (52a5ea5e-86ee-442c-b251-0bc9825c517e): UNCLEAN (online)
Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started [ c001n03 c001n05 ]
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n03
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): FAILED [ c001n03 c001n05 ]
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04
rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05
rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
Clone Set: DoFencing [child_DoFencing]
Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
Stopped: [ c001n03 c001n05 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04
ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04
ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
Transition Summary:
+ * Fence c001n05
+ * Fence c001n03
* Move ocf_192.168.100.181 (Started c001n03 -> c001n02)
* Move heartbeat_192.168.100.182 (Started c001n03 -> c001n02)
* Recover ocf_192.168.100.183 (Started c001n03 -> c001n02)
* Move rsc_c001n05 (Started c001n05 -> c001n07)
* Move rsc_c001n07 (Started c001n03 -> c001n07)
Executing cluster transition:
* Resource action: child_DoFencing:4 monitor=20000 on c001n08
* Fencing c001n05 (reboot)
* Fencing c001n03 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: group-1_stop_0
* Pseudo action: ocf_192.168.100.183_stop_0
* Pseudo action: ocf_192.168.100.183_stop_0
* Pseudo action: rsc_c001n05_stop_0
* Pseudo action: rsc_c001n07_stop_0
* Pseudo action: heartbeat_192.168.100.182_stop_0
* Resource action: rsc_c001n05 start on c001n07
* Resource action: rsc_c001n07 start on c001n07
* Pseudo action: ocf_192.168.100.181_stop_0
* Pseudo action: ocf_192.168.100.181_stop_0
* Resource action: rsc_c001n05 monitor=5000 on c001n07
* Resource action: rsc_c001n07 monitor=5000 on c001n07
* Pseudo action: all_stopped
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
Revised cluster status:
Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
OFFLINE: [ c001n03 c001n05 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
Resource Group: group-1
ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02
heartbeat_192.168.100.182 (heartbeat:IPaddr): Started c001n02
ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02
lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04
rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06
rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08
Clone Set: DoFencing [child_DoFencing]
Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
Stopped: [ c001n03 c001n05 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02
ocf_msdummy:1 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02
ocf_msdummy:2 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:3 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07
ocf_msdummy:4 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:5 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08
ocf_msdummy:6 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:7 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:8 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:9 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
ocf_msdummy:10 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04
ocf_msdummy:11 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04
ocf_msdummy:12 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
ocf_msdummy:13 (ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06
diff --git a/pengine/test10/stonith-1.summary b/pengine/test10/stonith-1.summary
index 589e4023d0..85560214e7 100644
--- a/pengine/test10/stonith-1.summary
+++ b/pengine/test10/stonith-1.summary
@@ -1,112 +1,113 @@
Current cluster status:
Node sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): UNCLEAN (offline)
Online: [ sles-1 sles-2 sles-4 ]
Resource Group: group-1
r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1
r192.168.100.182 (heartbeat:IPaddr): Started sles-1
r192.168.100.183 (ocf::heartbeat:IPaddr): Stopped
lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2
migrator (ocf::heartbeat:Dummy): Started sles-3 (UNCLEAN)
rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1
rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2
rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3 (UNCLEAN)
rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4
Clone Set: DoFencing [child_DoFencing]
child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN)
Started: [ sles-1 sles-2 ]
Stopped: [ sles-4 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN )
ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-3 ( UNCLEAN )
ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped
Transition Summary:
+ * Fence sles-3
* Start r192.168.100.183 (sles-1)
* Move migrator (Started sles-3 -> sles-4)
* Move rsc_sles-3 (Started sles-3 -> sles-4)
* Move child_DoFencing:2 (Started sles-3 -> sles-4)
* Start ocf_msdummy:0 (sles-4)
* Start ocf_msdummy:1 (sles-1)
* Move ocf_msdummy:2 (Slave sles-3 -> sles-2)
* Start ocf_msdummy:3 (sles-4)
* Start ocf_msdummy:4 (sles-1)
* Move ocf_msdummy:5 (Slave sles-3 -> sles-2)
Executing cluster transition:
* Pseudo action: group-1_start_0
* Resource action: r192.168.100.182 monitor=5000 on sles-1
* Resource action: lsb_dummy monitor=5000 on sles-2
* Resource action: rsc_sles-2 monitor=5000 on sles-2
* Resource action: rsc_sles-4 monitor=5000 on sles-4
* Pseudo action: DoFencing_stop_0
* Fencing sles-3 (reboot)
* Pseudo action: stonith_complete
* Resource action: r192.168.100.183 start on sles-1
* Pseudo action: migrator_stop_0
* Pseudo action: rsc_sles-3_stop_0
* Pseudo action: child_DoFencing:2_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: DoFencing_start_0
* Pseudo action: master_rsc_1_stop_0
* Pseudo action: group-1_running_0
* Resource action: r192.168.100.183 monitor=5000 on sles-1
* Resource action: migrator start on sles-4
* Resource action: rsc_sles-3 start on sles-4
* Resource action: child_DoFencing:2 start on sles-4
* Pseudo action: DoFencing_running_0
* Pseudo action: ocf_msdummy:2_stop_0
* Pseudo action: ocf_msdummy:5_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: master_rsc_1_start_0
* Pseudo action: all_stopped
* Resource action: migrator monitor=10000 on sles-4
* Resource action: rsc_sles-3 monitor=5000 on sles-4
* Resource action: child_DoFencing:2 monitor=60000 on sles-4
* Resource action: ocf_msdummy:0 start on sles-4
* Resource action: ocf_msdummy:1 start on sles-1
* Resource action: ocf_msdummy:2 start on sles-2
* Resource action: ocf_msdummy:3 start on sles-4
* Resource action: ocf_msdummy:4 start on sles-1
* Resource action: ocf_msdummy:5 start on sles-2
* Pseudo action: master_rsc_1_running_0
* Resource action: ocf_msdummy:0 monitor=5000 on sles-4
* Resource action: ocf_msdummy:1 monitor=5000 on sles-1
* Resource action: ocf_msdummy:2 monitor=5000 on sles-2
* Resource action: ocf_msdummy:3 monitor=5000 on sles-4
* Resource action: ocf_msdummy:4 monitor=5000 on sles-1
* Resource action: ocf_msdummy:5 monitor=5000 on sles-2
Revised cluster status:
Online: [ sles-1 sles-2 sles-4 ]
OFFLINE: [ sles-3 ]
Resource Group: group-1
r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1
r192.168.100.182 (heartbeat:IPaddr): Started sles-1
r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1
lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2
migrator (ocf::heartbeat:Dummy): Started sles-4
rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1
rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2
rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-4
rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4
Clone Set: DoFencing [child_DoFencing]
Started: [ sles-1 sles-2 sles-4 ]
Stopped: [ sles-3 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped
diff --git a/pengine/test10/stonith-2.summary b/pengine/test10/stonith-2.summary
index b02c9b4e75..0f7cb99ea8 100644
--- a/pengine/test10/stonith-2.summary
+++ b/pengine/test10/stonith-2.summary
@@ -1,77 +1,78 @@
Current cluster status:
Node sles-5 (434915c6-7b40-4d30-95ff-dc0ff3dc005a): UNCLEAN (offline)
Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
Resource Group: group-1
r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1
r192.168.100.182 (heartbeat:IPaddr): Started sles-1
r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1
lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2
migrator (ocf::heartbeat:Dummy): Started sles-3
rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1
rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2
rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3
rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4
rsc_sles-5 (ocf::heartbeat:IPaddr): Stopped
rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6
Clone Set: DoFencing [child_DoFencing]
Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
Stopped: [ sles-5 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3
ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6
ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6
ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3
Transition Summary:
+ * Fence sles-5
* Start rsc_sles-5 (sles-6)
Executing cluster transition:
* Fencing sles-5 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: rsc_sles-5 start on sles-6
* Resource action: rsc_sles-5 monitor=5000 on sles-6
Revised cluster status:
Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
OFFLINE: [ sles-5 ]
Resource Group: group-1
r192.168.100.181 (ocf::heartbeat:IPaddr): Started sles-1
r192.168.100.182 (heartbeat:IPaddr): Started sles-1
r192.168.100.183 (ocf::heartbeat:IPaddr): Started sles-1
lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2
migrator (ocf::heartbeat:Dummy): Started sles-3
rsc_sles-1 (ocf::heartbeat:IPaddr): Started sles-1
rsc_sles-2 (ocf::heartbeat:IPaddr): Started sles-2
rsc_sles-3 (ocf::heartbeat:IPaddr): Started sles-3
rsc_sles-4 (ocf::heartbeat:IPaddr): Started sles-4
rsc_sles-5 (ocf::heartbeat:IPaddr): Started sles-6
rsc_sles-6 (ocf::heartbeat:IPaddr): Started sles-6
Clone Set: DoFencing [child_DoFencing]
Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
Stopped: [ sles-5 ]
Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
ocf_msdummy:0 (ocf::heartbeat:Stateful): Slave sles-3
ocf_msdummy:1 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave sles-4
ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave sles-1
ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped
ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave sles-6
ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave sles-6
ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave sles-2
ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave sles-3
diff --git a/pengine/test10/stonith-3.summary b/pengine/test10/stonith-3.summary
index 64b21d4e7b..cb25cc3e18 100644
--- a/pengine/test10/stonith-3.summary
+++ b/pengine/test10/stonith-3.summary
@@ -1,36 +1,37 @@
Current cluster status:
Node rh5node1 (286f4fcb-519e-4a23-b39f-9ab0017d0442): UNCLEAN (offline)
Online: [ rh5node2 ]
prmIpPostgreSQLDB (ocf::heartbeat:IPaddr): Stopped
Clone Set: clnStonith [grpStonith]
Stopped: [ rh5node1 rh5node2 ]
Transition Summary:
+ * Fence rh5node1
* Start prmIpPostgreSQLDB (rh5node2)
* Start prmStonith:0 (rh5node2)
Executing cluster transition:
* Resource action: prmIpPostgreSQLDB monitor on rh5node2
* Resource action: prmStonith:0 monitor on rh5node2
* Pseudo action: clnStonith_start_0
* Fencing rh5node1 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: prmIpPostgreSQLDB start on rh5node2
* Pseudo action: grpStonith:0_start_0
* Resource action: prmStonith:0 start on rh5node2
* Resource action: prmIpPostgreSQLDB monitor=30000 on rh5node2
* Pseudo action: grpStonith:0_running_0
* Pseudo action: clnStonith_running_0
Revised cluster status:
Online: [ rh5node2 ]
OFFLINE: [ rh5node1 ]
prmIpPostgreSQLDB (ocf::heartbeat:IPaddr): Started rh5node2
Clone Set: clnStonith [grpStonith]
Started: [ rh5node2 ]
Stopped: [ rh5node1 ]
diff --git a/pengine/test10/stonith-4.summary b/pengine/test10/stonith-4.summary
index 78118c55c9..dc7cd89566 100644
--- a/pengine/test10/stonith-4.summary
+++ b/pengine/test10/stonith-4.summary
@@ -1,36 +1,40 @@
Current cluster status:
Node pcmk-10 (110): UNCLEAN (online)
Node pcmk-11 (111): pending
Node pcmk-2 (102): pending
Node pcmk-3 (103): pending
Node pcmk-5 (105): UNCLEAN (offline)
Node pcmk-7 (107): UNCLEAN (online)
Node pcmk-8 (108): UNCLEAN (offline)
Node pcmk-9 (109): pending
Online: [ pcmk-1 ]
OFFLINE: [ pcmk-4 pcmk-6 ]
Fencing (stonith:fence_xvm): Stopped
Transition Summary:
+ * Fence pcmk-8
+ * Fence pcmk-7
+ * Fence pcmk-5
+ * Fence pcmk-10
* Start Fencing (pcmk-1 - blocked)
Executing cluster transition:
* Fencing pcmk-10 (reboot)
* Fencing pcmk-5 (reboot)
* Fencing pcmk-7 (reboot)
* Fencing pcmk-8 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
Node pcmk-11 (111): pending
Node pcmk-2 (102): pending
Node pcmk-3 (103): pending
Node pcmk-9 (109): pending
Online: [ pcmk-1 ]
OFFLINE: [ pcmk-10 pcmk-4 pcmk-5 pcmk-6 pcmk-7 pcmk-8 ]
Fencing (stonith:fence_xvm): Stopped
diff --git a/pengine/test10/stop-failure-no-quorum.summary b/pengine/test10/stop-failure-no-quorum.summary
index 7c14ce238d..d864f1a4b2 100644
--- a/pengine/test10/stop-failure-no-quorum.summary
+++ b/pengine/test10/stop-failure-no-quorum.summary
@@ -1,45 +1,46 @@
Current cluster status:
Node pcmk-2 (102): UNCLEAN (online)
Node pcmk-3 (103): UNCLEAN (offline)
Node pcmk-4 (104): UNCLEAN (offline)
Online: [ pcmk-1 ]
Clone Set: dlm-clone [dlm]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Clone Set: clvm-clone [clvm]
clvm (lsb:clvmd): FAILED pcmk-2
clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked )
Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
ClusterIP (ocf::heartbeat:IPaddr2): Stopped
Fencing (stonith:fence_xvm): Stopped
Transition Summary:
+ * Fence pcmk-2
* Start dlm:0 (pcmk-1 - blocked)
* Stop clvm:0 (pcmk-2)
* Start clvm:2 (pcmk-1 - blocked)
* Start ClusterIP (pcmk-1 - blocked)
* Start Fencing (pcmk-1 - blocked)
Executing cluster transition:
* Fencing pcmk-2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: clvm-clone_stop_0
* Pseudo action: clvm_stop_0
* Pseudo action: clvm-clone_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Node pcmk-3 (103): UNCLEAN (offline)
Node pcmk-4 (104): UNCLEAN (offline)
Online: [ pcmk-1 ]
OFFLINE: [ pcmk-2 ]
Clone Set: dlm-clone [dlm]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Clone Set: clvm-clone [clvm]
clvm (lsb:clvmd): FAILED pcmk-3 ( UNCLEAN, blocked )
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
ClusterIP (ocf::heartbeat:IPaddr2): Stopped
Fencing (stonith:fence_xvm): Stopped
diff --git a/pengine/test10/stop-failure-with-fencing.summary b/pengine/test10/stop-failure-with-fencing.summary
index 72417e68c6..e01b6c49c9 100644
--- a/pengine/test10/stop-failure-with-fencing.summary
+++ b/pengine/test10/stop-failure-with-fencing.summary
@@ -1,44 +1,45 @@
Current cluster status:
Node pcmk-2 (102): UNCLEAN (online)
Node pcmk-3 (103): UNCLEAN (offline)
Node pcmk-4 (104): UNCLEAN (offline)
Online: [ pcmk-1 ]
Clone Set: dlm-clone [dlm]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Clone Set: clvm-clone [clvm]
clvm (lsb:clvmd): FAILED pcmk-2
Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
ClusterIP (ocf::heartbeat:IPaddr2): Stopped
Fencing (stonith:fence_xvm): Stopped
Transition Summary:
+ * Fence pcmk-2
* Start dlm:0 (pcmk-1 - blocked)
* Stop clvm:0 (pcmk-2)
* Start clvm:1 (pcmk-1 - blocked)
* Start ClusterIP (pcmk-1 - blocked)
* Start Fencing (pcmk-1 - blocked)
Executing cluster transition:
* Resource action: Fencing monitor on pcmk-1
* Fencing pcmk-2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: clvm-clone_stop_0
* Pseudo action: clvm_stop_0
* Pseudo action: clvm-clone_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Node pcmk-3 (103): UNCLEAN (offline)
Node pcmk-4 (104): UNCLEAN (offline)
Online: [ pcmk-1 ]
OFFLINE: [ pcmk-2 ]
Clone Set: dlm-clone [dlm]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
Clone Set: clvm-clone [clvm]
Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
ClusterIP (ocf::heartbeat:IPaddr2): Stopped
Fencing (stonith:fence_xvm): Stopped
diff --git a/pengine/test10/systemhealth1.summary b/pengine/test10/systemhealth1.summary
index bbdbcafbc1..37b0b49a70 100644
--- a/pengine/test10/systemhealth1.summary
+++ b/pengine/test10/systemhealth1.summary
@@ -1,24 +1,26 @@
Current cluster status:
Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline)
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
+ * Fence hs21c
Executing cluster transition:
* Fencing hs21d (reboot)
* Fencing hs21c (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ hs21c hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealth2.summary b/pengine/test10/systemhealth2.summary
index 86c6bd4fe4..a37ce18034 100644
--- a/pengine/test10/systemhealth2.summary
+++ b/pengine/test10/systemhealth2.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start stonith-1 (hs21c)
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on hs21c
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Started hs21c
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealth3.summary b/pengine/test10/systemhealth3.summary
index 86c6bd4fe4..a37ce18034 100644
--- a/pengine/test10/systemhealth3.summary
+++ b/pengine/test10/systemhealth3.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start stonith-1 (hs21c)
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on hs21c
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Started hs21c
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealthm1.summary b/pengine/test10/systemhealthm1.summary
index bbdbcafbc1..37b0b49a70 100644
--- a/pengine/test10/systemhealthm1.summary
+++ b/pengine/test10/systemhealthm1.summary
@@ -1,24 +1,26 @@
Current cluster status:
Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline)
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
+ * Fence hs21c
Executing cluster transition:
* Fencing hs21d (reboot)
* Fencing hs21c (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ hs21c hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealthm2.summary b/pengine/test10/systemhealthm2.summary
index 86c6bd4fe4..a37ce18034 100644
--- a/pengine/test10/systemhealthm2.summary
+++ b/pengine/test10/systemhealthm2.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start stonith-1 (hs21c)
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on hs21c
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Started hs21c
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealthm3.summary b/pengine/test10/systemhealthm3.summary
index 760023d277..b10d3f244c 100644
--- a/pengine/test10/systemhealthm3.summary
+++ b/pengine/test10/systemhealthm3.summary
@@ -1,27 +1,28 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealthn1.summary b/pengine/test10/systemhealthn1.summary
index bbdbcafbc1..37b0b49a70 100644
--- a/pengine/test10/systemhealthn1.summary
+++ b/pengine/test10/systemhealthn1.summary
@@ -1,24 +1,26 @@
Current cluster status:
Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline)
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
+ * Fence hs21c
Executing cluster transition:
* Fencing hs21d (reboot)
* Fencing hs21c (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ hs21c hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealthn2.summary b/pengine/test10/systemhealthn2.summary
index 86c6bd4fe4..a37ce18034 100644
--- a/pengine/test10/systemhealthn2.summary
+++ b/pengine/test10/systemhealthn2.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start stonith-1 (hs21c)
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on hs21c
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Started hs21c
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealthn3.summary b/pengine/test10/systemhealthn3.summary
index 86c6bd4fe4..a37ce18034 100644
--- a/pengine/test10/systemhealthn3.summary
+++ b/pengine/test10/systemhealthn3.summary
@@ -1,35 +1,36 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start stonith-1 (hs21c)
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: stonith-1 start on hs21c
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Started hs21c
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealtho1.summary b/pengine/test10/systemhealtho1.summary
index bbdbcafbc1..37b0b49a70 100644
--- a/pengine/test10/systemhealtho1.summary
+++ b/pengine/test10/systemhealtho1.summary
@@ -1,24 +1,26 @@
Current cluster status:
Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline)
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
+ * Fence hs21c
Executing cluster transition:
* Fencing hs21d (reboot)
* Fencing hs21c (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ hs21c hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealtho2.summary b/pengine/test10/systemhealtho2.summary
index 760023d277..b10d3f244c 100644
--- a/pengine/test10/systemhealtho2.summary
+++ b/pengine/test10/systemhealtho2.summary
@@ -1,27 +1,28 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealtho3.summary b/pengine/test10/systemhealtho3.summary
index 760023d277..b10d3f244c 100644
--- a/pengine/test10/systemhealtho3.summary
+++ b/pengine/test10/systemhealtho3.summary
@@ -1,27 +1,28 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealthp1.summary b/pengine/test10/systemhealthp1.summary
index bbdbcafbc1..37b0b49a70 100644
--- a/pengine/test10/systemhealthp1.summary
+++ b/pengine/test10/systemhealthp1.summary
@@ -1,24 +1,26 @@
Current cluster status:
Node hs21c (c97a3ee5-02d8-4fad-a9fb-a79ae2b35549): UNCLEAN (offline)
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
+ * Fence hs21c
Executing cluster transition:
* Fencing hs21d (reboot)
* Fencing hs21c (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ hs21c hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/systemhealthp2.summary b/pengine/test10/systemhealthp2.summary
index 94c63d68c6..6605e12edd 100644
--- a/pengine/test10/systemhealthp2.summary
+++ b/pengine/test10/systemhealthp2.summary
@@ -1,33 +1,34 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
* Start apache_1 (hs21c)
* Start nfs_1 (hs21c)
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: apache_1 start on hs21c
* Resource action: nfs_1 start on hs21c
* Resource action: apache_1 monitor=10000 on hs21c
* Resource action: nfs_1 monitor=20000 on hs21c
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Started hs21c
nfs_1 (ocf::heartbeat:Filesystem): Started hs21c
diff --git a/pengine/test10/systemhealthp3.summary b/pengine/test10/systemhealthp3.summary
index 760023d277..b10d3f244c 100644
--- a/pengine/test10/systemhealthp3.summary
+++ b/pengine/test10/systemhealthp3.summary
@@ -1,27 +1,28 @@
Current cluster status:
Node hs21d (737318c6-0f92-4592-9754-45967d45aff7): UNCLEAN (offline)
Online: [ hs21c ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
Transition Summary:
+ * Fence hs21d
Executing cluster transition:
* Resource action: stonith-1 monitor on hs21c
* Resource action: apache_1 monitor on hs21c
* Resource action: nfs_1 monitor on hs21c
* Fencing hs21d (reboot)
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
Revised cluster status:
Online: [ hs21c ]
OFFLINE: [ hs21d ]
stonith-1 (stonith:dummy): Stopped
apache_1 (ocf::heartbeat:apache): Stopped
nfs_1 (ocf::heartbeat:Filesystem): Stopped
diff --git a/pengine/test10/ticket-clone-21.summary b/pengine/test10/ticket-clone-21.summary
index 5ae23ec5f7..50df6280f4 100644
--- a/pengine/test10/ticket-clone-21.summary
+++ b/pengine/test10/ticket-clone-21.summary
@@ -1,31 +1,33 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Clone Set: clone1 [rsc1]
Started: [ node1 node2 ]
Transition Summary:
+ * Fence node2
+ * Fence node1
* Stop rsc_stonith (node1)
* Stop rsc1:0 (node1)
* Stop rsc1:1 (node2)
Executing cluster transition:
* Fencing node1 (reboot)
* Pseudo action: rsc_stonith_stop_0
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: clone1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: rsc1:0_stop_0
* Pseudo action: clone1_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ node1 node2 ]
rsc_stonith (stonith:null): Stopped
Clone Set: clone1 [rsc1]
Stopped: [ node1 node2 ]
diff --git a/pengine/test10/ticket-clone-9.summary b/pengine/test10/ticket-clone-9.summary
index 5ae23ec5f7..50df6280f4 100644
--- a/pengine/test10/ticket-clone-9.summary
+++ b/pengine/test10/ticket-clone-9.summary
@@ -1,31 +1,33 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Clone Set: clone1 [rsc1]
Started: [ node1 node2 ]
Transition Summary:
+ * Fence node2
+ * Fence node1
* Stop rsc_stonith (node1)
* Stop rsc1:0 (node1)
* Stop rsc1:1 (node2)
Executing cluster transition:
* Fencing node1 (reboot)
* Pseudo action: rsc_stonith_stop_0
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: clone1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: rsc1:0_stop_0
* Pseudo action: clone1_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
OFFLINE: [ node1 node2 ]
rsc_stonith (stonith:null): Stopped
Clone Set: clone1 [rsc1]
Stopped: [ node1 node2 ]
diff --git a/pengine/test10/ticket-group-21.summary b/pengine/test10/ticket-group-21.summary
index 9027d2cc0c..eab14a9b9a 100644
--- a/pengine/test10/ticket-group-21.summary
+++ b/pengine/test10/ticket-group-21.summary
@@ -1,31 +1,32 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Resource Group: group1
rsc1 (ocf::pacemaker:Dummy): Started node2
rsc2 (ocf::pacemaker:Dummy): Started node2
Transition Summary:
+ * Fence node2
* Stop rsc1 (node2)
* Stop rsc2 (node2)
Executing cluster transition:
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: group1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc1_stop_0
* Pseudo action: all_stopped
* Pseudo action: group1_stopped_0
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
rsc_stonith (stonith:null): Started node1
Resource Group: group1
rsc1 (ocf::pacemaker:Dummy): Stopped
rsc2 (ocf::pacemaker:Dummy): Stopped
diff --git a/pengine/test10/ticket-group-9.summary b/pengine/test10/ticket-group-9.summary
index 9027d2cc0c..eab14a9b9a 100644
--- a/pengine/test10/ticket-group-9.summary
+++ b/pengine/test10/ticket-group-9.summary
@@ -1,31 +1,32 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Resource Group: group1
rsc1 (ocf::pacemaker:Dummy): Started node2
rsc2 (ocf::pacemaker:Dummy): Started node2
Transition Summary:
+ * Fence node2
* Stop rsc1 (node2)
* Stop rsc2 (node2)
Executing cluster transition:
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: group1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc1_stop_0
* Pseudo action: all_stopped
* Pseudo action: group1_stopped_0
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
rsc_stonith (stonith:null): Started node1
Resource Group: group1
rsc1 (ocf::pacemaker:Dummy): Stopped
rsc2 (ocf::pacemaker:Dummy): Stopped
diff --git a/pengine/test10/ticket-master-21.summary b/pengine/test10/ticket-master-21.summary
index b228696351..a107a38b8d 100644
--- a/pengine/test10/ticket-master-21.summary
+++ b/pengine/test10/ticket-master-21.summary
@@ -1,35 +1,36 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Master/Slave Set: ms1 [rsc1]
Masters: [ node1 ]
Slaves: [ node2 ]
Transition Summary:
+ * Fence node1
* Move rsc_stonith (Started node1 -> node2)
* Demote rsc1:0 (Master -> Stopped node1)
Executing cluster transition:
* Pseudo action: rsc_stonith_stop_0
* Pseudo action: ms1_demote_0
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Resource action: rsc_stonith start on node2
* Pseudo action: rsc1:1_demote_0
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: ms1_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
rsc_stonith (stonith:null): Started node2
Master/Slave Set: ms1 [rsc1]
Slaves: [ node2 ]
Stopped: [ node1 ]
diff --git a/pengine/test10/ticket-master-9.summary b/pengine/test10/ticket-master-9.summary
index b228696351..a107a38b8d 100644
--- a/pengine/test10/ticket-master-9.summary
+++ b/pengine/test10/ticket-master-9.summary
@@ -1,35 +1,36 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
Master/Slave Set: ms1 [rsc1]
Masters: [ node1 ]
Slaves: [ node2 ]
Transition Summary:
+ * Fence node1
* Move rsc_stonith (Started node1 -> node2)
* Demote rsc1:0 (Master -> Stopped node1)
Executing cluster transition:
* Pseudo action: rsc_stonith_stop_0
* Pseudo action: ms1_demote_0
* Fencing node1 (reboot)
* Pseudo action: stonith_complete
* Resource action: rsc_stonith start on node2
* Pseudo action: rsc1:1_demote_0
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: ms1_stopped_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node2 ]
OFFLINE: [ node1 ]
rsc_stonith (stonith:null): Started node2
Master/Slave Set: ms1 [rsc1]
Slaves: [ node2 ]
Stopped: [ node1 ]
diff --git a/pengine/test10/ticket-primitive-21.summary b/pengine/test10/ticket-primitive-21.summary
index 6c523fe100..918b743f48 100644
--- a/pengine/test10/ticket-primitive-21.summary
+++ b/pengine/test10/ticket-primitive-21.summary
@@ -1,23 +1,24 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): Started node2
Transition Summary:
+ * Fence node2
* Stop rsc1 (node2)
Executing cluster transition:
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): Stopped
diff --git a/pengine/test10/ticket-primitive-9.summary b/pengine/test10/ticket-primitive-9.summary
index 6c523fe100..918b743f48 100644
--- a/pengine/test10/ticket-primitive-9.summary
+++ b/pengine/test10/ticket-primitive-9.summary
@@ -1,23 +1,24 @@
Current cluster status:
Online: [ node1 node2 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): Started node2
Transition Summary:
+ * Fence node2
* Stop rsc1 (node2)
Executing cluster transition:
* Fencing node2 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: rsc1_stop_0
* Pseudo action: all_stopped
Revised cluster status:
Online: [ node1 ]
OFFLINE: [ node2 ]
rsc_stonith (stonith:null): Started node1
rsc1 (ocf::pacemaker:Dummy): Stopped
diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary
index 05f80035cb..03506a349f 100644
--- a/pengine/test10/unfence-definition.summary
+++ b/pengine/test10/unfence-definition.summary
@@ -1,62 +1,67 @@
Current cluster status:
Node virt-4 (4): UNCLEAN (offline)
Online: [ virt-1 virt-2 virt-3 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 ]
Stopped: [ virt-3 virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 ]
Stopped: [ virt-2 virt-3 virt-4 ]
Transition Summary:
+ * Fence virt-4
+ * Fence virt-4
+ * Fence virt-3
+ * Fence virt-2
+ * Fence virt-1
* Restart fencing (Started virt-1)
* Restart dlm:0 (Started virt-1)
* Start dlm:2 (virt-3)
* Restart clvmd:0 (Started virt-1)
* Start clvmd:1 (virt-2)
* Start clvmd:2 (virt-3)
Executing cluster transition:
* Resource action: dlm monitor on virt-3
* Resource action: clvmd monitor on virt-2
* Resource action: clvmd monitor on virt-3
* Pseudo action: clvmd-clone_stop_0
* Fencing virt-4 (reboot)
* Pseudo action: stonith_complete
* Fencing virt-3 (on)
* Resource action: fencing monitor on virt-3
* Resource action: fencing stop on virt-1
* Resource action: clvmd stop on virt-1
* Pseudo action: clvmd-clone_stopped_0
* Resource action: fencing delete on virt-1
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm stop on virt-1
* Pseudo action: dlm-clone_stopped_0
* Pseudo action: dlm-clone_start_0
* Fencing virt-1 (on)
* Pseudo action: all_stopped
* Resource action: fencing start on virt-1
* Resource action: dlm start on virt-1
* Resource action: dlm start on virt-3
* Pseudo action: dlm-clone_running_0
* Pseudo action: clvmd-clone_start_0
* Resource action: clvmd start on virt-1
* Resource action: clvmd start on virt-2
* Resource action: clvmd start on virt-3
* Pseudo action: clvmd-clone_running_0
Revised cluster status:
Online: [ virt-1 virt-2 virt-3 ]
OFFLINE: [ virt-4 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary
index 41fed9089c..5cc4a4b6b6 100644
--- a/pengine/test10/unfence-parameters.summary
+++ b/pengine/test10/unfence-parameters.summary
@@ -1,65 +1,70 @@
Current cluster status:
Node virt-4 (4): UNCLEAN (offline)
Online: [ virt-1 virt-2 virt-3 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 ]
Stopped: [ virt-3 virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 ]
Stopped: [ virt-2 virt-3 virt-4 ]
Transition Summary:
+ * Fence virt-4
+ * Fence virt-4
+ * Fence virt-3
+ * Fence virt-2
+ * Fence virt-1
* Restart fencing (Started virt-1)
* Restart dlm:0 (Started virt-1)
* Restart dlm:1 (Started virt-2)
* Start dlm:2 (virt-3)
* Restart clvmd:0 (Started virt-1)
* Start clvmd:1 (virt-2)
* Start clvmd:2 (virt-3)
Executing cluster transition:
* Resource action: dlm monitor on virt-3
* Resource action: clvmd monitor on virt-2
* Resource action: clvmd monitor on virt-3
* Pseudo action: clvmd-clone_stop_0
* Fencing virt-4 (reboot)
* Pseudo action: stonith_complete
* Fencing virt-3 (on)
* Resource action: fencing monitor on virt-3
* Resource action: clvmd stop on virt-1
* Pseudo action: clvmd-clone_stopped_0
* Resource action: fencing stop on virt-1
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm stop on virt-2
* Fencing virt-2 (on)
* Resource action: dlm stop on virt-1
* Pseudo action: dlm-clone_stopped_0
* Pseudo action: dlm-clone_start_0
* Fencing virt-1 (on)
* Pseudo action: all_stopped
* Resource action: fencing start on virt-1
* Resource action: dlm start on virt-1
* Resource action: dlm start on virt-2
* Resource action: dlm start on virt-3
* Pseudo action: dlm-clone_running_0
* Pseudo action: clvmd-clone_start_0
* Resource action: clvmd start on virt-1
* Resource action: clvmd start on virt-2
* Resource action: clvmd start on virt-3
* Pseudo action: clvmd-clone_running_0
Revised cluster status:
Online: [ virt-1 virt-2 virt-3 ]
OFFLINE: [ virt-4 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
diff --git a/pengine/test10/unfence-startup.summary b/pengine/test10/unfence-startup.summary
index 76bc0fc5fc..6cd38ad3bc 100644
--- a/pengine/test10/unfence-startup.summary
+++ b/pengine/test10/unfence-startup.summary
@@ -1,47 +1,52 @@
Current cluster status:
Node virt-4 (4): UNCLEAN (offline)
Online: [ virt-1 virt-2 virt-3 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 ]
Stopped: [ virt-3 virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 ]
Stopped: [ virt-2 virt-3 virt-4 ]
Transition Summary:
+ * Fence virt-4
+ * Fence virt-4
+ * Fence virt-3
+ * Fence virt-2
+ * Fence virt-1
* Start dlm:2 (virt-3)
* Start clvmd:1 (virt-2)
* Start clvmd:2 (virt-3)
Executing cluster transition:
* Resource action: dlm monitor on virt-3
* Pseudo action: dlm-clone_start_0
* Resource action: clvmd monitor on virt-2
* Resource action: clvmd monitor on virt-3
* Fencing virt-4 (reboot)
* Pseudo action: stonith_complete
* Fencing virt-3 (on)
* Pseudo action: all_stopped
* Resource action: fencing monitor on virt-3
* Resource action: dlm start on virt-3
* Pseudo action: dlm-clone_running_0
* Pseudo action: clvmd-clone_start_0
* Resource action: clvmd start on virt-2
* Resource action: clvmd start on virt-3
* Pseudo action: clvmd-clone_running_0
Revised cluster status:
Online: [ virt-1 virt-2 virt-3 ]
OFFLINE: [ virt-4 ]
fencing (stonith:fence_scsi): Started virt-1
Clone Set: dlm-clone [dlm]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
Clone Set: clvmd-clone [clvmd]
Started: [ virt-1 virt-2 virt-3 ]
Stopped: [ virt-4 ]
diff --git a/pengine/test10/unmanaged-master.summary b/pengine/test10/unmanaged-master.summary
index 024179a727..66a8748053 100644
--- a/pengine/test10/unmanaged-master.summary
+++ b/pengine/test10/unmanaged-master.summary
@@ -1,61 +1,63 @@
Current cluster status:
Online: [ pcmk-1 pcmk-2 ]
OFFLINE: [ pcmk-3 pcmk-4 ]
Clone Set: Fencing [FencingChild] (unmanaged)
FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged)
FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged)
Stopped: [ pcmk-3 pcmk-4 ]
Resource Group: group-1
r192.168.122.126 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
r192.168.122.127 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
r192.168.122.128 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 (unmanaged)
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 (unmanaged)
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 (unmanaged)
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged)
migrator (ocf::pacemaker:Dummy): Started pcmk-4 (unmanaged)
Clone Set: Connectivity [ping-1] (unmanaged)
ping-1 (ocf::pacemaker:ping): Started pcmk-2 (unmanaged)
ping-1 (ocf::pacemaker:ping): Started pcmk-1 (unmanaged)
Stopped: [ pcmk-3 pcmk-4 ]
Master/Slave Set: master-1 [stateful-1] (unmanaged)
stateful-1 (ocf::pacemaker:Stateful): Master pcmk-2 (unmanaged)
stateful-1 (ocf::pacemaker:Stateful): Slave pcmk-1 ( unmanaged )
Stopped: [ pcmk-3 pcmk-4 ]
Transition Summary:
+ * Shutdown pcmk-2
+ * Shutdown pcmk-1
Executing cluster transition:
* Cluster action: do_shutdown on pcmk-2
* Cluster action: do_shutdown on pcmk-1
Revised cluster status:
Online: [ pcmk-1 pcmk-2 ]
OFFLINE: [ pcmk-3 pcmk-4 ]
Clone Set: Fencing [FencingChild] (unmanaged)
FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged)
FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged)
Stopped: [ pcmk-3 pcmk-4 ]
Resource Group: group-1
r192.168.122.126 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
r192.168.122.127 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
r192.168.122.128 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
rsc_pcmk-1 (ocf::heartbeat:IPaddr): Started pcmk-1 (unmanaged)
rsc_pcmk-2 (ocf::heartbeat:IPaddr): Started pcmk-2 (unmanaged)
rsc_pcmk-3 (ocf::heartbeat:IPaddr): Started pcmk-3 (unmanaged)
rsc_pcmk-4 (ocf::heartbeat:IPaddr): Started pcmk-4 (unmanaged)
lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged)
migrator (ocf::pacemaker:Dummy): Started pcmk-4 (unmanaged)
Clone Set: Connectivity [ping-1] (unmanaged)
ping-1 (ocf::pacemaker:ping): Started pcmk-2 (unmanaged)
ping-1 (ocf::pacemaker:ping): Started pcmk-1 (unmanaged)
Stopped: [ pcmk-3 pcmk-4 ]
Master/Slave Set: master-1 [stateful-1] (unmanaged)
stateful-1 (ocf::pacemaker:Stateful): Master pcmk-2 (unmanaged)
stateful-1 (ocf::pacemaker:Stateful): Slave pcmk-1 ( unmanaged )
Stopped: [ pcmk-3 pcmk-4 ]
diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary
index 1872e9a8b9..5f70a66fbc 100644
--- a/pengine/test10/whitebox-fail1.summary
+++ b/pengine/test10/whitebox-fail1.summary
@@ -1,55 +1,56 @@
Current cluster status:
Online: [ 18node1 18node2 18node3 ]
Containers: [ lxc2:container2 ]
container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2
container2 (ocf::heartbeat:VirtualDomain): Started 18node2
shoot1 (stonith:fence_xvm): Started 18node3
Clone Set: M-clone [M]
Started: [ 18node1 18node2 18node3 lxc2 ]
A (ocf::pacemaker:Dummy): Started 18node1
B (ocf::pacemaker:Dummy): FAILED lxc1
C (ocf::pacemaker:Dummy): Started lxc2
D (ocf::pacemaker:Dummy): Started 18node1
Transition Summary:
+ * Fence lxc1 (resource: container1)
* Recover container1 (Started 18node2)
* Recover M:4 (Started lxc1)
* Recover B (Started lxc1)
* Restart lxc1 (Started 18node2)
Executing cluster transition:
* Resource action: lxc1 stop on 18node2
* Resource action: container1 stop on 18node2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Pseudo action: stonith_complete
* Resource action: container1 start on 18node2
* Pseudo action: M-clone_stop_0
* Pseudo action: B_stop_0
* Resource action: lxc1 start on 18node2
* Resource action: lxc1 monitor=30000 on 18node2
* Pseudo action: M_stop_0
* Pseudo action: M-clone_stopped_0
* Pseudo action: M-clone_start_0
* Resource action: B start on lxc1
* Pseudo action: all_stopped
* Resource action: M start on lxc1
* Pseudo action: M-clone_running_0
* Resource action: B monitor=10000 on lxc1
* Resource action: M monitor=10000 on lxc1
Revised cluster status:
Online: [ 18node1 18node2 18node3 ]
Containers: [ lxc1:container1 lxc2:container2 ]
container1 (ocf::heartbeat:VirtualDomain): Started 18node2
container2 (ocf::heartbeat:VirtualDomain): Started 18node2
shoot1 (stonith:fence_xvm): Started 18node3
Clone Set: M-clone [M]
Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
A (ocf::pacemaker:Dummy): Started 18node1
B (ocf::pacemaker:Dummy): Started lxc1
C (ocf::pacemaker:Dummy): Started lxc2
D (ocf::pacemaker:Dummy): Started 18node1
diff --git a/pengine/test10/whitebox-fail2.summary b/pengine/test10/whitebox-fail2.summary
index 5db6588f87..2922f16ea7 100644
--- a/pengine/test10/whitebox-fail2.summary
+++ b/pengine/test10/whitebox-fail2.summary
@@ -1,55 +1,56 @@
Current cluster status:
Online: [ 18node1 18node2 18node3 ]
Containers: [ lxc2:container2 ]
container1 (ocf::heartbeat:VirtualDomain): FAILED 18node2
container2 (ocf::heartbeat:VirtualDomain): Started 18node2
shoot1 (stonith:fence_xvm): Started 18node3
Clone Set: M-clone [M]
Started: [ 18node1 18node2 18node3 lxc2 ]
A (ocf::pacemaker:Dummy): Started 18node1
B (ocf::pacemaker:Dummy): FAILED lxc1
C (ocf::pacemaker:Dummy): Started lxc2
D (ocf::pacemaker:Dummy): Started 18node1
Transition Summary:
+ * Fence lxc1 (resource: container1)
* Recover container1 (Started 18node2)
* Recover M:4 (Started lxc1)
* Recover B (Started lxc1)
* Recover lxc1 (Started 18node2)
Executing cluster transition:
* Resource action: lxc1 stop on 18node2
* Resource action: container1 stop on 18node2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Pseudo action: stonith_complete
* Resource action: container1 start on 18node2
* Pseudo action: M-clone_stop_0
* Pseudo action: B_stop_0
* Resource action: lxc1 start on 18node2
* Resource action: lxc1 monitor=30000 on 18node2
* Pseudo action: M_stop_0
* Pseudo action: M-clone_stopped_0
* Pseudo action: M-clone_start_0
* Resource action: B start on lxc1
* Pseudo action: all_stopped
* Resource action: M start on lxc1
* Pseudo action: M-clone_running_0
* Resource action: B monitor=10000 on lxc1
* Resource action: M monitor=10000 on lxc1
Revised cluster status:
Online: [ 18node1 18node2 18node3 ]
Containers: [ lxc1:container1 lxc2:container2 ]
container1 (ocf::heartbeat:VirtualDomain): Started 18node2
container2 (ocf::heartbeat:VirtualDomain): Started 18node2
shoot1 (stonith:fence_xvm): Started 18node3
Clone Set: M-clone [M]
Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
A (ocf::pacemaker:Dummy): Started 18node1
B (ocf::pacemaker:Dummy): Started lxc1
C (ocf::pacemaker:Dummy): Started lxc2
D (ocf::pacemaker:Dummy): Started 18node1
diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary
index 50a3446e29..31cc4a51b3 100644
--- a/pengine/test10/whitebox-imply-stop-on-fence.summary
+++ b/pengine/test10/whitebox-imply-stop-on-fence.summary
@@ -1,93 +1,96 @@
Current cluster status:
Node kiff-01 (1): UNCLEAN (offline)
Online: [ kiff-02 ]
Containers: [ lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-02:R-lxc-02_kiff-02 ]
fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
Clone Set: dlm-clone [dlm]
dlm (ocf::pacemaker:controld): Started kiff-01 (UNCLEAN)
Started: [ kiff-02 ]
Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
Clone Set: clvmd-clone [clvmd]
clvmd (ocf::heartbeat:clvm): Started kiff-01 (UNCLEAN)
Started: [ kiff-02 ]
Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
Clone Set: shared0-clone [shared0]
shared0 (ocf::heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
Started: [ kiff-02 ]
Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
vm-fs (ocf::heartbeat:Filesystem): FAILED lxc-01_kiff-01
Transition Summary:
+ * Fence lxc-02_kiff-01 (resource: R-lxc-02_kiff-01)
+ * Fence lxc-01_kiff-01 (resource: R-lxc-01_kiff-01)
+ * Fence kiff-01
* Move fence-kiff-02 (Started kiff-01 -> kiff-02)
* Stop dlm:0 (kiff-01)
* Stop clvmd:0 (kiff-01)
* Stop shared0:0 (kiff-01)
* Recover R-lxc-01_kiff-01 (Started kiff-01 -> kiff-02)
* Move R-lxc-02_kiff-01 (Started kiff-01 -> kiff-02)
* Recover vm-fs (Started lxc-01_kiff-01)
* Move lxc-01_kiff-01 (Started kiff-01 -> kiff-02)
* Move lxc-02_kiff-01 (Started kiff-01 -> kiff-02)
Executing cluster transition:
* Pseudo action: fence-kiff-02_stop_0
* Fencing kiff-01 (reboot)
* Pseudo action: lxc-01_kiff-01_stop_0
* Pseudo action: lxc-02_kiff-01_stop_0
* Pseudo action: R-lxc-01_kiff-01_stop_0
* Pseudo action: R-lxc-02_kiff-01_stop_0
* Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
* Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
* Pseudo action: stonith_complete
* Pseudo action: shared0-clone_stop_0
* Resource action: R-lxc-01_kiff-01 start on kiff-02
* Resource action: R-lxc-02_kiff-01 start on kiff-02
* Pseudo action: vm-fs_stop_0
* Resource action: lxc-01_kiff-01 start on kiff-02
* Resource action: lxc-02_kiff-01 start on kiff-02
* Pseudo action: shared0_stop_0
* Pseudo action: shared0-clone_stopped_0
* Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
* Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
* Resource action: vm-fs start on lxc-01_kiff-01
* Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
* Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
* Pseudo action: clvmd-clone_stop_0
* Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
* Pseudo action: clvmd_stop_0
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Pseudo action: dlm_stop_0
* Pseudo action: dlm-clone_stopped_0
* Pseudo action: all_stopped
* Resource action: fence-kiff-02 start on kiff-02
* Resource action: fence-kiff-02 monitor=60000 on kiff-02
Revised cluster status:
Online: [ kiff-02 ]
OFFLINE: [ kiff-01 ]
Containers: [ lxc-01_kiff-01:R-lxc-01_kiff-01 lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-01:R-lxc-02_kiff-01 lxc-02_kiff-02:R-lxc-02_kiff-02 ]
fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
Clone Set: dlm-clone [dlm]
Started: [ kiff-02 ]
Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
Clone Set: clvmd-clone [clvmd]
Started: [ kiff-02 ]
Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
Clone Set: shared0-clone [shared0]
Started: [ kiff-02 ]
Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02
R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02
R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
vm-fs (ocf::heartbeat:Filesystem): Started lxc-01_kiff-01
diff --git a/pengine/test10/whitebox-ms-ordering.summary b/pengine/test10/whitebox-ms-ordering.summary
index 365cf8d6d2..fcdef66e79 100644
--- a/pengine/test10/whitebox-ms-ordering.summary
+++ b/pengine/test10/whitebox-ms-ordering.summary
@@ -1,71 +1,73 @@
Current cluster status:
Online: [ 18node1 18node2 18node3 ]
shooter (stonith:fence_xvm): Started 18node2
container1 (ocf::heartbeat:VirtualDomain): FAILED
container2 (ocf::heartbeat:VirtualDomain): FAILED
Master/Slave Set: lxc-ms-master [lxc-ms]
Stopped: [ 18node1 18node2 18node3 ]
Transition Summary:
+ * Fence lxc2 (resource: container2)
+ * Fence lxc1 (resource: container1)
* Start container1 (18node1)
* Start container2 (18node1)
* Recover lxc-ms:0 (Master lxc1)
* Recover lxc-ms:1 (Slave lxc2)
* Start lxc1 (18node1)
* Start lxc2 (18node1)
Executing cluster transition:
* Resource action: container1 monitor on 18node3
* Resource action: container1 monitor on 18node2
* Resource action: container1 monitor on 18node1
* Resource action: container2 monitor on 18node3
* Resource action: container2 monitor on 18node2
* Resource action: container2 monitor on 18node1
* Resource action: lxc-ms monitor on 18node3
* Resource action: lxc-ms monitor on 18node2
* Resource action: lxc-ms monitor on 18node1
* Pseudo action: lxc-ms-master_demote_0
* Resource action: lxc1 monitor on 18node3
* Resource action: lxc1 monitor on 18node2
* Resource action: lxc1 monitor on 18node1
* Resource action: lxc2 monitor on 18node3
* Resource action: lxc2 monitor on 18node2
* Resource action: lxc2 monitor on 18node1
* Pseudo action: stonith-lxc2-reboot on lxc2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Pseudo action: stonith_complete
* Resource action: container1 start on 18node1
* Resource action: container2 start on 18node1
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc1 start on 18node1
* Resource action: lxc2 start on 18node1
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc1 monitor=30000 on 18node1
* Resource action: lxc2 monitor=30000 on 18node1
* Pseudo action: all_stopped
* Resource action: lxc-ms start on lxc1
* Resource action: lxc-ms start on lxc2
* Pseudo action: lxc-ms-master_running_0
* Resource action: lxc-ms monitor=10000 on lxc2
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Revised cluster status:
Online: [ 18node1 18node2 18node3 ]
Containers: [ lxc1:container1 lxc2:container2 ]
shooter (stonith:fence_xvm): Started 18node2
container1 (ocf::heartbeat:VirtualDomain): Started 18node1
container2 (ocf::heartbeat:VirtualDomain): Started 18node1
Master/Slave Set: lxc-ms-master [lxc-ms]
Masters: [ lxc1 ]
Slaves: [ lxc2 ]
diff --git a/pengine/test10/whitebox-unexpectedly-running.summary b/pengine/test10/whitebox-unexpectedly-running.summary
index 0b5ca423c0..ed0a5bdd9e 100644
--- a/pengine/test10/whitebox-unexpectedly-running.summary
+++ b/pengine/test10/whitebox-unexpectedly-running.summary
@@ -1,27 +1,28 @@
Current cluster status:
Online: [ 18builder ]
FAKE (ocf::pacemaker:Dummy): FAILED 18builder
Transition Summary:
+ * Fence remote1 (resource: FAKE)
* Recover FAKE (Started 18builder)
* Start remote1 (18builder)
Executing cluster transition:
* Resource action: FAKE stop on 18builder
* Resource action: remote1 monitor on 18builder
* Pseudo action: stonith-remote1-reboot on remote1
* Pseudo action: stonith_complete
* Pseudo action: all_stopped
* Resource action: FAKE start on 18builder
* Resource action: remote1 start on 18builder
* Resource action: FAKE monitor=60000 on 18builder
* Resource action: remote1 monitor=30000 on 18builder
Revised cluster status:
Online: [ 18builder ]
Containers: [ remote1:FAKE ]
FAKE (ocf::pacemaker:Dummy): Started 18builder
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index 2cc578d10e..aaaf0aa0b3 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -1,927 +1,928 @@
/*
* Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <sys/types.h>
#include <dirent.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/common/util.h>
#include <crm/transition.h>
#include <crm/common/iso8601.h>
#include <crm/pengine/status.h>
#include <allocate.h>
#include "fake_transition.h"
cib_t *global_cib = NULL;
GListPtr op_fail = NULL;
bool action_numbers = FALSE;
gboolean quiet = FALSE;
gboolean print_pending = TRUE;
char *temp_shadow = NULL;
extern gboolean bringing_nodes_online;
#define quiet_log(fmt, args...) do { \
if(quiet == FALSE) { \
printf(fmt , ##args); \
} \
} while(0)
extern void cleanup_alloc_calculations(pe_working_set_t * data_set);
extern xmlNode *do_calculations(pe_working_set_t * data_set, xmlNode * xml_input, crm_time_t * now);
char *use_date = NULL;
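/* Set data_set->now from --set-datetime if given, otherwise from the
 * "execution-date" recorded in the input CIB */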
static void
get_date(pe_working_set_t * data_set)
{
int value = 0;
time_t original_date = 0;
crm_element_value_int(data_set->input, "execution-date", &value);
original_date = value;
if (use_date) {
data_set->now = crm_time_new(use_date);
} else if(original_date) {
char *when = NULL;
data_set->now = crm_time_new(NULL);
crm_time_set_timet(data_set->now, &original_date);
when = crm_time_as_string(data_set->now, crm_time_log_date|crm_time_log_timeofday);
printf("Using the original execution date of: %s\n", when);
free(when);
}
}
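/* Print a one-line state for each node, followed by the status of every
 * resource (stopped orphans are skipped) */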
static void
print_cluster_status(pe_working_set_t * data_set, long options)
{
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_remote_containers = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
GListPtr gIter = NULL;
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
const char *node_mode = NULL;
char *node_name = NULL;
if (is_container_remote_node(node)) {
node_name = crm_strdup_printf("%s:%s", node->details->uname, node->details->remote_rsc->container->id);
} else {
node_name = crm_strdup_printf("%s", node->details->uname);
}
if (node->details->unclean) {
if (node->details->online && node->details->unclean) {
node_mode = "UNCLEAN (online)";
} else if (node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if (node->details->pending) {
node_mode = "pending";
} else if (node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
node_mode = "standby";
} else {
node_mode = "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
node_mode = "maintenance";
} else {
node_mode = "OFFLINE (maintenance)";
}
} else if (node->details->online) {
if (is_container_remote_node(node)) {
online_remote_containers = add_list_element(online_remote_containers, node_name);
} else if (is_baremetal_remote_node(node)) {
online_remote_nodes = add_list_element(online_remote_nodes, node_name);
} else {
online_nodes = add_list_element(online_nodes, node_name);
}
free(node_name);
continue;
} else {
if (is_baremetal_remote_node(node)) {
offline_remote_nodes = add_list_element(offline_remote_nodes, node_name);
} else if (is_container_remote_node(node)) {
/* ignore offline container nodes */
} else {
offline_nodes = add_list_element(offline_nodes, node_name);
}
free(node_name);
continue;
}
if (is_container_remote_node(node)) {
printf("ContainerNode %s: %s\n", node_name, node_mode);
} else if (is_baremetal_remote_node(node)) {
printf("RemoteNode %s: %s\n", node_name, node_mode);
} else if (safe_str_eq(node->details->uname, node->details->id)) {
printf("Node %s: %s\n", node_name, node_mode);
} else {
printf("Node %s (%s): %s\n", node_name, node->details->id, node_mode);
}
free(node_name);
}
if (online_nodes) {
printf("Online: [%s ]\n", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
printf("OFFLINE: [%s ]\n", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
printf("RemoteOnline: [%s ]\n", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
printf("RemoteOFFLINE: [%s ]\n", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_remote_containers) {
printf("Containers: [%s ]\n", online_remote_containers);
free(online_remote_containers);
}
fprintf(stdout, "\n");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (is_set(rsc->flags, pe_rsc_orphan)
&& rsc->role == RSC_ROLE_STOPPED) {
continue;
}
rsc->fns->print(rsc, NULL, pe_print_printf | options, stdout);
}
fprintf(stdout, "\n");
}
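/* Build a human-readable label for an action (operation key plus node name,
 * optionally with the action ID), used for the dot graph output */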
static char *
create_action_name(action_t * action)
{
char *action_name = NULL;
const char *prefix = NULL;
const char *action_host = NULL;
const char *task = action->task;
if (action->node) {
action_host = action->node->details->uname;
} else if (is_not_set(action->flags, pe_action_pseudo)) {
action_host = "<none>";
}
if (safe_str_eq(action->task, RSC_CANCEL)) {
prefix = "Cancel ";
task = "monitor"; /* TO-DO: Hack! */
}
if (action->rsc && action->rsc->clone_name) {
char *key = NULL;
const char *name = action->rsc->clone_name;
const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
int interval = crm_parse_int(interval_s, "0");
if (safe_str_eq(action->task, RSC_NOTIFY)
|| safe_str_eq(action->task, RSC_NOTIFIED)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_key_type");
const char *n_task = g_hash_table_lookup(action->meta, "notify_key_operation");
CRM_ASSERT(n_type != NULL);
CRM_ASSERT(n_task != NULL);
key = generate_notify_key(name, n_type, n_task);
} else {
key = generate_op_key(name, task, interval);
}
if (action_host) {
action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", key, action_host);
} else {
action_name = crm_strdup_printf("%s%s", prefix ? prefix : "", key);
}
free(key);
} else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
action_name = crm_strdup_printf("%s%s '%s' %s", prefix ? prefix : "", action->task, op, action_host);
} else if (action->rsc && action_host) {
action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", action->uuid, action_host);
} else if (action_host) {
action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", action->task, action_host);
} else {
action_name = crm_strdup_printf("%s", action->uuid);
}
if(action_numbers) {
char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id);
free(action_name);
action_name = with_id;
}
return action_name;
}
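/* Dump the transition graph in Graphviz dot format: dumped actions are drawn
 * bold/green, unmanaged ones red/purple, optional ones blue, unrunnable ones red */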
static void
create_dotfile(pe_working_set_t * data_set, const char *dot_file, gboolean all_actions)
{
GListPtr gIter = NULL;
FILE *dot_strm = fopen(dot_file, "w");
if (dot_strm == NULL) {
crm_perror(LOG_ERR, "Could not open %s for writing", dot_file);
return;
}
fprintf(dot_strm, " digraph \"g\" {\n");
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
const char *style = "dashed";
const char *font = "black";
const char *color = "black";
char *action_name = create_action_name(action);
crm_trace("Action %d: %s %s %p", action->id, action_name, action->uuid, action);
if (is_set(action->flags, pe_action_pseudo)) {
font = "orange";
}
if (is_set(action->flags, pe_action_dumped)) {
style = "bold";
color = "green";
} else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) {
color = "red";
font = "purple";
if (all_actions == FALSE) {
goto dont_write;
}
} else if (is_set(action->flags, pe_action_optional)) {
color = "blue";
if (all_actions == FALSE) {
goto dont_write;
}
} else {
color = "red";
CRM_CHECK(is_set(action->flags, pe_action_runnable) == FALSE,;
);
}
set_bit(action->flags, pe_action_dumped);
crm_trace("\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]",
action_name, style, color, font);
fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n",
action_name, style, color, font);
dont_write:
free(action_name);
}
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
GListPtr gIter2 = NULL;
for (gIter2 = action->actions_before; gIter2 != NULL; gIter2 = gIter2->next) {
action_wrapper_t *before = (action_wrapper_t *) gIter2->data;
char *before_name = NULL;
char *after_name = NULL;
const char *style = "dashed";
gboolean optional = TRUE;
if (before->state == pe_link_dumped) {
optional = FALSE;
style = "bold";
} else if (is_set(action->flags, pe_action_pseudo)
&& (before->type & pe_order_stonith_stop)) {
continue;
} else if (before->state == pe_link_dup) {
continue;
} else if (before->type == pe_order_none) {
continue;
} else if (is_set(before->action->flags, pe_action_dumped)
&& is_set(action->flags, pe_action_dumped)
&& before->type != pe_order_load) {
optional = FALSE;
}
if (all_actions || optional == FALSE) {
before_name = create_action_name(before->action);
after_name = create_action_name(action);
crm_trace("\"%s\" -> \"%s\" [ style = %s]",
before_name, after_name, style);
fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n",
before_name, after_name, style);
free(before_name);
free(after_name);
}
}
}
fprintf(dot_strm, "}\n");
if (dot_strm != NULL) {
fflush(dot_strm);
fclose(dot_strm);
}
}
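/* Load the input CIB (live cluster, stdin, or file), validate it, and write it
 * to the output/shadow file referenced by the CIB_file environment variable */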
static void
setup_input(const char *input, const char *output)
{
int rc = pcmk_ok;
cib_t *cib_conn = NULL;
xmlNode *cib_object = NULL;
char *local_output = NULL;
if (input == NULL) {
/* Use live CIB */
cib_conn = cib_new();
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
if (rc == pcmk_ok) {
rc = cib_conn->cmds->query(cib_conn, NULL, &cib_object, cib_scope_local | cib_sync_call);
}
cib_conn->cmds->signoff(cib_conn);
cib_delete(cib_conn);
cib_conn = NULL;
if (rc != pcmk_ok) {
fprintf(stderr, "Live CIB query failed: %s (%d)\n", pcmk_strerror(rc), rc);
crm_exit(rc);
} else if (cib_object == NULL) {
fprintf(stderr, "Live CIB query failed: empty result\n");
crm_exit(ENOTCONN);
}
} else if (safe_str_eq(input, "-")) {
cib_object = filename2xml(NULL);
} else {
cib_object = filename2xml(input);
}
if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) {
create_xml_node(cib_object, XML_CIB_TAG_STATUS);
}
if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) {
free_xml(cib_object);
crm_exit(ENOKEY);
}
if (validate_xml(cib_object, NULL, FALSE) != TRUE) {
free_xml(cib_object);
crm_exit(pcmk_err_schema_validation);
}
if (output == NULL) {
char *pid = crm_itoa(getpid());
local_output = get_shadow_file(pid);
temp_shadow = strdup(local_output);
output = local_output;
free(pid);
}
rc = write_xml_file(cib_object, output, FALSE);
free_xml(cib_object);
cib_object = NULL;
if (rc < 0) {
fprintf(stderr, "Could not create '%s': %s\n", output, strerror(errno));
crm_exit(rc);
}
setenv("CIB_file", output, 1);
free(local_output);
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"quiet", 0, 0, 'Q', "\tDisplay only essentialoutput"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"-spacer-", 0, 0, '-', "\nOperations:"},
{"run", 0, 0, 'R', "\tDetermine the cluster's response to the given configuration and status"},
{"simulate", 0, 0, 'S', "Simulate the transition's execution and display the resulting cluster status"},
{"in-place", 0, 0, 'X', "Simulate the transition's execution and store the result back to the input file"},
{"show-scores", 0, 0, 's', "Show allocation scores"},
{"show-utilization", 0, 0, 'U', "Show utilization information"},
{"profile", 1, 0, 'P', "Run all tests in the named directory to create profiling data"},
{"pending", 0, 0, 'j', "\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
{"-spacer-", 0, 0, '-', "\nSynthetic Cluster Events:"},
{"node-up", 1, 0, 'u', "\tBring a node online"},
{"node-down", 1, 0, 'd', "\tTake a node offline"},
{"node-fail", 1, 0, 'f', "\tMark a node as failed"},
{"op-inject", 1, 0, 'i', "\tGenerate a failure for the cluster to react to in the simulation"},
{"-spacer-", 0, 0, '-', "\t\tValue is of the form ${resource}_${task}_${interval}@${node}=${rc}."},
{"-spacer-", 0, 0, '-', "\t\tEg. memcached_monitor_20000@bart.example.com=7"},
{"-spacer-", 0, 0, '-', "\t\tFor more information on OCF return codes, refer to: http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/s-ocf-return-codes.html"},
{"op-fail", 1, 0, 'F', "\tIf the specified task occurs during the simulation, have it fail with return code ${rc}"},
{"-spacer-", 0, 0, '-', "\t\tValue is of the form ${resource}_${task}_${interval}@${node}=${rc}."},
{"-spacer-", 0, 0, '-', "\t\tEg. memcached_stop_0@bart.example.com=1\n"},
{"-spacer-", 0, 0, '-', "\t\tThe transition will normally stop at the failed action. Save the result with --save-output and re-run with --xml-file"},
{"set-datetime", 1, 0, 't', "Set date/time"},
{"quorum", 1, 0, 'q', "\tSpecify a value for quorum"},
{"watchdog", 1, 0, 'w', "\tAssume a watchdog device is active"},
{"ticket-grant", 1, 0, 'g', "Grant a ticket"},
{"ticket-revoke", 1, 0, 'r', "Revoke a ticket"},
{"ticket-standby", 1, 0, 'b', "Make a ticket standby"},
{"ticket-activate", 1, 0, 'e', "Activate a ticket"},
{"-spacer-", 0, 0, '-', "\nOutput Options:"},
{"save-input", 1, 0, 'I', "\tSave the input configuration to the named file"},
{"save-output", 1, 0, 'O', "Save the output configuration to the named file"},
{"save-graph", 1, 0, 'G', "\tSave the transition graph (XML format) to the named file"},
{"save-dotfile", 1, 0, 'D', "Save the transition graph (DOT format) to the named file"},
{"all-actions", 0, 0, 'a', "\tDisplay all possible actions in the DOT graph - even ones not part of the transition"},
{"-spacer-", 0, 0, '-', "\nData Source:"},
{"live-check", 0, 0, 'L', "\tConnect to the CIB and use the current contents as input"},
{"xml-file", 1, 0, 'x', "\tRetrieve XML from the named file"},
{"xml-pipe", 0, 0, 'p', "\tRetrieve XML from stdin"},
{"-spacer-", 0, 0, '-', "\nExamples:\n"},
{"-spacer-", 0, 0, '-', "Pretend a recurring monitor action found memcached stopped on node fred.example.com and, during recovery, that the memcached stop action failed", pcmk_option_paragraph},
{"-spacer-", 0, 0, '-', " crm_simulate -LS --op-inject memcached:0_monitor_20000@bart.example.com=7 --op-fail memcached:0_stop_0@fred.example.com=1 --save-output /tmp/memcached-test.xml", pcmk_option_example},
{"-spacer-", 0, 0, '-', "Now see what the reaction to the stop failure would be", pcmk_option_paragraph},
{"-spacer-", 0, 0, '-', " crm_simulate -S --xml-file /tmp/memcached-test.xml", pcmk_option_example},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
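/* Run a full policy engine calculation against a single CIB file */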
static void
profile_one(const char *xml_file)
{
xmlNode *cib_object = NULL;
pe_working_set_t data_set;
printf("* Testing %s\n", xml_file);
cib_object = filename2xml(xml_file);
if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) {
create_xml_node(cib_object, XML_CIB_TAG_STATUS);
}
if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) {
free_xml(cib_object);
return;
}
if (validate_xml(cib_object, NULL, FALSE) != TRUE) {
free_xml(cib_object);
return;
}
set_working_set_defaults(&data_set);
data_set.input = cib_object;
get_date(&data_set);
do_calculations(&data_set, cib_object, NULL);
cleanup_alloc_calculations(&data_set);
}
#ifndef FILENAME_MAX
# define FILENAME_MAX 512
#endif
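/* Profile every *.xml file in the given directory; returns the number of
 * .xml files examined */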
static int
profile_all(const char *dir)
{
struct dirent **namelist;
int lpc = 0;
int file_num = scandir(dir, &namelist, 0, alphasort);
if (file_num > 0) {
struct stat prop;
char buffer[FILENAME_MAX + 1];
while (file_num--) {
if ('.' == namelist[file_num]->d_name[0]) {
free(namelist[file_num]);
continue;
} else if (!crm_ends_with(namelist[file_num]->d_name, ".xml")) {
free(namelist[file_num]);
continue;
}
lpc++;
snprintf(buffer, FILENAME_MAX, "%s/%s", dir, namelist[file_num]->d_name);
if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
profile_one(buffer);
}
free(namelist[file_num]);
}
free(namelist);
}
return lpc;
}
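/* Recursively count resources, excluding orphans */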
static int
count_resources(pe_working_set_t * data_set, resource_t * rsc)
{
int count = 0;
GListPtr gIter = NULL;
if (rsc == NULL) {
gIter = data_set->resources;
} else if (rsc->children) {
gIter = rsc->children;
} else {
return is_not_set(rsc->flags, pe_rsc_orphan);
}
for (; gIter != NULL; gIter = gIter->next) {
count += count_resources(data_set, gIter->data);
}
return count;
}
int
main(int argc, char **argv)
{
int rc = 0;
guint modified = 0;
gboolean store = FALSE;
gboolean process = FALSE;
gboolean simulate = FALSE;
gboolean all_actions = FALSE;
gboolean have_stdout = FALSE;
pe_working_set_t data_set;
const char *xml_file = "-";
const char *quorum = NULL;
const char *watchdog = NULL;
const char *test_dir = NULL;
const char *dot_file = NULL;
const char *graph_file = NULL;
const char *input_file = NULL;
const char *output_file = NULL;
int flag = 0;
int index = 0;
int argerr = 0;
GListPtr node_up = NULL;
GListPtr node_down = NULL;
GListPtr node_fail = NULL;
GListPtr op_inject = NULL;
GListPtr ticket_grant = NULL;
GListPtr ticket_revoke = NULL;
GListPtr ticket_standby = NULL;
GListPtr ticket_activate = NULL;
xmlNode *input = NULL;
crm_log_cli_init("crm_simulate");
crm_set_options(NULL, "datasource operation [additional options]",
long_options, "Tool for simulating the cluster's response to events");
if (argc < 2) {
crm_help('?', EX_USAGE);
}
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1)
break;
switch (flag) {
case 'V':
if (have_stdout == FALSE) {
/* Redirect stderr to stdout so we can grep the output */
have_stdout = TRUE;
close(STDERR_FILENO);
dup2(STDOUT_FILENO, STDERR_FILENO);
}
crm_bump_log_level(argc, argv);
action_numbers = TRUE;
break;
case '?':
case '$':
crm_help(flag, EX_OK);
break;
case 'p':
xml_file = "-";
break;
case 'Q':
quiet = TRUE;
break;
case 'L':
xml_file = NULL;
break;
case 'x':
xml_file = optarg;
break;
case 'u':
modified++;
bringing_nodes_online = TRUE;
node_up = g_list_append(node_up, optarg);
break;
case 'd':
modified++;
node_down = g_list_append(node_down, optarg);
break;
case 'f':
modified++;
node_fail = g_list_append(node_fail, optarg);
break;
case 't':
use_date = strdup(optarg);
break;
case 'i':
modified++;
op_inject = g_list_append(op_inject, optarg);
break;
case 'F':
process = TRUE;
simulate = TRUE;
op_fail = g_list_append(op_fail, optarg);
break;
case 'w':
modified++;
watchdog = optarg;
break;
case 'q':
modified++;
quorum = optarg;
break;
case 'g':
modified++;
ticket_grant = g_list_append(ticket_grant, optarg);
break;
case 'r':
modified++;
ticket_revoke = g_list_append(ticket_revoke, optarg);
break;
case 'b':
modified++;
ticket_standby = g_list_append(ticket_standby, optarg);
break;
case 'e':
modified++;
ticket_activate = g_list_append(ticket_activate, optarg);
break;
case 'a':
all_actions = TRUE;
break;
case 's':
process = TRUE;
show_scores = TRUE;
break;
case 'U':
process = TRUE;
show_utilization = TRUE;
break;
case 'j':
print_pending = TRUE;
break;
case 'S':
process = TRUE;
simulate = TRUE;
break;
case 'X':
store = TRUE;
process = TRUE;
simulate = TRUE;
break;
case 'R':
process = TRUE;
break;
case 'D':
process = TRUE;
dot_file = optarg;
break;
case 'G':
process = TRUE;
graph_file = optarg;
break;
case 'I':
input_file = optarg;
break;
case 'O':
output_file = optarg;
break;
case 'P':
test_dir = optarg;
break;
default:
++argerr;
break;
}
}
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', EX_USAGE);
}
if (test_dir != NULL) {
return profile_all(test_dir);
}
setup_input(xml_file, store ? xml_file : output_file);
global_cib = cib_new();
global_cib->cmds->signon(global_cib, crm_system_name, cib_command);
set_working_set_defaults(&data_set);
if (data_set.now != NULL) {
quiet_log(" + Setting effective cluster time: %s", use_date);
crm_time_log(LOG_WARNING, "Set fake 'now' to", data_set.now,
crm_time_log_date | crm_time_log_timeofday);
}
rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call | cib_scope_local);
CRM_ASSERT(rc == pcmk_ok);
data_set.input = input;
get_date(&data_set);
if(xml_file) {
set_bit(data_set.flags, pe_flag_sanitized);
}
cluster_status(&data_set);
if (quiet == FALSE) {
int options = print_pending ? pe_print_pending : 0;
if(is_set(data_set.flags, pe_flag_maintenance_mode)) {
quiet_log("\n *** Resource management is DISABLED ***");
quiet_log("\n The cluster will not attempt to start, stop or recover services");
quiet_log("\n");
}
if(data_set.disabled_resources || data_set.blocked_resources) {
quiet_log("%d of %d resources DISABLED and %d BLOCKED from being started due to failures\n",
data_set.disabled_resources, count_resources(&data_set, NULL), data_set.blocked_resources);
}
quiet_log("\nCurrent cluster status:\n");
print_cluster_status(&data_set, options);
}
if (modified) {
quiet_log("Performing requested modifications\n");
modify_configuration(&data_set, global_cib, quorum, watchdog, node_up, node_down, node_fail, op_inject,
ticket_grant, ticket_revoke, ticket_standby, ticket_activate);
rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call);
if (rc != pcmk_ok) {
fprintf(stderr, "Could not connect to the CIB for input: %s\n", pcmk_strerror(rc));
goto done;
}
cleanup_calculations(&data_set);
data_set.input = input;
get_date(&data_set);
if(xml_file) {
set_bit(data_set.flags, pe_flag_sanitized);
}
cluster_status(&data_set);
}
if (input_file != NULL) {
rc = write_xml_file(input, input_file, FALSE);
if (rc < 0) {
fprintf(stderr, "Could not create '%s': %s\n", input_file, strerror(errno));
goto done;
}
}
rc = 0;
if (process || simulate) {
crm_time_t *local_date = NULL;
if (show_scores && show_utilization) {
printf("Allocation scores and utilization information:\n");
} else if (show_scores) {
fprintf(stdout, "Allocation scores:\n");
} else if (show_utilization) {
printf("Utilization information:\n");
}
do_calculations(&data_set, input, local_date);
input = NULL; /* Don't try and free it twice */
if (graph_file != NULL) {
write_xml_file(data_set.graph, graph_file, FALSE);
}
if (dot_file != NULL) {
create_dotfile(&data_set, dot_file, all_actions);
}
if (quiet == FALSE) {
GListPtr gIter = NULL;
quiet_log("%sTransition Summary:\n", show_scores || show_utilization
|| modified ? "\n" : "");
fflush(stdout);
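+ /* Log node-level actions (e.g. fencing and shutdowns) ahead of the per-resource actions */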
+ LogNodeActions(&data_set, TRUE);
for (gIter = data_set.resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
LogActions(rsc, &data_set, TRUE);
}
}
}
if (simulate) {
rc = run_simulation(&data_set, global_cib, op_fail, quiet);
if(quiet == FALSE) {
get_date(&data_set);
quiet_log("\nRevised cluster status:\n");
cluster_status(&data_set);
print_cluster_status(&data_set, 0);
}
}
done:
cleanup_alloc_calculations(&data_set);
global_cib->cmds->signoff(global_cib);
cib_delete(global_cib);
free(use_date);
fflush(stderr);
if (temp_shadow) {
unlink(temp_shadow);
free(temp_shadow);
}
return crm_exit(rc);
}
