Page MenuHomeClusterLabs Projects

No OneTemporary

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 2a1ee02a3f..5f155a67c8 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,2412 +1,2421 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <crm/pengine/status.h>
#include <crm/pengine/rules.h>
#include <utils.h>
#include <unpack.h>
CRM_TRACE_INIT_DATA(pe_status);
/* Update one boolean flag bit in data_set->flags from the cluster option
 * named "option".  If the option is absent from the config hash the flag is
 * left untouched (pe_pref() normally supplies defaults, so tmp is rarely
 * NULL).  Wrapped in do/while(0) so it expands as a single statement.
 */
#define set_config_flag(data_set, option, flag) do { \
        const char *tmp = pe_pref(data_set->config_hash, option); \
        if(tmp) { \
            if(crm_is_true(tmp)) { \
                set_bit_inplace(data_set->flags, flag); \
            } else { \
                clear_bit_inplace(data_set->flags, flag); \
            } \
        } \
    } while(0)
gboolean unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, GListPtr next,
enum action_fail_response *failed, pe_working_set_t * data_set);
/* Mark a node as unclean (i.e. a fencing candidate), logging a warning the
 * first time the node transitions from clean to unclean.  The message
 * depends on whether stonith is enabled: with fencing the node "will be
 * fenced", without it we can only report that it is unclean. */
static void
pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason)
{
    gboolean was_clean;

    CRM_CHECK(node, return);

    was_clean = (node->details->unclean == FALSE);
    node->details->unclean = TRUE;

    if (was_clean == FALSE) {
        /* already flagged; don't warn twice */
        return;
    }

    if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
        crm_warn("Node %s will be fenced %s", node->details->uname, reason);
    } else {
        crm_warn("Node %s is unclean %s", node->details->uname, reason);
    }
}
/*
 * Unpack the cluster-wide configuration into data_set.
 *
 * Populates data_set->config_hash from the <crm_config> property sets
 * (rules evaluated against data_set->now), then caches the individual
 * options as flags and fields on data_set, logging each decision.
 * Always returns TRUE.
 */
gboolean
unpack_config(xmlNode * config, pe_working_set_t * data_set)
{
    const char *value = NULL;
    GHashTable *config_hash =
        g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);

    data_set->config_hash = config_hash;

    unpack_instance_attributes(data_set->input, config, XML_CIB_TAG_PROPSET, NULL, config_hash,
                               CIB_OPTIONS_FIRST, FALSE, data_set->now);

    /* warn about deprecated option names and fill in defaults */
    verify_pe_options(data_set->config_hash);

    set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
    crm_info("Startup probes: %s",
             is_set(data_set->flags, pe_flag_startup_probes) ? "enabled" : "disabled (dangerous)");

    value = pe_pref(data_set->config_hash, "stonith-timeout");
    data_set->stonith_timeout = crm_get_msec(value);
    crm_debug("STONITH timeout: %d", data_set->stonith_timeout);

    set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
    crm_debug("STONITH of failed nodes is %s",
              is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");

    data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
    crm_trace("STONITH will %s nodes", data_set->stonith_action);

    set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
    crm_debug("Stop all active resources: %s",
              is_set(data_set->flags, pe_flag_stop_everything) ? "true" : "false");

    set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
    if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
        crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
    }

    value = pe_pref(data_set->config_hash, "default-resource-stickiness");
    data_set->default_resource_stickiness = char2score(value);
    crm_debug("Default stickiness: %d", data_set->default_resource_stickiness);

    /* no-quorum-policy: what to do with resources when quorum is lost */
    value = pe_pref(data_set->config_hash, "no-quorum-policy");

    if (safe_str_eq(value, "ignore")) {
        data_set->no_quorum_policy = no_quorum_ignore;

    } else if (safe_str_eq(value, "freeze")) {
        data_set->no_quorum_policy = no_quorum_freeze;

    } else if (safe_str_eq(value, "suicide")) {
        gboolean do_panic = FALSE;

        crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic);

        /* suicide requires fencing to be of any use */
        if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
            crm_config_err
                ("Setting no-quorum-policy=suicide makes no sense if stonith-enabled=false");
        }

        /* NOTE(review): if do_panic is TRUE but stonith is disabled, or
         * do_panic is FALSE while the cluster has had quorum,
         * no_quorum_policy is left unassigned here - TODO confirm whether
         * callers rely on data_set being zero-initialized in that case */
        if (do_panic && is_set(data_set->flags, pe_flag_stonith_enabled)) {
            data_set->no_quorum_policy = no_quorum_suicide;

        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && do_panic == FALSE) {
            crm_notice("Resetting no-quorum-policy to 'stop': The cluster has never had quorum");
            data_set->no_quorum_policy = no_quorum_stop;
        }

    } else {
        data_set->no_quorum_policy = no_quorum_stop;
    }

    switch (data_set->no_quorum_policy) {
        case no_quorum_freeze:
            crm_debug("On loss of CCM Quorum: Freeze resources");
            break;
        case no_quorum_stop:
            crm_debug("On loss of CCM Quorum: Stop ALL resources");
            break;
        case no_quorum_suicide:
            crm_notice("On loss of CCM Quorum: Fence all remaining nodes");
            break;
        case no_quorum_ignore:
            crm_notice("On loss of CCM Quorum: Ignore");
            break;
    }

    set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
    crm_trace("Orphan resources are %s",
              is_set(data_set->flags, pe_flag_stop_rsc_orphans) ? "stopped" : "ignored");

    set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
    crm_trace("Orphan resource actions are %s",
              is_set(data_set->flags, pe_flag_stop_action_orphans) ? "stopped" : "ignored");

    set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop);
    crm_trace("Stopped resources are removed from the status section: %s",
              is_set(data_set->flags, pe_flag_remove_after_stop) ? "true" : "false");

    set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
    crm_trace("Maintenance mode: %s",
              is_set(data_set->flags, pe_flag_maintenance_mode) ? "true" : "false");

    /* maintenance mode overrides is-managed-default */
    if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
        clear_bit(data_set->flags, pe_flag_is_managed_default);
    } else {
        set_config_flag(data_set, "is-managed-default", pe_flag_is_managed_default);
    }
    crm_trace("By default resources are %smanaged",
              is_set(data_set->flags, pe_flag_is_managed_default) ? "" : "not ");

    set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
    crm_trace("Start failures are %s",
              is_set(data_set->flags,
                     pe_flag_start_failure_fatal) ? "always fatal" : "handled by failcount");

    /* node-health scores feed into the node health strategy elsewhere */
    node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red"));
    node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green"));
    node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow"));

    crm_info("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s",
             pe_pref(data_set->config_hash, "node-health-red"),
             pe_pref(data_set->config_hash, "node-health-yellow"),
             pe_pref(data_set->config_hash, "node-health-green"));

    data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
    crm_trace("Placement strategy: %s", data_set->placement_strategy);

    return TRUE;
}
/*
 * Unpack the <nodes> configuration section.
 *
 * Creates a node_t (with attribute and utilization tables) for every <node>
 * entry and appends it to data_set->nodes.  Whether a node that has not yet
 * appeared in the status section starts out "unclean" (a fencing candidate)
 * depends on the startup-fencing option and on whether stonith is enabled.
 *
 * Returns FALSE only on allocation failure; malformed entries are skipped
 * with a configuration error and TRUE is still returned.
 */
gboolean
unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;
    node_t *new_node = NULL;
    const char *id = NULL;
    const char *uname = NULL;
    const char *type = NULL;
    const char *score = NULL;
    gboolean unseen_are_unclean = TRUE;
    const char *blind_faith = pe_pref(data_set->config_hash, "startup-fencing");

    if (crm_is_true(blind_faith) == FALSE) {
        unseen_are_unclean = FALSE;
        crm_warn("Blind faith: not fencing unseen nodes");
    }

    for (xml_obj = __xml_first_child(xml_nodes); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) {
        if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) {
            new_node = NULL;

            id = crm_element_value(xml_obj, XML_ATTR_ID);
            uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
            type = crm_element_value(xml_obj, XML_ATTR_TYPE);
            score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
            crm_trace("Processing node %s/%s", uname, id);

            if (id == NULL) {
                crm_config_err("Must specify id tag in <node>");
                continue;
            }
            if (type == NULL) {
                crm_config_err("Must specify type tag in <node>");
                continue;
            }
            /* NOTE(review): uname may still be NULL here, in which case
             * duplicate detection cannot work - TODO confirm pe_find_node()
             * tolerates a NULL uname */
            if (pe_find_node(data_set->nodes, uname) != NULL) {
                crm_config_warn("Detected multiple node entries with uname=%s"
                                " - this is rarely intended", uname);
            }

            new_node = calloc(1, sizeof(node_t));
            if (new_node == NULL) {
                return FALSE;
            }

            new_node->weight = char2score(score);
            new_node->fixed = FALSE;
            new_node->details = calloc(1, sizeof(struct node_shared_s));

            if (new_node->details == NULL) {
                free(new_node);
                return FALSE;
            }

            /* fixed typo in log message: was "Creaing" */
            crm_trace("Creating node for entry %s/%s", uname, id);
            /* id/uname point into the CIB XML; the node does not own them */
            new_node->details->id = id;
            new_node->details->uname = uname;
            new_node->details->type = node_ping;
            new_node->details->online = FALSE;
            new_node->details->shutdown = FALSE;
            new_node->details->running_rsc = NULL;
            new_node->details->attrs = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                             g_hash_destroy_str,
                                                             g_hash_destroy_str);
            new_node->details->utilization =
                g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
                                      g_hash_destroy_str);

/* 		if(data_set->have_quorum == FALSE */
/* 		   && data_set->no_quorum_policy == no_quorum_stop) { */
/* 			/\* start shutting resources down *\/ */
/* 			new_node->weight = -INFINITY; */
/* 		} */

            if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE
                || unseen_are_unclean == FALSE) {
                /* blind faith... */
                new_node->details->unclean = FALSE;

            } else {
                /* all nodes are unclean until we've seen their
                 * status entry
                 */
                new_node->details->unclean = TRUE;
            }

            /* the type==NULL arm is unreachable (checked above); kept for
             * symmetry with the other type comparisons */
            if (type == NULL || safe_str_eq(type, "member")
                || safe_str_eq(type, NORMALNODE)) {
                new_node->details->type = node_member;
            }

            add_node_attrs(xml_obj, new_node, FALSE, data_set);
            unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_UTILIZATION, NULL,
                                       new_node->details->utilization, NULL, FALSE, data_set->now);

            data_set->nodes = g_list_append(data_set->nodes, new_node);
            crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
        }
    }
    return TRUE;
}
/* GDestroyNotify for data_set->domains values: each value is a GList of
 * node_t copies, freed element-by-element with free(). */
static void
g_hash_destroy_node_list(gpointer data)
{
    g_list_free_full((GListPtr) data, free);
}
/*
 * Unpack the <domains> configuration section into data_set->domains.
 *
 * Each <domain> becomes a hash table entry mapping the domain id to a GList
 * of weighted node_t copies; node entries that name an unknown node are
 * skipped with a warning.  Always returns TRUE.
 */
gboolean
unpack_domains(xmlNode * xml_domains, pe_working_set_t * data_set)
{
    const char *id = NULL;
    GListPtr domain = NULL;
    xmlNode *xml_node = NULL;
    xmlNode *xml_domain = NULL;

    crm_info("Unpacking domains");
    data_set->domains =
        g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
                              g_hash_destroy_node_list);

    for (xml_domain = __xml_first_child(xml_domains); xml_domain != NULL;
         xml_domain = __xml_next(xml_domain)) {
        if (crm_str_eq((const char *)xml_domain->name, XML_CIB_TAG_DOMAIN, TRUE)) {
            domain = NULL;
            /* NOTE(review): id may be NULL for a malformed <domain>; it is
             * later passed to strdup() - TODO confirm ids are schema-enforced */
            id = crm_element_value(xml_domain, XML_ATTR_ID);

            for (xml_node = __xml_first_child(xml_domain); xml_node != NULL;
                 xml_node = __xml_next(xml_node)) {
                if (crm_str_eq((const char *)xml_node->name, XML_CIB_TAG_NODE, TRUE)) {
                    node_t *copy = NULL;
                    node_t *node = NULL;
                    const char *uname = crm_element_value(xml_node, "name");
                    const char *score = crm_element_value(xml_node, XML_RULE_ATTR_SCORE);

                    if (uname == NULL) {
                        crm_config_err("Invalid domain %s: Must specify id tag in <node>", id);
                        continue;
                    }

                    /* accept either a uname or, failing that, a node id */
                    node = pe_find_node(data_set->nodes, uname);
                    if (node == NULL) {
                        node = pe_find_node_id(data_set->nodes, uname);
                    }

                    if (node == NULL) {
                        crm_config_warn("Invalid domain %s: Node %s does not exist", id, uname);
                        continue;
                    }

                    copy = node_copy(node);
                    copy->weight = char2score(score);
                    crm_debug("Adding %s to domain %s with score %s", node->details->uname, id,
                              score);

                    domain = g_list_prepend(domain, copy);
                }
            }

            if (domain) {
                crm_debug("Created domain %s with %d members", id, g_list_length(domain));
                g_hash_table_replace(data_set->domains, strdup(id), domain);
            }
        }
    }
    return TRUE;
}
/* GDestroyNotify for data_set->template_rsc_sets values (xmlNode trees). */
static void
destroy_template_rsc_set(gpointer data)
{
    free_xml((xmlNode *) data);
}
/*
 * Unpack the <resources> configuration section.
 *
 * Records every rsc_template id in data_set->template_rsc_sets (value NULL
 * until instances reference it), unpacks every other child via
 * common_unpack() into data_set->resources, then sorts resources by
 * priority.  Warns if stonith is enabled but no STONITH resource exists.
 * Always returns TRUE (individual failures are logged and skipped).
 */
gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;

    data_set->template_rsc_sets =
        g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
                              destroy_template_rsc_set);

    for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) {
        resource_t *new_rsc = NULL;

        if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) {
            const char *template_id = ID(xml_obj);

            if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets,
                                                            template_id, NULL, NULL) == FALSE) {
                /* Record the template's ID for the knowledge of its existence anyway. */
                g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL);
            }
            continue;
        }

        crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj));
        if (common_unpack(xml_obj, &new_rsc, NULL, data_set)) {
            data_set->resources = g_list_append(data_set->resources, new_rsc);

            print_resource(LOG_DEBUG_3, "Added", new_rsc, FALSE);

        } else {
            crm_config_err("Failed unpacking %s %s",
                           crm_element_name(xml_obj), crm_element_value(xml_obj, XML_ATTR_ID));
            if (new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
        }
    }

    data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);

    if (is_set(data_set->flags, pe_flag_stonith_enabled)
        && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        crm_config_err("Resource start-up disabled since no STONITH resources have been defined");
        crm_config_err("Either configure some or disable STONITH with the stonith-enabled option");
        crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
    }

    return TRUE;
}
/* The ticket state section:
 * "/cib/status/tickets/ticket_state" */
/*
 * Unpack a single <ticket_state> element into data_set->tickets.
 *
 * Copies every XML attribute (except "id") into the ticket's state hash,
 * then derives the granted/last-granted/standby fields from it, creating
 * the ticket_t on first sight.  Returns FALSE on a missing/empty id or
 * allocation failure, TRUE otherwise.
 */
static gboolean
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
{
    const char *ticket_id = NULL;
    const char *granted = NULL;
    const char *last_granted = NULL;
    const char *standby = NULL;
    xmlAttrPtr xIter = NULL;

    ticket_t *ticket = NULL;

    ticket_id = ID(xml_ticket);
    if (ticket_id == NULL || strlen(ticket_id) == 0) {
        return FALSE;
    }

    crm_trace("Processing ticket state for %s", ticket_id);

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {
        ticket = ticket_new(ticket_id, data_set);
        if (ticket == NULL) {
            return FALSE;
        }
    }

    /* walk the element's own attributes; crm_element_value() re-reads each
     * one by name, so prop_value is the attribute just iterated over */
    for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
        const char *prop_name = (const char *)xIter->name;
        const char *prop_value = crm_element_value(xml_ticket, prop_name);

        if(crm_str_eq(prop_name, XML_ATTR_ID, TRUE)) {
            continue;
        }
        g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
    }

    granted = g_hash_table_lookup(ticket->state, "granted");
    if (granted && crm_is_true(granted)) {
        ticket->granted = TRUE;
        crm_info("We have ticket '%s'", ticket->id);
    } else {
        ticket->granted = FALSE;
        crm_info("We do not have ticket '%s'", ticket->id);
    }

    last_granted = g_hash_table_lookup(ticket->state, "last-granted");
    if (last_granted) {
        ticket->last_granted = crm_parse_int(last_granted, 0);
    }

    standby = g_hash_table_lookup(ticket->state, "standby");
    if (standby && crm_is_true(standby)) {
        ticket->standby = TRUE;
        if (ticket->granted) {
            crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
        }
    } else {
        ticket->standby = FALSE;
    }

    crm_trace("Done with ticket state for %s", ticket_id);

    return TRUE;
}
/* Unpack every <ticket_state> child of the <tickets> status element.
 * Other child elements are ignored.  Always returns TRUE. */
static gboolean
unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
{
    xmlNode *xml_obj;

    for (xml_obj = __xml_first_child(xml_tickets); xml_obj != NULL;
         xml_obj = __xml_next(xml_obj)) {
        if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE)) {
            unpack_ticket_state(xml_obj, data_set);
        }
    }
    return TRUE;
}
/* Compatibility with the deprecated ticket state section:
 * "/cib/status/tickets/instance_attributes" */
/*
 * GHFunc for the legacy ticket attribute hash.  Recognizes three key
 * formats and folds each into the matching ticket's state hash:
 *   "granted-ticket-<id>"  -> state key "granted"
 *   "last-granted-<id>"    -> state key "last-granted"
 *   "<key>-<id>"           -> state key "<key>" (split at the LAST '-')
 * Keys that yield an empty ticket id or state key are ignored.  The
 * ticket_t is created on first sight.
 */
static void
get_ticket_state_legacy(gpointer key, gpointer value, gpointer user_data)
{
    const char *long_key = key;
    char *state_key = NULL;

    const char *granted_prefix = "granted-ticket-";
    const char *last_granted_prefix = "last-granted-";
    /* prefix lengths computed once and cached across calls */
    static int granted_prefix_strlen = 0;
    static int last_granted_prefix_strlen = 0;

    const char *ticket_id = NULL;
    const char *is_granted = NULL;
    const char *last_granted = NULL;
    const char *sep = NULL;

    ticket_t *ticket = NULL;
    pe_working_set_t *data_set = user_data;

    if (granted_prefix_strlen == 0) {
        granted_prefix_strlen = strlen(granted_prefix);
    }

    if (last_granted_prefix_strlen == 0) {
        last_granted_prefix_strlen = strlen(last_granted_prefix);
    }

    /* strstr(s, p) == s  <=>  s starts with p */
    if (strstr(long_key, granted_prefix) == long_key) {
        ticket_id = long_key + granted_prefix_strlen;
        if (strlen(ticket_id)) {
            state_key = strdup("granted");
            is_granted = value;
        }
    } else if (strstr(long_key, last_granted_prefix) == long_key) {
        ticket_id = long_key + last_granted_prefix_strlen;
        if (strlen(ticket_id)) {
            state_key = strdup("last-granted");
            last_granted = value;
        }
    } else if ((sep = strrchr(long_key, '-'))) {
        ticket_id = sep + 1;
        state_key = strndup(long_key, strlen(long_key) - strlen(sep));
    }

    if (ticket_id == NULL || strlen(ticket_id) == 0) {
        free(state_key);
        return;
    }

    if (state_key == NULL || strlen(state_key) == 0) {
        free(state_key);
        return;
    }

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {
        ticket = ticket_new(ticket_id, data_set);
        if (ticket == NULL) {
            free(state_key);
            return;
        }
    }

    /* ticket->state takes ownership of state_key and the value copy */
    g_hash_table_replace(ticket->state, state_key, strdup(value));

    if (is_granted) {
        if (crm_is_true(is_granted)) {
            ticket->granted = TRUE;
            crm_info("We have ticket '%s'", ticket->id);
        } else {
            ticket->granted = FALSE;
            crm_info("We do not have ticket '%s'", ticket->id);
        }

    } else if (last_granted) {
        ticket->last_granted = crm_parse_int(last_granted, 0);
    }
}
/* remove nodes that are down, stopping */
/* create +ve rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
/*
 * Unpack the CIB <status> section.
 *
 * Three passes over the node_state entries:
 *   1. tickets + per-node attributes/online-status (may schedule fencing
 *      when no-quorum-policy=suicide and quorum is lost);
 *   2. lrm resource histories for online nodes (all node states must be
 *      known first - see bug lf#2508);
 *   3. lrm resource histories for offline nodes, but only when stonith is
 *      enabled (so resource starts can be ordered after fencing).
 * Always returns TRUE.
 */
gboolean
unpack_status(xmlNode * status, pe_working_set_t * data_set)
{
    const char *id = NULL;
    const char *uname = NULL;

    xmlNode *lrm_rsc = NULL;
    xmlNode *attrs = NULL;
    xmlNode *state = NULL;
    xmlNode *node_state = NULL;
    node_t *this_node = NULL;

    crm_trace("Beginning unpack");

    if (data_set->tickets == NULL) {
        data_set->tickets =
            g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
    }

    for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) {
            xmlNode *xml_tickets = state;
            GHashTable *state_hash = NULL;

            /* Compatibility with the deprecated ticket state section:
             * Unpack the attributes in the deprecated "/cib/status/tickets/instance_attributes" if it exists. */
            state_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
                                               g_hash_destroy_str);

            unpack_instance_attributes(data_set->input, xml_tickets, XML_TAG_ATTR_SETS, NULL,
                                       state_hash, NULL, TRUE, data_set->now);

            g_hash_table_foreach(state_hash, get_ticket_state_legacy, data_set);

            if (state_hash) {
                g_hash_table_destroy(state_hash);
            }

            /* Unpack the new "/cib/status/tickets/ticket_state"s */
            unpack_tickets_state(xml_tickets, data_set);
        }

        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) {
            node_state = state;
            id = crm_element_value(node_state, XML_ATTR_ID);
            uname = crm_element_value(node_state, XML_ATTR_UNAME);
            attrs = find_xml_node(node_state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);

            crm_trace("Processing node id=%s, uname=%s", id, uname);

            this_node = pe_find_node_id(data_set->nodes, id);
            if (uname == NULL) {
                /* error */
                continue;

            } else if (this_node == NULL) {
                crm_config_warn("Node %s in status section no longer exists", uname);
                continue;
            }

            /* Mark the node as provisionally clean
             * - at least we have seen it in the current cluster's lifetime
             */
            this_node->details->unclean = FALSE;
            add_node_attrs(attrs, this_node, TRUE, data_set);

            if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
                crm_info("Node %s is in standby-mode", this_node->details->uname);
                this_node->details->standby = TRUE;
            }

            crm_trace("determining node state");
            determine_online_status(node_state, this_node, data_set);

            if (this_node->details->online && data_set->no_quorum_policy == no_quorum_suicide) {
                /* Everything else should flow from this automatically
                 * At least until the PE becomes able to migrate off healthy resources
                 */
                pe_fence_node(data_set, this_node, "because the cluster does not have quorum");
            }
        }
    }

    /* Now that we know all node states, we can safely handle migration ops
     * But, for now, only process healthy nodes
     * - this is necessary for the logic in bug lf#2508 to function correctly
     */
    for (node_state = __xml_first_child(status); node_state != NULL;
         node_state = __xml_next(node_state)) {
        if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
            continue;
        }

        id = crm_element_value(node_state, XML_ATTR_ID);
        this_node = pe_find_node_id(data_set->nodes, id);

        if (this_node == NULL) {
            crm_info("Node %s is unknown", id);
            continue;

        } else if (this_node->details->online) {
            crm_trace("Processing lrm resource entries on healthy node: %s",
                      this_node->details->uname);
            lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
            unpack_lrm_resources(this_node, lrm_rsc, data_set);
        }
    }

    /* Now handle failed nodes - but only if stonith is enabled
     *
     * By definition, offline nodes run no resources so there is nothing to do.
     * Only when stonith is enabled do we need to know what is on the node to
     * ensure rsc start events happen after the stonith
     */
    for (node_state = __xml_first_child(status);
         node_state != NULL && is_set(data_set->flags, pe_flag_stonith_enabled);
         node_state = __xml_next(node_state)) {
        if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
            continue;
        }

        id = crm_element_value(node_state, XML_ATTR_ID);
        this_node = pe_find_node_id(data_set->nodes, id);

        if (this_node == NULL || this_node->details->online) {
            continue;

        } else {
            crm_trace("Processing lrm resource entries on unhealthy node: %s",
                      this_node->details->uname);
            lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
            unpack_lrm_resources(this_node, lrm_rsc, data_set);
        }
    }

    return TRUE;
}
/*
 * Decide whether a node counts as online when stonith is DISABLED.
 *
 * A node is online only if it is in the membership (ccm), its HA state is
 * not dead, crmd reports ONLINESTATUS and the join state is "member".
 * A node that is partially up but was not expected to be down is marked
 * unclean via pe_fence_node() (which, without stonith, only logs/flags).
 * Returns TRUE if the node should be treated as online.
 */
static gboolean
determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                   node_t * this_node)
{
    gboolean online = FALSE;
    const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE);
    const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE);
    const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM);
    const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE);
    const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE);

    /* a missing HA state is treated as dead */
    if (ha_state == NULL) {
        ha_state = DEADSTATUS;
    }

    if (!crm_is_true(ccm_state) || safe_str_eq(ha_state, DEADSTATUS)) {
        crm_trace("Node is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state));

    } else if (safe_str_eq(crm_state, ONLINESTATUS)) {
        if (safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) {
            online = TRUE;
        } else {
            crm_debug("Node is not ready to run resources: %s", join_state);
        }

    } else if (this_node->details->expected_up == FALSE) {
        crm_trace("CRMd is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state));
        crm_trace("\tcrm_state=%s, join_state=%s, expected=%s",
                  crm_str(crm_state), crm_str(join_state), crm_str(exp_state));

    } else {
        /* mark it unclean */
        pe_fence_node(data_set, this_node, "because it is partially and/or un-expectedly down");
        crm_info("\tha_state=%s, ccm_state=%s,"
                 " crm_state=%s, join_state=%s, expected=%s",
                 crm_str(ha_state), crm_str(ccm_state),
                 crm_str(crm_state), crm_str(join_state), crm_str(exp_state));
    }
    return online;
}
/*
 * Decide whether a node counts as online when stonith is ENABLED.
 *
 * Classifies the node from its membership (ccm), HA, crmd and join states
 * plus the "terminate" node attribute.  Nodes that are only partially up,
 * or down when they were expected up, are scheduled for fencing via
 * pe_fence_node().  Nodes still joining are marked standby+pending but
 * treated as online so their resources are accounted for.
 * Returns TRUE if the node should be treated as online.
 */
static gboolean
determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                node_t * this_node)
{
    gboolean online = FALSE;
    gboolean do_terminate = FALSE;
    const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE);
    const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE);
    const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM);
    const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE);
    const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE);
    const char *terminate = g_hash_table_lookup(this_node->details->attrs, "terminate");

    /* a missing HA state is treated as dead */
    if (ha_state == NULL) {
        ha_state = DEADSTATUS;
    }

    /* "terminate" may be a boolean or a time() value; any non-zero leading
     * digit counts as a termination request */
    if (crm_is_true(terminate)) {
        do_terminate = TRUE;

    } else if (terminate != NULL && strlen(terminate) > 0) {
        /* could be a time() value */
        char t = terminate[0];

        if (t != '0' && isdigit(t)) {
            do_terminate = TRUE;
        }
    }

    if (crm_is_true(ccm_state)
        && safe_str_eq(ha_state, ACTIVESTATUS)
        && safe_str_eq(crm_state, ONLINESTATUS)) {

        if (safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) {
            online = TRUE;
            if (do_terminate) {
                pe_fence_node(data_set, this_node, "because termination was requested");
            }

        } else if (join_state == exp_state /* == NULL */ ) {
            crm_info("Node %s is coming up", this_node->details->uname);
            crm_debug("\tha_state=%s, ccm_state=%s,"
                      " crm_state=%s, join_state=%s, expected=%s",
                      crm_str(ha_state), crm_str(ccm_state),
                      crm_str(crm_state), crm_str(join_state), crm_str(exp_state));

        } else if (safe_str_eq(join_state, CRMD_JOINSTATE_PENDING)) {
            crm_info("Node %s is not ready to run resources", this_node->details->uname);
            this_node->details->standby = TRUE;
            this_node->details->pending = TRUE;
            online = TRUE;

        } else if (safe_str_eq(join_state, CRMD_JOINSTATE_NACK)) {
            crm_warn("Node %s is not part of the cluster", this_node->details->uname);
            this_node->details->standby = TRUE;
            this_node->details->pending = TRUE;
            online = TRUE;

        } else if (safe_str_eq(join_state, exp_state)) {
            crm_info("Node %s is still coming up: %s", this_node->details->uname, join_state);
            crm_info("\tha_state=%s, ccm_state=%s, crm_state=%s",
                     crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state));
            this_node->details->standby = TRUE;
            this_node->details->pending = TRUE;
            online = TRUE;

        } else {
            crm_warn("Node %s (%s) is un-expectedly down",
                     this_node->details->uname, this_node->details->id);
            crm_info("\tha_state=%s, ccm_state=%s,"
                     " crm_state=%s, join_state=%s, expected=%s",
                     crm_str(ha_state), crm_str(ccm_state),
                     crm_str(crm_state), crm_str(join_state), crm_str(exp_state));
            pe_fence_node(data_set, this_node, "because it is un-expectedly down");
        }

    } else if (crm_is_true(ccm_state) == FALSE && safe_str_eq(ha_state, DEADSTATUS)
               && safe_str_eq(crm_state, OFFLINESTATUS)
               && this_node->details->expected_up == FALSE) {
        crm_debug("Node %s is down: join_state=%s, expected=%s",
                  this_node->details->uname, crm_str(join_state), crm_str(exp_state));

#if 0
        /* While a nice optimization, it causes the cluster to block until the node
         * comes back online.  Which is a serious problem if the cluster software
         * is not configured to start at boot or stonith is configured to merely
         * stop the node instead of restart it.
         * Easily triggered by setting terminate=true for the DC
         */
    } else if (do_terminate) {
        crm_info("Node %s is %s after forced termination",
                 this_node->details->uname, crm_is_true(ccm_state) ? "coming up" : "going down");
        crm_debug("\tha_state=%s, ccm_state=%s,"
                  " crm_state=%s, join_state=%s, expected=%s",
                  crm_str(ha_state), crm_str(ccm_state),
                  crm_str(crm_state), crm_str(join_state), crm_str(exp_state));

        if (crm_is_true(ccm_state) == FALSE) {
            this_node->details->standby = TRUE;
            this_node->details->pending = TRUE;
            online = TRUE;
        }
#endif

    } else if (this_node->details->expected_up) {
        /* mark it unclean */
        pe_fence_node(data_set, this_node, "because it is un-expectedly down");
        crm_info("\tha_state=%s, ccm_state=%s,"
                 " crm_state=%s, join_state=%s, expected=%s",
                 crm_str(ha_state), crm_str(ccm_state),
                 crm_str(crm_state), crm_str(join_state), crm_str(exp_state));

    } else {
        crm_info("Node %s is down", this_node->details->uname);
        crm_debug("\tha_state=%s, ccm_state=%s,"
                  " crm_state=%s, join_state=%s, expected=%s",
                  crm_str(ha_state), crm_str(ccm_state),
                  crm_str(crm_state), crm_str(join_state), crm_str(exp_state));
    }
    return online;
}
/*
 * Determine and record a node's online status from its node_state entry.
 *
 * Sets this_node->details->shutdown / expected_up from the shutdown
 * attribute and expected join state, then delegates to the fencing or
 * no-fencing variant depending on pe_flag_stonith_enabled.  Offline or
 * shutting-down nodes get weight -INFINITY (fixed) so nothing is placed
 * on them.  Returns the computed online state.
 */
gboolean
determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set_t * data_set)
{
    gboolean online = FALSE;
    const char *shutdown = NULL;
    const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE);

    if (this_node == NULL) {
        crm_config_err("No node to check");
        return online;
    }

    this_node->details->shutdown = FALSE;
    this_node->details->expected_up = FALSE;

    shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN);

    /* a non-zero shutdown attribute means a shutdown is in progress */
    if (shutdown != NULL && safe_str_neq("0", shutdown)) {
        this_node->details->shutdown = TRUE;

    } else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) {
        this_node->details->expected_up = TRUE;
    }

    if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
        online = determine_online_status_no_fencing(data_set, node_state, this_node);

    } else {
        online = determine_online_status_fencing(data_set, node_state, this_node);
    }

    if (online) {
        this_node->details->online = TRUE;

    } else {
        /* remove node from contention */
        this_node->fixed = TRUE;
        this_node->weight = -INFINITY;
    }

    if (online && this_node->details->shutdown) {
        /* dont run resources here */
        this_node->fixed = TRUE;
        this_node->weight = -INFINITY;
    }

    if (this_node->details->unclean) {
        pe_proc_warn("Node %s is unclean", this_node->details->uname);

    } else if (this_node->details->online) {
        crm_info("Node %s is %s", this_node->details->uname,
                 this_node->details->shutdown ? "shutting down" :
                 this_node->details->pending ? "pending" :
                 this_node->details->standby ? "standby" : "online");

    } else {
        crm_trace("Node %s is offline", this_node->details->uname);
    }

    return online;
}
/* Overwrite last_rsc_id[lpc] with x and flag the increment loop as complete.
 * Wrapped in do/while(0) so the two statements expand as a single statement
 * (the previous bare form only worked because every call site was a full
 * statement inside a switch case; see CERT PRE10-C). */
#define set_char(x) do { last_rsc_id[lpc] = (x); complete = TRUE; } while(0)
/*
 * Given a clone instance id like "<name>:<N>", return a newly allocated
 * string "<name>:0" (instance zero of the same clone).
 *
 * Scans backwards from the end: digits are skipped until a ':' is found;
 * everything before the ':' is copied and ":0" appended.  Returns NULL if
 * the input is NULL or contains no ':' before position 1.  The caller owns
 * the returned string.
 *
 * NOTE(review): non-digit characters between the end and the ':' fall
 * through the switch and are silently skipped, so "ab:cd" also yields
 * "ab:0" - presumably only "<name>:<digits>" inputs occur; verify callers.
 */
char *
clone_zero(const char *last_rsc_id)
{
    int lpc = 0;
    char *zero = NULL;

    CRM_CHECK(last_rsc_id != NULL, return NULL);
    if (last_rsc_id != NULL) {
        lpc = strlen(last_rsc_id);
    }

    while (--lpc > 0) {
        switch (last_rsc_id[lpc]) {
            case 0:
                return NULL;
                break;
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
                break;
            case ':':
                /* lpc chars of the base name + ":0" + NUL */
                zero = calloc(1, lpc + 3);
                memcpy(zero, last_rsc_id, lpc);
                zero[lpc] = ':';
                zero[lpc + 1] = '0';
                zero[lpc + 2] = 0;
                return zero;
        }
    }
    return NULL;
}
/*
 * Increment the numeric suffix of a clone instance id "<name>:<N>" by one,
 * in place where possible (decimal ripple-carry from the last digit).
 *
 * If the carry propagates all the way to the ':' (e.g. "rsc:99" -> "rsc:100")
 * a LARGER buffer is allocated, the old one is FREED, and the new pointer is
 * returned.  Callers must therefore always use the return value and must
 * pass a heap-allocated string.  Returns NULL on NULL input or an
 * unexpected character.
 */
char *
increment_clone(char *last_rsc_id)
{
    int lpc = 0;
    int len = 0;
    char *tmp = NULL;
    gboolean complete = FALSE;

    CRM_CHECK(last_rsc_id != NULL, return NULL);
    if (last_rsc_id != NULL) {
        len = strlen(last_rsc_id);
    }

    lpc = len - 1;
    while (complete == FALSE && lpc > 0) {
        switch (last_rsc_id[lpc]) {
            case 0:
                lpc--;
                break;
            case '0':
                set_char('1');
                break;
            case '1':
                set_char('2');
                break;
            case '2':
                set_char('3');
                break;
            case '3':
                set_char('4');
                break;
            case '4':
                set_char('5');
                break;
            case '5':
                set_char('6');
                break;
            case '6':
                set_char('7');
                break;
            case '7':
                set_char('8');
                break;
            case '8':
                set_char('9');
                break;
            case '9':
                /* carry: this digit wraps to '0', continue one place left */
                last_rsc_id[lpc] = '0';
                lpc--;
                break;
            case ':':
                /* carry reached the separator: grow "name:99" -> "name:100".
                 * The original buffer is freed; only the return value is valid. */
                tmp = last_rsc_id;
                last_rsc_id = calloc(1, len + 2);
                memcpy(last_rsc_id, tmp, len);
                last_rsc_id[++lpc] = '1';
                last_rsc_id[len] = '0';
                last_rsc_id[len + 1] = 0;
                complete = TRUE;
                free(tmp);
                break;
            default:
                crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc);
                return NULL;
                break;
        }
    }
    return last_rsc_id;
}
/*
 * Extract the clone instance number from an id of the form "<name>:<N>".
 *
 * Scans backwards from the end of the string accumulating decimal digits
 * until the ':' separator is reached.  Returns the parsed number, -1 if the
 * id is NULL or contains no ':' before position 1, or the partial value on
 * an unexpected character (logged, matching the previous error path).
 *
 * Bug fix: the previous implementation weighted each digit by its distance
 * from the end of the string ((len - lpc)) instead of by its decimal place
 * value, so any instance number >= 10 was computed incorrectly (e.g.
 * "rsc:12" yielded 4 instead of 12).  Single-digit suffixes were unaffected.
 */
static int
get_clone(char *last_rsc_id)
{
    int clone = 0;
    int mult = 1;               /* decimal place value of the current digit */
    int lpc = 0;
    int len = 0;

    CRM_CHECK(last_rsc_id != NULL, return -1);
    if (last_rsc_id != NULL) {
        len = strlen(last_rsc_id);
    }

    lpc = len - 1;
    while (lpc > 0) {
        switch (last_rsc_id[lpc]) {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
                clone += (int)(last_rsc_id[lpc] - '0') * mult;
                mult *= 10;
                lpc--;
                break;
            case ':':
                return clone;
                break;
            default:
                crm_err("Unexpected char: %d (%c)", lpc, last_rsc_id[lpc]);
                return clone;
                break;
        }
    }
    return -1;
}
/* Synthesize a resource object for an id that appears in the status
 * section but no longer exists in the configuration.  The new resource
 * is flagged as an orphan and appended to the working set's resource
 * list.  Returns NULL if the synthesized definition cannot be unpacked.
 */
static resource_t *
create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
    resource_t *orphan = NULL;
    xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    /* Clone the status entry's properties onto a fresh <primitive>,
     * forcing the id we were asked for */
    copy_in_properties(xml_rsc, rsc_entry);
    crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
    crm_log_xml_debug(xml_rsc, "Orphan resource");

    if (common_unpack(xml_rsc, &orphan, NULL, data_set) == FALSE) {
        return NULL;
    }

    set_bit(orphan->flags, pe_rsc_orphan);
    data_set->resources = g_list_append(data_set->resources, orphan);
    return orphan;
}
extern resource_t *create_child_clone(resource_t * rsc, int sub_id, pe_working_set_t * data_set);
/* Locate (or create) the clone instance of `parent` that should represent
 * rsc_id as recorded in the status section on `node`.
 *
 * Unique clones are matched by id as-is.  For anonymous clones the
 * instance suffix in the status section cannot be trusted, so we try:
 *   1. a partially-active orphan already placed on this node (only
 *      applies to cloned groups),
 *   2. when the clone packs a group, the instance number of a peer
 *      already active on this node,
 *   3. otherwise the first inactive instance, counting suffixes upward
 *      via increment_clone().
 * If nothing matches, a new orphan instance is created at orphan_check.
 *
 * The returned resource gets clone_name set to rsc_id when its canonical
 * id differs.  Never returns NULL (asserts instead).
 */
static resource_t *
find_clone(pe_working_set_t * data_set, node_t * node, resource_t * parent, const char *rsc_id)
{
    int len = 0;
    resource_t *rsc = NULL;
    char *base = clone_zero(rsc_id);
    char *alt_rsc_id = NULL;

    CRM_ASSERT(parent != NULL);
    CRM_ASSERT(parent->variant == pe_clone || parent->variant == pe_master);

    /* clone_zero() yields "<name>:0"; chop the trailing '0' so base is
     * the "<name>:" prefix used for pe_find_partial matching below */
    if (base) {
        len = strlen(base);
    }
    if (len > 0) {
        base[len - 1] = 0;
    }

    crm_trace("Looking for %s on %s in %s %d",
              rsc_id, node->details->uname, parent->id, is_set(parent->flags, pe_rsc_unique));

    if (is_set(parent->flags, pe_rsc_unique)) {
        /* Globally-unique clone: the recorded id is authoritative */
        crm_trace("Looking for %s", rsc_id);
        rsc = parent->fns->find_rsc(parent, rsc_id, NULL, pe_find_current);

    } else {
        crm_trace("Looking for %s on %s", base, node->details->uname);
        rsc = parent->fns->find_rsc(parent, base, node, pe_find_partial | pe_find_current);
        if (rsc != NULL && rsc->running_on) {
            GListPtr gIter = parent->children;

            rsc = NULL;
            crm_trace("Looking for an existing orphan for %s: %s on %s", parent->id, rsc_id,
                      node->details->uname);

            /* There is already an instance of this _anonymous_ clone active on "node".
             *
             * If there is a partially active orphan (only applies to clone groups) on
             * the same node, use that.
             * Otherwise create a new (orphaned) instance at "orphan_check:".
             */
            for (; gIter != NULL; gIter = gIter->next) {
                resource_t *child = (resource_t *) gIter->data;
                node_t *loc = child->fns->location(child, NULL, TRUE);

                if (loc && loc->details == node->details) {
                    resource_t *tmp =
                        child->fns->find_rsc(child, base, NULL, pe_find_partial | pe_find_current);

                    /* a child placed here but not actually running:
                     * the partially-active orphan we want */
                    if (tmp && tmp->running_on == NULL) {
                        rsc = tmp;
                        break;
                    }
                }
            }

            goto orphan_check;

        } else if (((resource_t *) parent->children->data)->variant == pe_group) {
            /* If we're grouped, we need to look for a peer thats active on $node
             * and use their clone instance number
             */
            resource_t *peer =
                parent->fns->find_rsc(parent, NULL, node, pe_find_clone | pe_find_current);

            if (peer && peer->running_on) {
                char buffer[256];
                int clone_num = get_clone(peer->id);

                snprintf(buffer, 256, "%s%d", base, clone_num);
                rsc =
                    parent->fns->find_rsc(parent, buffer, node, pe_find_current | pe_find_inactive);
                if (rsc) {
                    crm_trace("Found someone active: %s on %s, becoming %s", peer->id,
                              ((node_t *) peer->running_on->data)->details->uname, buffer);
                }
            }
        }

        /* Fall back to scanning instance numbers upward for the first
         * inactive instance; increment_clone() may reallocate alt_rsc_id */
        if (parent->fns->find_rsc(parent, rsc_id, NULL, pe_find_current)) {
            alt_rsc_id = strdup(rsc_id);
        } else {
            alt_rsc_id = clone_zero(rsc_id);
        }
        while (rsc == NULL) {
            rsc = parent->fns->find_rsc(parent, alt_rsc_id, NULL, pe_find_current);
            if (rsc == NULL) {
                crm_trace("Unknown resource: %s", alt_rsc_id);
                break;
            }

            if (rsc->running_on == NULL) {
                crm_trace("Resource %s: just right", alt_rsc_id);
                break;
            }

            crm_trace("Resource %s: already active", alt_rsc_id);
            alt_rsc_id = increment_clone(alt_rsc_id);
            rsc = NULL;
        }
    }

  orphan_check:
    if (rsc == NULL) {
        /* Create an extra orphan */
        resource_t *top = create_child_clone(parent, -1, data_set);

        crm_debug("Created orphan for %s: %s on %s", parent->id, rsc_id, node->details->uname);
        rsc = top->fns->find_rsc(top, base, NULL, pe_find_current | pe_find_partial);
        CRM_ASSERT(rsc != NULL);
    }

    /* Remember the status-section name only when it differs from the
     * canonical id we settled on */
    free(rsc->clone_name);
    rsc->clone_name = NULL;

    if (safe_str_neq(rsc_id, rsc->id)) {
        crm_info("Internally renamed %s on %s to %s%s",
                 rsc_id, node->details->uname, rsc->id,
                 is_set(rsc->flags, pe_rsc_orphan) ? " (ORPHAN)" : "");
        rsc->clone_name = strdup(rsc_id);
    }

    free(alt_rsc_id);
    free(base);
    return rsc;
}
/* Map a status-section resource id to a resource object.
 *
 * Looks the id up directly; failing that, tries the ":0" form (even when
 * clone-max=0 a single :0 orphan exists to match against).  If the id
 * belongs to a clone or master, defer to find_clone() to pick the right
 * instance for this node.  May return NULL for a genuine orphan.
 *
 * Fix: dropped the pointless strdup(rsc_id)/free() round-trip -- the copy
 * was never modified, it was only passed to lookups and traces, so rsc_id
 * can be used directly.
 */
static resource_t *
unpack_find_resource(pe_working_set_t * data_set, node_t * node, const char *rsc_id,
                     xmlNode * rsc_entry)
{
    resource_t *rsc = NULL;
    resource_t *clone_parent = NULL;

    crm_trace("looking for %s", rsc_id);
    rsc = pe_find_resource(data_set->resources, rsc_id);

    /* no match */
    if (rsc == NULL) {
        /* Even when clone-max=0, we still create a single :0 orphan to match against */
        char *tmp = clone_zero(rsc_id);
        resource_t *clone0 = pe_find_resource(data_set->resources, tmp);

        clone_parent = uber_parent(clone0);
        free(tmp);

        crm_trace("%s not found: %s", rsc_id, clone_parent ? clone_parent->id : "orphan");

    } else {
        clone_parent = uber_parent(rsc);
    }

    if (clone_parent && clone_parent->variant > pe_group) {
        /* Clones/masters need per-node instance resolution */
        rsc = find_clone(data_set, node, clone_parent, rsc_id);
        CRM_ASSERT(rsc != NULL);
    }

    return rsc;
}
/* Create a resource object for an orphan found in the status section.
 *
 * If the cluster is not configured to stop orphans, the resource is
 * simply marked unmanaged.  Otherwise it is banned from running anywhere
 * and, on any online node with a recorded failcount, a failcount-clear
 * action is scheduled before the probe-complete pseudo-op.
 *
 * Fix: the NULL check now happens immediately after create_fake_resource()
 * (which returns NULL when common_unpack() rejects the synthesized
 * definition) -- previously rsc->flags was dereferenced before the check.
 * Also renamed the loop variable so it no longer shadows the `node`
 * parameter.
 */
static resource_t *
process_orphan_resource(xmlNode * rsc_entry, node_t * node, pe_working_set_t * data_set)
{
    resource_t *rsc = NULL;
    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);

    crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname);
    rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
    CRM_CHECK(rsc != NULL, return NULL);

    if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
        clear_bit(rsc->flags, pe_rsc_managed);

    } else {
        GListPtr gIter = NULL;

        print_resource(LOG_DEBUG_3, "Added orphan", rsc, FALSE);

        /* Ban the orphan from every node */
        resource_location(rsc, NULL, -INFINITY, "__orphan_dont_run__", data_set);

        for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
            node_t *candidate = (node_t *) gIter->data;

            if (candidate->details->online && get_failcount(candidate, rsc, NULL, data_set)) {
                action_t *clear_op = NULL;
                action_t *ready = get_pseudo_op(CRM_OP_PROBED, data_set);

                clear_op = custom_action(rsc, crm_concat(rsc->id, CRM_OP_CLEAR_FAILCOUNT, '_'),
                                         CRM_OP_CLEAR_FAILCOUNT, candidate, FALSE, TRUE, data_set);

                add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
                crm_info("Clearing failcount (%d) for orphaned resource %s on %s (%s)",
                         get_failcount(candidate, rsc, NULL, data_set), rsc->id,
                         candidate->details->uname, clear_op->uuid);

                /* Clear the failcount before declaring probing complete */
                order_actions(clear_op, ready, pe_order_optional);
            }
        }
    }
    return rsc;
}
/* Fold the final calculated state of rsc on node into the working set:
 * record which nodes know the resource's state, apply the accumulated
 * on-fail directive from its operation history, then register it as
 * running (or clean up the stopped case).
 *
 * NOTE(review): the migrate_op parameter is never referenced in this
 * body -- presumably kept for the call signature; confirm before use.
 */
static void
process_rsc_state(resource_t * rsc, node_t * node,
                  enum action_fail_response on_fail,
                  xmlNode * migrate_op, pe_working_set_t * data_set)
{
    crm_trace("Resource %s is %s on %s: on_fail=%s",
              rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail));

    /* process current state */
    if (rsc->role != RSC_ROLE_UNKNOWN) {
        resource_t *iter = rsc;

        /* Record that this node knows the state of rsc and of every
         * ancestor up to (and including) the first globally-unique one */
        while (iter) {
            if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
                node_t *n = node_copy(node);

                g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
            }
            if (is_set(iter->flags, pe_rsc_unique)) {
                break;
            }
            iter = iter->parent;
        }
    }

    if (node->details->unclean) {
        /* No extra processing needed
         * Also allows resources to be started again after a node is shot
         */
        on_fail = action_fail_ignore;
    }

    switch (on_fail) {
        case action_fail_ignore:
            /* nothing to do */
            break;

        case action_fail_fence:
            /* treat it as if it is still running
             * but also mark the node as unclean
             */
            pe_fence_node(data_set, node, "to recover from resource failure(s)");
            break;

        case action_fail_standby:
            node->details->standby = TRUE;
            node->details->standby_onfail = TRUE;
            break;

        case action_fail_block:
            /* is_managed == FALSE will prevent any
             * actions being sent for the resource
             */
            clear_bit(rsc->flags, pe_rsc_managed);
            set_bit(rsc->flags, pe_rsc_block);
            break;

        case action_fail_migrate:
            /* make sure it comes up somewhere else
             * or not at all
             */
            resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
            break;

        case action_fail_stop:
            rsc->next_role = RSC_ROLE_STOPPED;
            break;

        case action_fail_recover:
            if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
                set_bit(rsc->flags, pe_rsc_failed);
                stop_action(rsc, node, FALSE);
            }
            break;
    }

    if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
        /* Active here: register the placement, warning about orphans */
        if (is_set(rsc->flags, pe_rsc_orphan)) {
            if (is_set(rsc->flags, pe_rsc_managed)) {
                crm_config_warn("Detected active orphan %s running on %s",
                                rsc->id, node->details->uname);
            } else {
                crm_config_warn("Cluster configured not to stop active orphans."
                                " %s must be stopped manually on %s",
                                rsc->id, node->details->uname);
            }
        }

        native_add_running(rsc, node, data_set);
        if (on_fail != action_fail_ignore) {
            set_bit(rsc->flags, pe_rsc_failed);
        }

    } else if (rsc->clone_name) {
        /* Stopped: the status-section instance name no longer applies */
        crm_trace("Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
        free(rsc->clone_name);
        rsc->clone_name = NULL;

    } else {
        /* Stopped with no instance name: any stop already scheduled on
         * this node becomes optional.
         * NOTE(review): possible_matches does not appear to be freed --
         * check whether find_actions() transfers list ownership. */
        char *key = stop_key(rsc);
        GListPtr possible_matches = find_actions(rsc->actions, key, node);
        GListPtr gIter = possible_matches;

        for (; gIter != NULL; gIter = gIter->next) {
            action_t *stop = (action_t *) gIter->data;

            stop->flags |= pe_action_optional;
        }

        free(key);
    }
}
/* Recreate the recurring operations that are currently active on `node`
 * as (optional) actions, so the scheduler knows they are already running.
 * start_index/stop_index delimit which history entries are still current
 * (see calculate_active_ops()).
 */
static void
process_recurring(node_t * node, resource_t * rsc,
                  int start_index, int stop_index,
                  GListPtr sorted_op_list, pe_working_set_t * data_set)
{
    int position = -1;
    GListPtr iter = NULL;

    crm_trace("%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);

    for (iter = sorted_op_list; iter != NULL; iter = iter->next) {
        xmlNode *rsc_op = (xmlNode *) iter->data;
        const char *id = ID(rsc_op);
        const char *interval_s = NULL;
        const char *op_status = NULL;
        const char *op_task = NULL;
        char *op_key = NULL;
        int interval = 0;

        position++;

        if (node->details->online == FALSE) {
            crm_trace("Skipping %s/%s: node is offline", rsc->id, node->details->uname);
            break;

            /* Need to check if there's a monitor for role="Stopped" */
        } else if (start_index < stop_index && position <= stop_index) {
            crm_trace("Skipping %s/%s: resource is not active", id, node->details->uname);
            continue;

        } else if (position < start_index) {
            crm_trace("Skipping %s/%s: old %d", id, node->details->uname, position);
            continue;
        }

        /* Only recurring (interval > 0) operations are of interest */
        interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL);
        interval = crm_parse_int(interval_s, "0");
        if (interval == 0) {
            crm_trace("Skipping %s/%s: non-recurring", id, node->details->uname);
            continue;
        }

        op_status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
        if (safe_str_eq(op_status, "-1")) {
            crm_trace("Skipping %s/%s: status", id, node->details->uname);
            continue;
        }

        /* create the action */
        op_task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
        op_key = generate_op_key(rsc->id, op_task, interval);
        crm_trace("Creating %s/%s", op_key, node->details->uname);
        custom_action(rsc, op_key, op_task, node, TRUE, TRUE, data_set);
    }
}
/* Determine which entries in a call-id-sorted operation history are still
 * "current": *stop_index is set to the newest successful stop, and
 * *start_index to the newest start/migrate_from.
 *
 * When no explicit start is recorded, a start is implied by (in order of
 * preference) the newest promote/demote, or the newest successful monitor
 * (rc 0 or 8) that follows the last stop.  Either index is -1 when no
 * matching entry exists.
 *
 * Fix: this block arrived corrupted with raw diff markers; it is
 * reconstructed here as the post-diff version (implied starts tracked
 * separately and only applied when no real start was found).
 */
void
calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index)
{
    int counter = -1;
    int implied_monitor_start = -1;
    int implied_master_start = -1;
    const char *task = NULL;
    const char *status = NULL;
    GListPtr gIter = sorted_op_list;

    *stop_index = -1;
    *start_index = -1;

    for (; gIter != NULL; gIter = gIter->next) {
        xmlNode *rsc_op = (xmlNode *) gIter->data;

        counter++;

        task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
        status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);

        if (safe_str_eq(task, CRMD_ACTION_STOP)
            && safe_str_eq(status, "0")) {
            *stop_index = counter;

        } else if (safe_str_eq(task, CRMD_ACTION_START) ||
                   safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
            *start_index = counter;

        } else if ((implied_monitor_start <= *stop_index) && safe_str_eq(task, CRMD_ACTION_STATUS)) {
            const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);

            /* rc 0 (running) or 8 (running master) implies a start */
            if (safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) {
                implied_monitor_start = counter;
            }
        } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
            implied_master_start = counter;
        }
    }

    /* Only fall back to implied starts when no explicit start was seen */
    if (*start_index == -1) {
        if (implied_master_start != -1) {
            *start_index = implied_master_start;
        } else if (implied_monitor_start != -1) {
            *start_index = implied_monitor_start;
        }
    }
}
/* Unpack the <lrm_resource> status entry for one resource on one node:
 * find (or conjure) the resource object, replay its operation history in
 * call-id order, recreate active recurring monitors, and reconcile the
 * calculated next role with any configured target-role.
 *
 * Fixes: `on_fail` is now initialized with a value of its own enum type
 * (it was `= FALSE`, a gboolean constant); the dead re-initializations of
 * op_list/sorted_op_list were dropped.
 */
static void
unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    int stop_index = -1;
    int start_index = -1;
    enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;

    const char *task = NULL;
    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);

    resource_t *rsc = NULL;
    GListPtr op_list = NULL;
    GListPtr sorted_op_list = NULL;

    xmlNode *migrate_op = NULL;
    xmlNode *rsc_op = NULL;

    enum action_fail_response on_fail = action_fail_ignore;
    enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;

    crm_trace("[%s] Processing %s on %s",
              crm_element_name(rsc_entry), rsc_id, node->details->uname);

    /* extract operations */
    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) {
        if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
            op_list = g_list_prepend(op_list, rsc_op);
        }
    }

    if (op_list == NULL) {
        /* if there are no operations, there is nothing to do */
        return;
    }

    /* find the resource */
    rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry);
    if (rsc == NULL) {
        rsc = process_orphan_resource(rsc_entry, node, data_set);
    }
    CRM_ASSERT(rsc != NULL);

    /* process operations */
    saved_role = rsc->role;
    on_fail = action_fail_ignore;
    rsc->role = RSC_ROLE_UNKNOWN;
    sorted_op_list = g_list_sort(op_list, sort_op_by_callid);

    for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
        xmlNode *rsc_op = (xmlNode *) gIter->data;

        task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
        if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
            migrate_op = rsc_op;
        }

        unpack_rsc_op(rsc, node, rsc_op, gIter->next, &on_fail, data_set);
    }

    /* create active recurring operations as optional */
    calculate_active_ops(sorted_op_list, &start_index, &stop_index);
    process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);

    /* no need to free the contents */
    g_list_free(sorted_op_list);

    process_rsc_state(rsc, node, on_fail, migrate_op, data_set);

    /* A configured target-role may override the calculated next role,
     * but only toward a "lesser" role */
    if (get_target_role(rsc, &req_role)) {
        if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
            crm_debug("%s: Overwriting calculated next role %s"
                      " with requested next role %s",
                      rsc->id, role2text(rsc->next_role), role2text(req_role));
            rsc->next_role = req_role;

        } else if (req_role > rsc->next_role) {
            crm_info("%s: Not overwriting calculated next role %s"
                     " with requested next role %s",
                     rsc->id, role2text(rsc->next_role), role2text(req_role));
        }
    }

    if (saved_role > rsc->role) {
        rsc->role = saved_role;
    }
}
/* Walk a node's <lrm_resources> list and unpack the state of every
 * <lrm_resource> entry found there.  Always returns TRUE (FALSE only if
 * node is NULL).
 */
gboolean
unpack_lrm_resources(node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
{
    xmlNode *entry = NULL;

    CRM_CHECK(node != NULL, return FALSE);

    crm_trace("Unpacking resources on %s", node->details->uname);

    for (entry = __xml_first_child(lrm_rsc_list); entry != NULL; entry = __xml_next(entry)) {
        /* ignore anything that is not an <lrm_resource> element */
        if (crm_str_eq((const char *)entry->name, XML_LRM_TAG_RESOURCE, TRUE) == FALSE) {
            continue;
        }
        unpack_lrm_rsc_state(node, entry, data_set);
    }

    return TRUE;
}
/* Mark a resource as active: an instance of a master/slave resource
 * comes up in the Slave role, anything else comes up plain Started.
 */
static void
set_active(resource_t * rsc)
{
    resource_t *top = uber_parent(rsc);

    rsc->role = (top && top->variant == pe_master) ? RSC_ROLE_SLAVE : RSC_ROLE_STARTED;
}
/* GHFunc: force a node's weight to the score passed via user_data.
 * The hash key is unused.
 */
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
    ((node_t *) value)->weight = *(int *)user_data;
}
#define STATUS_PATH_MAX 1024
/* Build an xpath expression locating a specific operation history entry
 * (//node_state[...]//lrm_resource[...]/lrm_rsc_op[...]) and return the
 * matching XML node, or NULL if not found.
 *
 * For migrate_to/migrate_from lookups, `source` additionally constrains
 * the entry's migrate_target/migrate_source attribute.
 *
 * Fix: the snprintf() offsets are now checked.  snprintf() returns the
 * length it *wanted* to write, so on truncation `offset` could exceed
 * STATUS_PATH_MAX, making the next size argument negative -- which
 * converts to a huge size_t and overflows the buffer.  We bail out with
 * NULL instead of writing past the buffer or matching a partial xpath.
 */
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
            pe_working_set_t * data_set)
{
    int offset = 0;
    char xpath[STATUS_PATH_MAX];

    offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node);
    if (offset >= STATUS_PATH_MAX) {
        return NULL;            /* truncated */
    }

    offset +=
        snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']",
                 resource);
    if (offset >= STATUS_PATH_MAX) {
        return NULL;            /* truncated */
    }

    /* Need to check against transition_magic too? */
    if (source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) {
        offset +=
            snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                     "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op,
                     source);
    } else if (source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) {
        offset +=
            snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                     "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op,
                     source);
    } else {
        offset +=
            snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                     "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op);
    }

    if (offset >= STATUS_PATH_MAX) {
        /* A partial expression could match the wrong operation */
        return NULL;
    }

    return get_xpath_object(xpath, data_set->input, LOG_DEBUG);
}
/* Unpack one <lrm_rsc_op> history entry for rsc on node and fold its
 * outcome into the resource's calculated state.
 *
 * The recorded status/rc pair is first normalized (old LRM/CRM versions
 * remapped status codes and did not record target_rc in the transition
 * key), then used to update the resource's role, failure flags, allowed
 * nodes and *on_fail -- the accumulated on-fail directive later consumed
 * by process_rsc_state().
 *
 * Returns TRUE unless a sanity check on the operation XML fails.
 *
 * NOTE(review): the `next` parameter is never referenced in this body --
 * presumably kept for the public prototype; confirm before relying on it.
 */
gboolean
unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, GListPtr next,
              enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
    int task_id = 0;

    const char *id = NULL;
    const char *key = NULL;
    const char *task = NULL;
    const char *task_key = NULL;
    const char *magic = NULL;
    const char *actual_rc = NULL;

    /* const char *target_rc = NULL; */
    const char *task_status = NULL;
    const char *interval_s = NULL;
    const char *op_version = NULL;

    int interval = 0;
    int task_status_i = -2;
    int actual_rc_i = 0;
    int target_rc = -1;
    int last_failure = 0;

    action_t *action = NULL;
    node_t *effective_node = NULL;
    resource_t *failed = NULL;

    gboolean expired = FALSE;
    gboolean is_probe = FALSE;
    gboolean clear_past_failure = FALSE;

    CRM_CHECK(rsc != NULL, return FALSE);
    CRM_CHECK(node != NULL, return FALSE);
    CRM_CHECK(xml_op != NULL, return FALSE);

    /* Pull everything we need out of the operation entry */
    id = ID(xml_op);
    task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
    task_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
    task_status = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
    op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
    magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);
    key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);

    crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);

    CRM_CHECK(id != NULL, return FALSE);
    CRM_CHECK(task != NULL, return FALSE);
    CRM_CHECK(task_status != NULL, return FALSE);

    task_status_i = crm_parse_int(task_status, NULL);

    CRM_CHECK(task_status_i <= PCMK_LRM_OP_ERROR, return FALSE);
    CRM_CHECK(task_status_i >= PCMK_LRM_OP_PENDING, return FALSE);

    if (safe_str_eq(task, CRMD_ACTION_NOTIFY)) {
        /* safe to ignore these */
        return TRUE;
    }

    /* Has this result aged past the resource's failure-timeout? */
    if (rsc->failure_timeout > 0) {
        int last_run = 0;

        if (crm_element_value_int(xml_op, "last-rc-change", &last_run) == 0) {
            time_t now = get_timet_now(data_set);

            if (now > (last_run + rsc->failure_timeout)) {
                expired = TRUE;
            }
        }
    }

    crm_trace("Unpacking task %s/%s (call_id=%d, status=%s) on %s (role=%s)",
              id, task, task_id, task_status, node->details->uname, role2text(rsc->role));

    interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
    interval = crm_parse_int(interval_s, "0");

    /* A zero-interval monitor is a probe */
    if (interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) {
        is_probe = TRUE;
    }

    if (node->details->unclean) {
        crm_trace("Node %s (where %s is running) is unclean."
                  " Further action depends on the value of the stop's on-fail attribue",
                  node->details->uname, rsc->id);
    }

    actual_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
    CRM_CHECK(actual_rc != NULL, return FALSE);
    actual_rc_i = crm_parse_int(actual_rc, NULL);

    /* The expected rc is encoded in the transition key (newer CRMs) */
    if (key) {
        int dummy = 0;
        char *dummy_string = NULL;

        decode_transition_key(key, &dummy_string, &dummy, &dummy, &target_rc);
        free(dummy_string);
    }

    if (task_status_i == PCMK_LRM_OP_DONE && target_rc >= 0) {
        /* Trust the rc over the recorded status: "done" with the wrong
         * rc is really an error */
        if (target_rc == actual_rc_i) {
            task_status_i = PCMK_LRM_OP_DONE;

        } else {
            task_status_i = PCMK_LRM_OP_ERROR;
            crm_debug("%s on %s returned %d (%s) instead of the expected value: %d (%s)",
                      id, node->details->uname,
                      actual_rc_i, lrmd_event_rc2str(actual_rc_i),
                      target_rc, lrmd_event_rc2str(target_rc));
        }

    } else if (task_status_i == PCMK_LRM_OP_ERROR) {
        /* let us decide that */
        task_status_i = PCMK_LRM_OP_DONE;
    }

    if (task_status_i == PCMK_LRM_OP_NOTSUPPORTED) {
        actual_rc_i = PCMK_EXECRA_UNIMPLEMENT_FEATURE;
    }

    /* NOTE(review): comparing an op *status* value against an *rc* value
     * looks suspicious -- presumably intended to mean "not a clean
     * success"; confirm the intended condition before changing it. */
    if (task_status_i != actual_rc_i
        && rsc->failure_timeout > 0 && get_failcount(node, rsc, &last_failure, data_set) == 0) {
        if (last_failure > 0) {
            action_t *clear_op = NULL;

            clear_op = custom_action(rsc, crm_concat(rsc->id, CRM_OP_CLEAR_FAILCOUNT, '_'),
                                     CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set);

            add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
            crm_notice("Clearing expired failcount for %s on %s", rsc->id, node->details->uname);
        }
    }

    /* Expired failures are ignored outright (successes are still kept) */
    if (expired
        && actual_rc_i != PCMK_EXECRA_NOT_RUNNING
        && actual_rc_i != PCMK_EXECRA_RUNNING_MASTER && actual_rc_i != PCMK_EXECRA_OK) {
        crm_notice("Ignoring expired failure %s (rc=%d, magic=%s) on %s",
                   id, actual_rc_i, magic, node->details->uname);
        goto done;
    }

    /* we could clean this up significantly except for old LRMs and CRMs that
     * didnt include target_rc and liked to remap status
     */
    switch (actual_rc_i) {
        case PCMK_EXECRA_NOT_RUNNING:
            if (is_probe || target_rc == actual_rc_i) {
                /* "not running" was the expected answer */
                task_status_i = PCMK_LRM_OP_DONE;
                rsc->role = RSC_ROLE_STOPPED;

                /* clear any previous failure actions */
                *on_fail = action_fail_ignore;
                rsc->next_role = RSC_ROLE_UNKNOWN;

            } else if (safe_str_neq(task, CRMD_ACTION_STOP)) {
                task_status_i = PCMK_LRM_OP_ERROR;
            }
            break;

        case PCMK_EXECRA_RUNNING_MASTER:
            if (is_probe) {
                task_status_i = PCMK_LRM_OP_DONE;
                crm_notice("Operation %s found resource %s active in master mode on %s",
                           task, rsc->id, node->details->uname);

            } else if (target_rc == actual_rc_i) {
                /* nothing to do */

            } else if (target_rc >= 0) {
                task_status_i = PCMK_LRM_OP_ERROR;

                /* legacy code for pre-0.6.5 operations */
            } else if (safe_str_neq(task, CRMD_ACTION_STATUS)
                       || rsc->role != RSC_ROLE_MASTER) {
                task_status_i = PCMK_LRM_OP_ERROR;
                if (rsc->role != RSC_ROLE_MASTER) {
                    crm_err("%s reported %s in master mode on %s",
                            id, rsc->id, node->details->uname);
                }
            }
            rsc->role = RSC_ROLE_MASTER;
            break;

        case PCMK_EXECRA_FAILED_MASTER:
            rsc->role = RSC_ROLE_MASTER;
            task_status_i = PCMK_LRM_OP_ERROR;
            break;

        case PCMK_EXECRA_UNIMPLEMENT_FEATURE:
            if (interval > 0) {
                task_status_i = PCMK_LRM_OP_NOTSUPPORTED;
                break;
            }
            /* else: fall through */
        case PCMK_EXECRA_INSUFFICIENT_PRIV:
        case PCMK_EXECRA_NOT_INSTALLED:
        case PCMK_EXECRA_INVALID_PARAM:
            effective_node = node;
            /* fall through */
        case PCMK_EXECRA_NOT_CONFIGURED:
            /* Hard errors: ban the resource (or its parent, for anonymous
             * clone instances) from this node -- or everywhere, when
             * effective_node stayed NULL (NOT_CONFIGURED) */
            failed = rsc;
            if (is_not_set(rsc->flags, pe_rsc_unique)) {
                failed = uber_parent(failed);
            }

            do_crm_log(actual_rc_i == PCMK_EXECRA_NOT_INSTALLED ? LOG_NOTICE : LOG_ERR,
                       "Preventing %s from re-starting %s %s: operation %s failed '%s' (rc=%d)",
                       failed->id,
                       effective_node ? "on" : "anywhere in the cluster",
                       effective_node ? effective_node->details->uname : "",
                       task, lrmd_event_rc2str(actual_rc_i), actual_rc_i);

            resource_location(failed, effective_node, -INFINITY, "hard-error", data_set);
            if (is_probe) {
                /* treat these like stops */
                task = CRMD_ACTION_STOP;
                task_status_i = PCMK_LRM_OP_DONE;
                crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
                if (actual_rc_i != PCMK_EXECRA_NOT_INSTALLED
                    || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
                    if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) {
                        add_node_copy(data_set->failed, xml_op);
                    }
                }
            }
            break;

        case PCMK_EXECRA_OK:
            if (is_probe && target_rc == 7) {
                task_status_i = PCMK_LRM_OP_DONE;
                crm_info("Operation %s found resource %s active on %s",
                         task, rsc->id, node->details->uname);

                /* legacy code for pre-0.6.5 operations */
            } else if (target_rc < 0 && interval > 0 && rsc->role == RSC_ROLE_MASTER) {
                /* catch status ops that return 0 instead of 8 while they
                 * are supposed to be in master mode
                 */
                task_status_i = PCMK_LRM_OP_ERROR;
            }
            break;

        default:
            if (task_status_i == PCMK_LRM_OP_DONE) {
                crm_info("Remapping %s (rc=%d) on %s to an ERROR",
                         id, actual_rc_i, node->details->uname);
                task_status_i = PCMK_LRM_OP_ERROR;
            }
    }

    if (task_status_i == PCMK_LRM_OP_ERROR
        || task_status_i == PCMK_LRM_OP_TIMEOUT || task_status_i == PCMK_LRM_OP_NOTSUPPORTED) {
        /* Build the failed action so its on-fail/fail-role settings can
         * be consulted; may downgrade the failure to DONE for
         * on-fail=ignore */
        const char *action_key = task_key ? task_key : id;

        action = custom_action(rsc, strdup(action_key), task, NULL, TRUE, FALSE, data_set);
        if (expired) {
            crm_notice("Ignoring expired failure (calculated) %s (rc=%d, magic=%s) on %s",
                       id, actual_rc_i, magic, node->details->uname);
            goto done;

        } else if (action->on_fail == action_fail_ignore) {
            crm_warn("Remapping %s (rc=%d) on %s to DONE: ignore",
                     id, actual_rc_i, node->details->uname);
            task_status_i = PCMK_LRM_OP_DONE;
            set_bit(rsc->flags, pe_rsc_failure_ignored);

            crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
            if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) {
                add_node_copy(data_set->failed, xml_op);
            }
        }
    }

    switch (task_status_i) {
        case PCMK_LRM_OP_PENDING:
            if (safe_str_eq(task, CRMD_ACTION_START)) {
                set_bit(rsc->flags, pe_rsc_start_pending);
                set_active(rsc);

            } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
                rsc->role = RSC_ROLE_MASTER;
            }
            /*
             * Intentionally ignoring pending migrate ops here;
             * haven't decided if we need to do anything special
             * with them yet...
             */
            break;

        case PCMK_LRM_OP_DONE:
            crm_trace("%s/%s completed on %s", rsc->id, task, node->details->uname);

            if (actual_rc_i == PCMK_EXECRA_NOT_RUNNING) {
                clear_past_failure = TRUE;

            } else if (safe_str_eq(task, CRMD_ACTION_START)) {
                rsc->role = RSC_ROLE_STARTED;
                clear_past_failure = TRUE;

            } else if (safe_str_eq(task, CRMD_ACTION_STOP)) {
                rsc->role = RSC_ROLE_STOPPED;
                clear_past_failure = TRUE;

            } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
                rsc->role = RSC_ROLE_MASTER;
                clear_past_failure = TRUE;

            } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
                /* Demote from Master does not clear an error */
                rsc->role = RSC_ROLE_SLAVE;

            } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
                rsc->role = RSC_ROLE_STARTED;
                clear_past_failure = TRUE;

            } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) {
                /*
                 * The normal sequence is (now): migrate_to(Src) -> migrate_from(Tgt) -> stop(Src)
                 *
                 * So if a migrate_to is followed by a stop, then we dont need to care what
                 * happended on the target node
                 *
                 * Without the stop, we need to look for a successful migrate_from.
                 * This would also imply we're no longer running on the source
                 *
                 * Without the stop, and without a migrate_from op we make sure the resource
                 * gets stopped on both source and target (assuming the target is up)
                 *
                 */
                int stop_id = 0;
                xmlNode *stop_op =
                    find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->id, NULL, data_set);

                if (stop_op) {
                    crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
                }

                /* Only a stop newer than the migrate_to makes it moot */
                if (stop_op == NULL || stop_id < task_id) {
                    int from_rc = 0, from_status = 0;
                    const char *migrate_source =
                        crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
                    const char *migrate_target =
                        crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);

                    node_t *target = pe_find_node(data_set->nodes, migrate_target);
                    node_t *source = pe_find_node(data_set->nodes, migrate_source);
                    xmlNode *migrate_from =
                        find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source,
                                    data_set);

                    rsc->role = RSC_ROLE_STARTED;       /* can be master? */
                    if (migrate_from) {
                        crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
                        crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
                        crm_trace("%s op on %s exited with status=%d, rc=%d",
                                  ID(migrate_from), migrate_target, from_status, from_rc);
                    }

                    if (migrate_from && from_rc == PCMK_EXECRA_OK && from_status == PCMK_LRM_OP_DONE) {
                        crm_trace("Detected dangling migration op: %s on %s", ID(xml_op),
                                  migrate_source);

                        /* all good
                         * just need to arrange for the stop action to get sent
                         * but _without_ affecting the target somehow
                         */
                        rsc->role = RSC_ROLE_STOPPED;
                        rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);

                    } else if (migrate_from) {  /* Failed */
                        /* NOTE(review): target->details is read in this
                         * trace before the NULL check below -- crashes if
                         * the target node is unknown; confirm and guard.
                         * (Same pattern in the branch below.) */
                        crm_trace("Marking active on %s %p %d", migrate_target, target,
                                  target->details->online);
                        if (target && target->details->online) {
                            native_add_running(rsc, target, data_set);
                        }

                    } else {    /* Pending or complete but erased */
                        node_t *target = pe_find_node_id(data_set->nodes, migrate_target);

                        crm_trace("Marking active on %s %p %d", migrate_target, target,
                                  target->details->online);
                        if (target && target->details->online) {
                            native_add_running(rsc, target, data_set);
                            if (source && source->details->online) {
                                /* If we make it here we have a partial migration.  The migrate_to
                                 * has completed but the migrate_from on the target has not. Hold on
                                 * to the target and source on the resource. Later on if we detect that
                                 * the resource is still going to run on that target, we may continue
                                 * the migration */
                                rsc->partial_migration_target = target;
                                rsc->partial_migration_source = source;
                            }
                        } else {
                            /* Consider it failed here - forces a restart, prevents migration */
                            set_bit_inplace(rsc->flags, pe_rsc_failed);
                        }
                    }
                }

            } else if (rsc->role < RSC_ROLE_STARTED) {
                /* start, migrate_to and migrate_from will land here */
                crm_trace("%s active on %s", rsc->id, node->details->uname);
                set_active(rsc);
            }

            /* clear any previous failure actions */
            if (clear_past_failure) {
                switch (*on_fail) {
                    case action_fail_block:
                    case action_fail_stop:
                    case action_fail_fence:
                    case action_fail_migrate:
                    case action_fail_standby:
                        crm_trace("%s.%s is not cleared by a completed stop",
                                  rsc->id, fail2text(*on_fail));
                        break;

                    case action_fail_ignore:
                    case action_fail_recover:
                        *on_fail = action_fail_ignore;
                        rsc->next_role = RSC_ROLE_UNKNOWN;
                }
            }
            break;

        case PCMK_LRM_OP_ERROR:
        case PCMK_LRM_OP_TIMEOUT:
        case PCMK_LRM_OP_NOTSUPPORTED:
            crm_warn("Processing failed op %s on %s: %s (%d)",
                     id, node->details->uname, lrmd_event_rc2str(actual_rc_i), actual_rc_i);
            crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
            if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) {
                add_node_copy(data_set->failed, xml_op);
            }

            /* Keep the most severe on-fail directive seen so far */
            if (*on_fail < action->on_fail) {
                *on_fail = action->on_fail;
            }

            if (safe_str_eq(task, CRMD_ACTION_STOP)) {
                resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set);

            } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
                /* Failed migrate_from: unless a newer stop exists on the
                 * source, assume the resource is still active there */
                int stop_id = 0;
                int migrate_id = 0;
                const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
                const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);

                xmlNode *stop_op =
                    find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_source, NULL, data_set);
                xmlNode *migrate_op =
                    find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, migrate_source, migrate_target,
                                data_set);

                if (stop_op) {
                    crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
                }
                if (migrate_op) {
                    crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id);
                }

                /* Get our state right */
                rsc->role = RSC_ROLE_STARTED;   /* can be master? */

                if (stop_op == NULL || stop_id < migrate_id) {
                    node_t *source = pe_find_node(data_set->nodes, migrate_source);

                    if (source && source->details->online) {
                        native_add_running(rsc, source, data_set);
                    }
                }

            } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) {
                /* Failed migrate_to: unless a newer stop exists on the
                 * target, assume the resource may be active there too */
                int stop_id = 0;
                int migrate_id = 0;
                const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
                const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);

                xmlNode *stop_op =
                    find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_target, NULL, data_set);
                xmlNode *migrate_op =
                    find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source,
                                data_set);

                if (stop_op) {
                    crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id);
                }
                if (migrate_op) {
                    crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id);
                }

                /* Get our state right */
                rsc->role = RSC_ROLE_STARTED;   /* can be master? */

                if (stop_op == NULL || stop_id < migrate_id) {
                    node_t *target = pe_find_node(data_set->nodes, migrate_target);

                    crm_trace("Stop: %p %d, Migrated: %p %d", stop_op, stop_id, migrate_op,
                              migrate_id);
                    if (target && target->details->online) {
                        native_add_running(rsc, target, data_set);
                    }

                } else if (migrate_op == NULL) {
                    /* Make sure it gets cleaned up, the stop may pre-date the migrate_from */
                    rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
                }

            } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) {
                rsc->role = RSC_ROLE_MASTER;

            } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) {
                /*
                 * staying in role=master ends up putting the PE/TE into a loop
                 * setting role=slave is not dangerous because no master will be
                 * promoted until the failed resource has been fully stopped
                 */
                crm_warn("Forcing %s to stop after a failed demote action", rsc->id);
                rsc->next_role = RSC_ROLE_STOPPED;
                rsc->role = RSC_ROLE_SLAVE;

            } else if (compare_version("2.0", op_version) > 0
                       && safe_str_eq(task, CRMD_ACTION_START)) {
                crm_warn("Compatibility handling for failed op %s on %s", id, node->details->uname);
                resource_location(rsc, node, -INFINITY, "__legacy_start__", data_set);
            }

            if (rsc->role < RSC_ROLE_STARTED) {
                set_active(rsc);
            }

            crm_trace("Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
                      rsc->id, role2text(rsc->role),
                      node->details->unclean ? "true" : "false",
                      fail2text(action->on_fail), role2text(action->fail_role));

            if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) {
                rsc->next_role = action->fail_role;
            }

            if (action->fail_role == RSC_ROLE_STOPPED) {
                int score = -INFINITY;

                crm_err("Making sure %s doesn't come up again", rsc->id);
                /* make sure it doesnt come up again */
                g_hash_table_destroy(rsc->allowed_nodes);
                rsc->allowed_nodes = node_hash_from_list(data_set->nodes);
                g_hash_table_foreach(rsc->allowed_nodes, set_node_score, &score);
            }

            pe_free_action(action);
            action = NULL;
            break;

        case PCMK_LRM_OP_CANCELLED:
            /* do nothing?? */
            pe_err("Dont know what to do for cancelled ops yet");
            break;
    }

  done:
    crm_trace("Resource %s after %s: role=%s", rsc->id, task, role2text(rsc->role));
    pe_free_action(action);
    return TRUE;
}
gboolean
add_node_attrs(xmlNode * xml_obj, node_t * node, gboolean overwrite, pe_working_set_t * data_set)
{
    /* Value for the built-in "#is_dc" attribute; flipped to true below if
     * this node turns out to be the designated controller.
     */
    const char *is_dc_value = XML_BOOLEAN_FALSE;

    /* Expose the node's name and CIB ID as built-in node attributes so
     * rules can match on them like any other attribute.
     */
    g_hash_table_insert(node->details->attrs,
                        strdup("#" XML_ATTR_UNAME), strdup(node->details->uname));
    g_hash_table_insert(node->details->attrs,
                        strdup("#" XML_ATTR_ID), strdup(node->details->id));

    if (safe_str_eq(node->details->id, data_set->dc_uuid)) {
        /* This node is the DC: remember it on the working set too */
        data_set->dc_node = node;
        node->details->is_dc = TRUE;
        is_dc_value = XML_BOOLEAN_TRUE;
    }
    g_hash_table_insert(node->details->attrs,
                        strdup("#" XML_ATTR_DC), strdup(is_dc_value));

    /* Merge in any instance attributes configured for this node in the CIB */
    unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL,
                               node->details->attrs, NULL, overwrite, data_set->now);
    return TRUE;
}
/* Collect the lrm_rsc_op entries under rsc_entry for resource 'rsc' on 'node',
 * sorted by call ID. With active_filter, only operations still considered
 * "active" (per calculate_active_ops) are returned. Caller frees the list
 * (the XML nodes themselves are still owned by the input document).
 */
static GListPtr
extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
{
    int counter = -1;
    int stop_index = -1;
    int start_index = -1;
    xmlNode *child = NULL;
    GListPtr iter = NULL;
    GListPtr op_list = NULL;
    GListPtr sorted_op_list = NULL;

    /* Gather every operation record, tagging each with the resource and
     * node it belongs to so consumers don't need the surrounding context.
     */
    for (child = __xml_first_child(rsc_entry); child != NULL; child = __xml_next(child)) {
        if (crm_str_eq((const char *)child->name, XML_LRM_TAG_RSC_OP, TRUE)) {
            crm_xml_add(child, "resource", rsc);
            crm_xml_add(child, XML_ATTR_UNAME, node);
            op_list = g_list_prepend(op_list, child);
        }
    }

    if (op_list == NULL) {
        /* no operations recorded for this resource */
        return NULL;
    }

    sorted_op_list = g_list_sort(op_list, sort_op_by_callid);

    /* Without filtering, the full sorted history is the result */
    if (active_filter == FALSE) {
        return sorted_op_list;
    }

    op_list = NULL;
    calculate_active_ops(sorted_op_list, &start_index, &stop_index);

    for (iter = sorted_op_list; iter != NULL; iter = iter->next) {
        xmlNode *op = (xmlNode *) iter->data;

        counter++;
        if (start_index < stop_index) {
            /* the most recent stop post-dates the last start:
             * nothing here is active
             */
            crm_trace("Skipping %s: not active", ID(rsc_entry));
            break;

        } else if (counter < start_index) {
            /* pre-dates the last start: superseded history */
            crm_trace("Skipping %s: old", ID(op));
            continue;
        }
        op_list = g_list_append(op_list, op);
    }

    g_list_free(sorted_op_list);
    return op_list;
}
/* Walk the CIB status section and collect operation history entries,
 * optionally restricted to one node and/or one resource (NULL matches all).
 * Returns a list of lrm_rsc_op XML nodes; caller frees the list only.
 */
GListPtr
find_operations(const char *rsc, const char *node, gboolean active_filter,
                pe_working_set_t * data_set)
{
    GListPtr output = NULL;
    xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
    xmlNode *node_state = NULL;

    for (node_state = __xml_first_child(status); node_state != NULL;
         node_state = __xml_next(node_state)) {

        const char *uname = NULL;
        node_t *this_node = NULL;
        xmlNode *lrm_rscs = NULL;
        xmlNode *lrm_rsc = NULL;

        if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
            continue;
        }

        uname = crm_element_value(node_state, XML_ATTR_UNAME);
        if (node != NULL && safe_str_neq(uname, node)) {
            /* caller asked for a specific node; this isn't it */
            continue;
        }

        this_node = pe_find_node(data_set->nodes, uname);
        CRM_CHECK(this_node != NULL, continue);

        determine_online_status(node_state, this_node, data_set);

        if (this_node->details->online == FALSE
            && is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
            /* offline nodes run no resources...
             * unless stonith is enabled, in which case we need to
             * make sure rsc start events happen after the stonith
             */
            continue;
        }

        lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
        lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);

        for (lrm_rsc = __xml_first_child(lrm_rscs); lrm_rsc != NULL;
             lrm_rsc = __xml_next(lrm_rsc)) {

            if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) {
                const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);

                if (rsc != NULL && safe_str_neq(rsc_id, rsc)) {
                    /* caller asked for a specific resource; this isn't it */
                    continue;
                }
                output = g_list_concat(output,
                                       extract_operations(uname, rsc_id, lrm_rsc,
                                                          active_filter));
            }
        }
    }

    return output;
}
diff --git a/pengine/regression.sh b/pengine/regression.sh
index ab73e152a2..f6d7d1c813 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -1,652 +1,653 @@
#!/bin/bash
# Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
core=`dirname $0`
. $core/regression.core.sh
create_mode="true"
info Generating test outputs for these tests...
# do_test file description
info Done.
echo ""
info Performing the following tests from $io_dir
create_mode="false"
echo ""
do_test simple1 "Offline "
do_test simple2 "Start "
do_test simple3 "Start 2 "
do_test simple4 "Start Failed"
do_test simple6 "Stop Start "
do_test simple7 "Shutdown "
#do_test simple8 "Stonith "
#do_test simple9 "Lower version"
#do_test simple10 "Higher version"
do_test simple11 "Priority (ne)"
do_test simple12 "Priority (eq)"
do_test simple8 "Stickiness"
echo ""
do_test group1 "Group "
do_test group2 "Group + Native "
do_test group3 "Group + Group "
do_test group4 "Group + Native (nothing)"
do_test group5 "Group + Native (move) "
do_test group6 "Group + Group (move) "
do_test group7 "Group colocation"
do_test group13 "Group colocation (cant run)"
do_test group8 "Group anti-colocation"
do_test group9 "Group recovery"
do_test group10 "Group partial recovery"
do_test group11 "Group target_role"
do_test group14 "Group stop (graph terminated)"
do_test group15 "-ve group colocation"
do_test bug-1573 "Partial stop of a group with two children"
do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
do_test bug-lf-2613 "Move group on failure"
do_test bug-lf-2619 "Move group on clone failure"
echo ""
do_test rsc_dep1 "Must not "
do_test rsc_dep3 "Must "
do_test rsc_dep5 "Must not 3 "
do_test rsc_dep7 "Must 3 "
do_test rsc_dep10 "Must (but cant)"
do_test rsc_dep2 "Must (running) "
do_test rsc_dep8 "Must (running : alt) "
do_test rsc_dep4 "Must (running + move)"
do_test asymmetric "Asymmetric - require explicit location constraints"
echo ""
do_test orphan-0 "Orphan ignore"
do_test orphan-1 "Orphan stop"
do_test orphan-2 "Orphan stop, remove failcount"
echo ""
do_test params-0 "Params: No change"
do_test params-1 "Params: Changed"
do_test params-2 "Params: Resource definition"
do_test params-4 "Params: Reload"
do_test params-5 "Params: Restart based on probe digest"
do_test novell-251689 "Resource definition change + target_role=stopped"
do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
do_test params-6 "Params: Detect reload in previously migrated resource"
echo ""
do_test target-0 "Target Role : baseline"
do_test target-1 "Target Role : master"
do_test target-2 "Target Role : invalid"
echo ""
do_test domain "Failover domains"
do_test base-score "Set a node's default score for all nodes"
echo ""
do_test date-1 "Dates" -t "2005-020"
do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
do_test probe-0 "Probe (anon clone)"
do_test probe-1 "Pending Probe"
do_test probe-2 "Correctly re-probe cloned groups"
do_test probe-3 "Probe (pending node)"
do_test probe-4 "Probe (pending node + stopped resource)" --rc 4
do_test standby "Standby"
do_test comments "Comments"
echo ""
do_test one-or-more-0 "Everything starts"
do_test one-or-more-1 "Nothing starts because of A"
do_test one-or-more-2 "D can start because of C"
do_test one-or-more-3 "D cannot start because of B and C"
do_test one-or-more-4 "D cannot start because of target-role"
do_test one-or-more-5 "Start A and F even though C and D are stopped"
do_test one-or-more-6 "Leave A running even though B is stopped"
do_test one-or-more-7 "Leave A running even though C is stopped"
echo ""
do_test order1 "Order start 1 "
do_test order2 "Order start 2 "
do_test order3 "Order stop "
do_test order4 "Order (multiple) "
do_test order5 "Order (move) "
do_test order6 "Order (move w/ restart) "
do_test order7 "Order (manditory) "
do_test order-optional "Order (score=0) "
do_test order-required "Order (score=INFINITY) "
do_test bug-lf-2171 "Prevent group start when clone is stopped"
do_test order-clone "Clone ordering should be able to prevent startup of dependant clones"
do_test order-sets "Ordering for resource sets"
do_test order-serialize "Serialize resources without inhibiting migration"
do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
do_test clone-order-primitive "Order clone start after a primitive"
do_test order-optional-keyword "Order (optional keyword)"
do_test order-mandatory "Order (mandatory keyword)"
do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
do_test ordered-set-basic-startup "Constraint set with default order settings."
# This test emits an error log and thus upsets the test suite; even
# though it explicitly aims to test an error leg. FIXME
# do_test order-wrong-kind "Order (error)"
echo ""
do_test coloc-loop "Colocation - loop"
do_test coloc-many-one "Colocation - many-to-one"
do_test coloc-list "Colocation - many-to-one with list"
do_test coloc-group "Colocation - groups"
do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
do_test coloc-attr "Colocation based on node attributes"
do_test coloc-negative-group "Negative colocation with a group"
do_test coloc-intra-set "Intra-set colocation"
do_test bug-lf-2435 "Colocation sets with a negative score"
do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependant must stop"
echo ""
do_test rsc-sets-seq-true "Resource Sets - sequential=false"
do_test rsc-sets-seq-false "Resource Sets - sequential=true"
do_test rsc-sets-clone "Resource Sets - Clone"
do_test rsc-sets-master "Resource Sets - Master"
do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
#echo ""
#do_test agent1 "version: lt (empty)"
#do_test agent2 "version: eq "
#do_test agent3 "version: gt "
echo ""
do_test attrs1 "string: eq (and) "
do_test attrs2 "string: lt / gt (and)"
do_test attrs3 "string: ne (or) "
do_test attrs4 "string: exists "
do_test attrs5 "string: not_exists "
do_test attrs6 "is_dc: true "
do_test attrs7 "is_dc: false "
do_test attrs8 "score_attribute "
echo ""
do_test mon-rsc-1 "Schedule Monitor - start"
do_test mon-rsc-2 "Schedule Monitor - move "
do_test mon-rsc-3 "Schedule Monitor - pending start "
do_test mon-rsc-4 "Schedule Monitor - move/pending start"
echo ""
do_test rec-rsc-0 "Resource Recover - no start "
do_test rec-rsc-1 "Resource Recover - start "
do_test rec-rsc-2 "Resource Recover - monitor "
do_test rec-rsc-3 "Resource Recover - stop - ignore"
do_test rec-rsc-4 "Resource Recover - stop - block "
do_test rec-rsc-5 "Resource Recover - stop - fence "
do_test rec-rsc-6 "Resource Recover - multiple - restart"
do_test rec-rsc-7 "Resource Recover - multiple - stop "
do_test rec-rsc-8 "Resource Recover - multiple - block "
do_test rec-rsc-9 "Resource Recover - group/group"
echo ""
do_test quorum-1 "No quorum - ignore"
do_test quorum-2 "No quorum - freeze"
do_test quorum-3 "No quorum - stop "
do_test quorum-4 "No quorum - start anyway"
do_test quorum-5 "No quorum - start anyway (group)"
do_test quorum-6 "No quorum - start anyway (clone)"
echo ""
do_test rec-node-1 "Node Recover - Startup - no fence"
do_test rec-node-2 "Node Recover - Startup - fence "
do_test rec-node-3 "Node Recover - HA down - no fence"
do_test rec-node-4 "Node Recover - HA down - fence "
do_test rec-node-5 "Node Recover - CRM down - no fence"
do_test rec-node-6 "Node Recover - CRM down - fence "
do_test rec-node-7 "Node Recover - no quorum - ignore "
do_test rec-node-8 "Node Recover - no quorum - freeze "
do_test rec-node-9 "Node Recover - no quorum - stop "
do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
do_test rec-node-11 "Node Recover - CRM down w/ group - fence "
do_test rec-node-12 "Node Recover - nothing active - fence "
do_test rec-node-13 "Node Recover - failed resource + shutdown - fence "
do_test rec-node-15 "Node Recover - unknown lrm section"
do_test rec-node-14 "Serialize all stonith's"
echo ""
do_test multi1 "Multiple Active (stop/start)"
echo ""
do_test migrate-begin "Normal migration"
do_test migrate-success "Completed migration"
do_test migrate-partial-1 "Completed migration, missing stop on source"
do_test migrate-partial-2 "Successful migrate_to only"
do_test migrate-partial-3 "Successful migrate_to only, target down"
do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
do_test migrate-fail-2 "Failed migrate_from"
do_test migrate-fail-3 "Failed migrate_from + stop on source"
do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
do_test migrate-fail-6 "Failed migrate_to"
do_test migrate-fail-7 "Failed migrate_to + stop on source"
do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
do_test migrate-stop "Migration in a stopping stack"
do_test migrate-start "Migration in a starting stack"
do_test migrate-stop_start "Migration in a restarting stack"
do_test migrate-stop-complex "Migration in a complex stopping stack"
do_test migrate-start-complex "Migration in a complex starting stack"
do_test migrate-stop-start-complex "Migration in a complex moving stack"
do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
do_test migrate-1 "Migrate (migrate)"
do_test migrate-2 "Migrate (stable)"
do_test migrate-3 "Migrate (failed migrate_to)"
do_test migrate-4 "Migrate (failed migrate_from)"
do_test novell-252693 "Migration in a stopping stack"
do_test novell-252693-2 "Migration in a starting stack"
do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
do_test bug-1820 "Migration in a group"
do_test bug-1820-1 "Non-migration in a group"
do_test migrate-5 "Primitive migration with a clone"
do_test migrate-fencing "Migration after Fencing"
#echo ""
#do_test complex1 "Complex "
do_test bug-lf-2422 "Dependancy on partially active group - stop ocfs:*"
echo ""
do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
do_test clone-anon-failcount "Merge failcounts for anonymous clones"
do_test inc0 "Incarnation start"
do_test inc1 "Incarnation start order"
do_test inc2 "Incarnation silent restart, stop, move"
do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
do_test inc7 "Clone colocation"
do_test inc8 "Clone anti-colocation"
do_test inc9 "Non-unique clone"
do_test inc10 "Non-unique clone (stop)"
do_test inc11 "Primitive colocation with clones"
do_test inc12 "Clone shutdown"
do_test cloned-group "Make sure only the correct number of cloned groups are started"
do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved"
do_test clone-max-zero "Orphan processing with clone-max=0"
do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
do_test bug-lf-2160 "Dont shuffle clones due to colocation"
do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
do_test bug-lf-2153 "Clone ordering constraints"
do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
do_test clone-order-instance "Ordering with specific clone instances"
do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups"
do_test bug-lf-2544 "Balanced clone placement"
do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
do_test bug-lf-2574 "Avoid clone shuffle"
do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
echo ""
do_test master-0 "Stopped -> Slave"
do_test master-1 "Stopped -> Promote"
do_test master-2 "Stopped -> Promote : notify"
do_test master-3 "Stopped -> Promote : master location"
do_test master-4 "Started -> Promote : master location"
do_test master-5 "Promoted -> Promoted"
do_test master-6 "Promoted -> Promoted (2)"
do_test master-7 "Promoted -> Fenced"
do_test master-8 "Promoted -> Fenced -> Moved"
do_test master-9 "Stopped + Promotable + No quorum"
do_test master-10 "Stopped -> Promotable : notify with monitor"
do_test master-11 "Stopped -> Promote : colocation"
do_test novell-239082 "Demote/Promote ordering"
do_test novell-239087 "Stable master placement"
do_test master-12 "Promotion based solely on rsc_location constraints"
do_test master-13 "Include preferences of colocated resources when placing master"
do_test master-demote "Ordering when actions depends on demoting a slave resource"
do_test master-ordering "Prevent resources from starting that need a master"
do_test bug-1765 "Master-Master Colocation (dont stop the slaves)"
do_test master-group "Promotion of cloned groups"
do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
do_test master-failed-demote "Dont retry failed demote actions"
do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)"
do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
do_test master-reattach "Re-attach to a running master"
do_test master-allow-start "Don't include master score if it would prevent allocation"
do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints"
do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
do_test master-role "Prevent target-role from promoting more than master-max instances"
do_test bug-lf-2358 "Master-Master anti-colocation"
do_test master-promotion-constraint "Mandatory master colocation constraints"
do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
do_test master-demote-2 "Demote does not clear past failure"
do_test master-move "Move master based on failure of colocated group"
do_test master-probed-score "Observe the promotion score of probed resources"
do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint"
do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
+do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
echo ""
do_test history-1 "Correctly parse stateful-1 resource state"
echo ""
do_test managed-0 "Managed (reference)"
do_test managed-1 "Not managed - down "
do_test managed-2 "Not managed - up "
do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
do_test bug-5028-detach "Ensure detach still works"
do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
echo ""
do_test interleave-0 "Interleave (reference)"
do_test interleave-1 "coloc - not interleaved"
do_test interleave-2 "coloc - interleaved "
do_test interleave-3 "coloc - interleaved (2)"
do_test interleave-pseudo-stop "Interleaved clone during stonith"
do_test interleave-stop "Interleaved clone during stop"
do_test interleave-restart "Interleaved clone during dependancy restart"
echo ""
do_test notify-0 "Notify reference"
do_test notify-1 "Notify simple"
do_test notify-2 "Notify simple, confirm"
do_test notify-3 "Notify move, confirm"
do_test novell-239079 "Notification priority"
#do_test notify-2 "Notify - 764"
echo ""
do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
do_test 829 "OSDL #829"
do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
do_test 994-2 "OSDL #994 - with a dependant resource"
do_test 1360 "OSDL #1360 - Clone stickiness"
do_test 1484 "OSDL #1484 - on_fail=stop"
do_test 1494 "OSDL #1494 - Clone stability"
do_test unrunnable-1 "Unrunnable"
do_test stonith-0 "Stonith loop - 1"
do_test stonith-1 "Stonith loop - 2"
do_test stonith-2 "Stonith loop - 3"
do_test stonith-3 "Stonith startup"
do_test bug-1572-1 "Recovery of groups depending on master/slave"
do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
do_test bug-1685 "Depends-on-master ordering"
do_test bug-1822 "Dont promote partially active groups"
do_test bug-pm-11 "New resource added to a m/s group"
do_test bug-pm-12 "Recover only the failed portion of a cloned group"
do_test bug-n-387749 "Don't shuffle clone instances"
do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
do_test bug-lf-1920 "Correctly handle probes that find active resources"
do_test bnc-515172 "Location constraint with multiple expressions"
do_test colocate-primitive-with-clone "Optional colocation with a clone"
do_test use-after-free-merge "Use-after-free in native_merge_weights"
do_test bug-lf-2551 "STONITH ordering for stop"
do_test bug-lf-2606 "Stonith implies demote"
do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false"
do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false"
do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false"
do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts."
do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false"
do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false."
do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false."
do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false."
do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false."
do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources."
do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
do_test monitor-onfail-stop "bug-5058 - Monitor failure wiht on-fail set to stop"
do_test bug-5059 "No need to restart p_stateful1:*"
do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled."
do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is disabled."
do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc."
do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith."
do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group"
do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group"
do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)."
do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)"
do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group."
do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs."
echo ""
do_test systemhealth1 "System Health () #1"
do_test systemhealth2 "System Health () #2"
do_test systemhealth3 "System Health () #3"
do_test systemhealthn1 "System Health (None) #1"
do_test systemhealthn2 "System Health (None) #2"
do_test systemhealthn3 "System Health (None) #3"
do_test systemhealthm1 "System Health (Migrate On Red) #1"
do_test systemhealthm2 "System Health (Migrate On Red) #2"
do_test systemhealthm3 "System Health (Migrate On Red) #3"
do_test systemhealtho1 "System Health (Only Green) #1"
do_test systemhealtho2 "System Health (Only Green) #2"
do_test systemhealtho3 "System Health (Only Green) #3"
do_test systemhealthp1 "System Health (Progessive) #1"
do_test systemhealthp2 "System Health (Progessive) #2"
do_test systemhealthp3 "System Health (Progessive) #3"
echo ""
do_test utilization "Placement Strategy - utilization"
do_test minimal "Placement Strategy - minimal"
do_test balanced "Placement Strategy - balanced"
echo ""
do_test placement-stickiness "Optimized Placement Strategy - stickiness"
do_test placement-priority "Optimized Placement Strategy - priority"
do_test placement-location "Optimized Placement Strategy - location"
do_test placement-capacity "Optimized Placement Strategy - capacity"
echo ""
do_test utilization-order1 "Utilization Order - Simple"
do_test utilization-order2 "Utilization Order - Complex"
do_test utilization-order3 "Utilization Order - Migrate"
do_test utilization-order4 "Utilization Order - Live Mirgration (bnc#695440)"
do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
echo ""
do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
echo ""
do_test stopped-monitor-00 "Stopped Monitor - initial start"
do_test stopped-monitor-01 "Stopped Monitor - failed started"
do_test stopped-monitor-02 "Stopped Monitor - started multi-up"
do_test stopped-monitor-03 "Stopped Monitor - stop started"
do_test stopped-monitor-04 "Stopped Monitor - failed stop"
do_test stopped-monitor-05 "Stopped Monitor - start unmanaged"
do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up"
do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up"
do_test stopped-monitor-08 "Stopped Monitor - migrate"
do_test stopped-monitor-09 "Stopped Monitor - unmanage started"
do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up"
do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started"
do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (targer-role="Stopped")"
do_test stopped-monitor-20 "Stopped Monitor - initial stop"
do_test stopped-monitor-21 "Stopped Monitor - stopped single-up"
do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up"
do_test stopped-monitor-23 "Stopped Monitor - start stopped"
do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped"
do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up"
do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped"
do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role="Started")"
do_test stopped-monitor-30 "Stopped Monitor - new node started"
do_test stopped-monitor-31 "Stopped Monitor - new node stopped"

# Ticket constraint tests: each resource kind is exercised against every
# loss-policy (stop, demote, fence, freeze) in the initial/granted/revoked
# states, plus standby-interaction variants (13-24).
echo ""
do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
echo ""
do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
echo ""
do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
echo ""
do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
echo ""
do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
echo ""
do_test template-1 "Template - 1"
do_test template-2 "Template - 2"
do_test template-3 "Template - 3 (merge operations)"
do_test template-coloc-1 "Template - Colocation 1"
do_test template-coloc-2 "Template - Colocation 2"
do_test template-coloc-3 "Template - Colocation 3"
do_test template-order-1 "Template - Order 1"
do_test template-order-2 "Template - Order 2"
do_test template-order-3 "Template - Order 3"
do_test template-ticket "Template - Ticket"
do_test template-rsc-sets-1 "Template - Resource Sets 1"
do_test template-rsc-sets-2 "Template - Resource Sets 2"
do_test template-rsc-sets-3 "Template - Resource Sets 3"
do_test template-rsc-sets-4 "Template - Resource Sets 4"
echo ""
# Summarize pass/fail counts and set the script's exit status.
test_results
diff --git a/pengine/test10/master-demote-2.exp b/pengine/test10/master-demote-2.exp
index 6a8e3aa5b3..09cd987266 100644
--- a/pengine/test10/master-demote-2.exp
+++ b/pengine/test10/master-demote-2.exp
@@ -1,315 +1,315 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1" batch-limit="10" transition_id="0">
<synapse id="0">
<action_set>
- <pseudo_event id="31" operation="running" operation_key="group-1_running_0">
+ <pseudo_event id="30" operation="running" operation_key="group-1_running_0">
<attributes CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="24" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="23" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <rsc_op id="26" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="25" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <rsc_op id="28" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="27" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <pseudo_event id="30" operation="start" operation_key="group-1_start_0"/>
+ <pseudo_event id="29" operation="start" operation_key="group-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="1">
<action_set>
- <pseudo_event id="30" operation="start" operation_key="group-1_start_0">
+ <pseudo_event id="29" operation="start" operation_key="group-1_start_0">
<attributes CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="73" operation="promoted" operation_key="master-1_promoted_0"/>
+ <pseudo_event id="72" operation="promoted" operation_key="master-1_promoted_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="2">
<action_set>
- <rsc_op id="25" operation="monitor" operation_key="r192.168.122.105_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="24" operation="monitor" operation_key="r192.168.122.105_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.105" long-id="group-1:r192.168.122.105" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.105"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="24" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="23" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="3">
<action_set>
- <rsc_op id="24" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="23" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.105" long-id="group-1:r192.168.122.105" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.105"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="30" operation="start" operation_key="group-1_start_0"/>
+ <pseudo_event id="29" operation="start" operation_key="group-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="4">
<action_set>
- <rsc_op id="27" operation="monitor" operation_key="r192.168.122.106_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="26" operation="monitor" operation_key="r192.168.122.106_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.106" long-id="group-1:r192.168.122.106" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.106"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="26" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="25" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="5">
<action_set>
- <rsc_op id="26" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="25" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.106" long-id="group-1:r192.168.122.106" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.106"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="24" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="23" operation="start" operation_key="r192.168.122.105_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <pseudo_event id="30" operation="start" operation_key="group-1_start_0"/>
+ <pseudo_event id="29" operation="start" operation_key="group-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="6">
<action_set>
- <rsc_op id="29" operation="monitor" operation_key="r192.168.122.107_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="28" operation="monitor" operation_key="r192.168.122.107_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.107" long-id="group-1:r192.168.122.107" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.107"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="28" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="27" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="7">
<action_set>
- <rsc_op id="28" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="27" operation="start" operation_key="r192.168.122.107_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="r192.168.122.107" long-id="group-1:r192.168.122.107" class="ocf" provider="heartbeat" type="IPaddr"/>
<attributes CRM_meta_timeout="60000" cidr_netmask="32" crm_feature_set="3.0.6" ip="192.168.122.107"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="26" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="25" operation="start" operation_key="r192.168.122.106_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <pseudo_event id="30" operation="start" operation_key="group-1_start_0"/>
+ <pseudo_event id="29" operation="start" operation_key="group-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="8">
<action_set>
- <rsc_op id="43" operation="monitor" operation_key="lsb-dummy_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="42" operation="monitor" operation_key="lsb-dummy_monitor_5000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="lsb-dummy" long-id="lsb-dummy" class="lsb" type="/usr/share/pacemaker/tests/cts/LSBDummy"/>
<attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="42" operation="start" operation_key="lsb-dummy_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="41" operation="start" operation_key="lsb-dummy_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="9">
<action_set>
- <rsc_op id="42" operation="start" operation_key="lsb-dummy_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="41" operation="start" operation_key="lsb-dummy_start_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="lsb-dummy" long-id="lsb-dummy" class="lsb" type="/usr/share/pacemaker/tests/cts/LSBDummy"/>
<attributes CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="31" operation="running" operation_key="group-1_running_0"/>
+ <pseudo_event id="30" operation="running" operation_key="group-1_running_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="10">
<action_set>
- <rsc_op id="61" operation="monitor" operation_key="stateful-1:0_monitor_16000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="60" operation="monitor" operation_key="stateful-1:0_monitor_16000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="stateful-1:0" long-id="master-1:stateful-1:0" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_op_target_rc="8" CRM_meta_role="Master" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="60" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="59" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="11">
<action_set>
- <rsc_op id="60" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="59" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="stateful-1:0" long-id="master-1:stateful-1:0" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="6" operation="cancel" operation_key="stateful-1:0_monitor_15000" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="5" operation="cancel" operation_key="stateful-1:0_monitor_15000" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
<trigger>
- <pseudo_event id="72" operation="promote" operation_key="master-1_promote_0"/>
+ <pseudo_event id="71" operation="promote" operation_key="master-1_promote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="12">
<action_set>
- <rsc_op id="6" operation="cancel" operation_key="stateful-1:0_monitor_15000" on_node="pcmk-2" on_node_uuid="pcmk-2">
+ <rsc_op id="5" operation="cancel" operation_key="stateful-1:0_monitor_15000" on_node="pcmk-2" on_node_uuid="pcmk-2">
<primitive id="stateful-1:0" long-id="master-1:stateful-1:0" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_operation="monitor" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="13">
<action_set>
- <rsc_op id="63" operation="monitor" operation_key="stateful-1:1_monitor_15000" on_node="pcmk-1" on_node_uuid="pcmk-1">
+ <rsc_op id="62" operation="monitor" operation_key="stateful-1:1_monitor_15000" on_node="pcmk-1" on_node_uuid="pcmk-1">
<primitive id="stateful-1:1" long-id="master-1:stateful-1:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="62" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
+ <rsc_op id="61" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
</trigger>
</inputs>
</synapse>
<synapse id="14">
<action_set>
- <rsc_op id="62" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1">
+ <rsc_op id="61" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1">
<primitive id="stateful-1:1" long-id="master-1:stateful-1:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="2" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
+ <rsc_op id="1" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
</trigger>
<trigger>
- <pseudo_event id="68" operation="start" operation_key="master-1_start_0"/>
+ <pseudo_event id="67" operation="start" operation_key="master-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="15">
<action_set>
- <rsc_op id="2" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1">
+ <rsc_op id="1" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1">
<primitive id="stateful-1:1" long-id="master-1:stateful-1:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="70" operation="stop" operation_key="master-1_stop_0"/>
+ <pseudo_event id="69" operation="stop" operation_key="master-1_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="16" priority="1000000">
<action_set>
- <pseudo_event id="73" operation="promoted" operation_key="master-1_promoted_0">
+ <pseudo_event id="72" operation="promoted" operation_key="master-1_promoted_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="60" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
+ <rsc_op id="59" operation="promote" operation_key="stateful-1:0_promote_0" on_node="pcmk-2" on_node_uuid="pcmk-2"/>
</trigger>
</inputs>
</synapse>
<synapse id="17">
<action_set>
- <pseudo_event id="72" operation="promote" operation_key="master-1_promote_0">
+ <pseudo_event id="71" operation="promote" operation_key="master-1_promote_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="69" operation="running" operation_key="master-1_running_0"/>
+ <pseudo_event id="68" operation="running" operation_key="master-1_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="71" operation="stopped" operation_key="master-1_stopped_0"/>
+ <pseudo_event id="70" operation="stopped" operation_key="master-1_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="18" priority="1000000">
<action_set>
- <pseudo_event id="71" operation="stopped" operation_key="master-1_stopped_0">
+ <pseudo_event id="70" operation="stopped" operation_key="master-1_stopped_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="2" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
+ <rsc_op id="1" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
</trigger>
<trigger>
- <pseudo_event id="70" operation="stop" operation_key="master-1_stop_0"/>
+ <pseudo_event id="69" operation="stop" operation_key="master-1_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="19">
<action_set>
- <pseudo_event id="70" operation="stop" operation_key="master-1_stop_0">
+ <pseudo_event id="69" operation="stop" operation_key="master-1_stop_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs/>
</synapse>
<synapse id="20" priority="1000000">
<action_set>
- <pseudo_event id="69" operation="running" operation_key="master-1_running_0">
+ <pseudo_event id="68" operation="running" operation_key="master-1_running_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="62" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
+ <rsc_op id="61" operation="start" operation_key="stateful-1:1_start_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
</trigger>
<trigger>
- <pseudo_event id="68" operation="start" operation_key="master-1_start_0"/>
+ <pseudo_event id="67" operation="start" operation_key="master-1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="21">
<action_set>
- <pseudo_event id="68" operation="start" operation_key="master-1_start_0">
+ <pseudo_event id="67" operation="start" operation_key="master-1_start_0">
<attributes CRM_meta_clone_max="4" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="60000" crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="71" operation="stopped" operation_key="master-1_stopped_0"/>
+ <pseudo_event id="70" operation="stopped" operation_key="master-1_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="22">
<action_set>
- <pseudo_event id="16" operation="all_stopped" operation_key="all_stopped">
+ <pseudo_event id="15" operation="all_stopped" operation_key="all_stopped">
<attributes crm_feature_set="3.0.6"/>
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="2" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
+ <rsc_op id="1" operation="stop" operation_key="stateful-1:1_stop_0" on_node="pcmk-1" on_node_uuid="pcmk-1"/>
</trigger>
</inputs>
</synapse>
</transition_graph>
diff --git a/pengine/test10/master_monitor_restart.dot b/pengine/test10/master_monitor_restart.dot
new file mode 100644
index 0000000000..3442218dec
--- /dev/null
+++ b/pengine/test10/master_monitor_restart.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"MS_RSC_NATIVE:0_monitor_5000 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/master_monitor_restart.exp b/pengine/test10/master_monitor_restart.exp
new file mode 100644
index 0000000000..86f75819eb
--- /dev/null
+++ b/pengine/test10/master_monitor_restart.exp
@@ -0,0 +1,12 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="MS_RSC_NATIVE:0_monitor_5000" on_node="node1" on_node_uuid="1031448768">
+ <primitive id="MS_RSC_NATIVE:0" long-id="MS_RSC:MS_RSC_NATIVE:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="5000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_op_target_rc="8" CRM_meta_role="Master" CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/master_monitor_restart.scores b/pengine/test10/master_monitor_restart.scores
new file mode 100644
index 0000000000..ce93606c56
--- /dev/null
+++ b/pengine/test10/master_monitor_restart.scores
@@ -0,0 +1,13 @@
+Allocation scores:
+MS_RSC_NATIVE:0 promotion score on node1: 10
+MS_RSC_NATIVE:1 promotion score on none: 0
+clone_color: MS_RSC allocation score on node1: 0
+clone_color: MS_RSC allocation score on node2: 0
+clone_color: MS_RSC_NATIVE:0 allocation score on node1: 11
+clone_color: MS_RSC_NATIVE:0 allocation score on node2: 0
+clone_color: MS_RSC_NATIVE:1 allocation score on node1: 0
+clone_color: MS_RSC_NATIVE:1 allocation score on node2: 0
+native_color: MS_RSC_NATIVE:0 allocation score on node1: 11
+native_color: MS_RSC_NATIVE:0 allocation score on node2: -INFINITY
+native_color: MS_RSC_NATIVE:1 allocation score on node1: -INFINITY
+native_color: MS_RSC_NATIVE:1 allocation score on node2: -INFINITY
diff --git a/pengine/test10/master_monitor_restart.xml b/pengine/test10/master_monitor_restart.xml
new file mode 100644
index 0000000000..b38cd6596d
--- /dev/null
+++ b/pengine/test10/master_monitor_restart.xml
@@ -0,0 +1,79 @@
+<cib epoch="89" num_updates="8" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="node2" update-client="crm_attribute" cib-last-written="Mon Jul 2 11:40:20 2012" have-quorum="1" dc-uuid="1031448768">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.7-fc03be0"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1340915639"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1048225984" type="normal" uname="node2">
+ <instance_attributes id="nodes-1048225984">
+ <nvpair id="nodes-1048225984-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ <node id="1031448768" type="normal" uname="node1">
+ <instance_attributes id="nodes-1031448768">
+ <nvpair id="nodes-1031448768-standby" name="standby" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <master id="MS_RSC">
+ <meta_attributes id="MS_RSC-meta_attributes">
+ <nvpair id="MS_RSC-meta_attributes-master-max" name="master-max" value="1"/>
+ <nvpair id="MS_RSC-meta_attributes-master-node-max" name="master-node-max" value="1"/>
+ <nvpair id="MS_RSC-meta_attributes-clone-max" name="clone-max" value="2"/>
+ <nvpair id="MS_RSC-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
+ <nvpair id="MS_RSC-meta_attributes-notify" name="notify" value="true"/>
+ </meta_attributes>
+ <primitive class="ocf" id="MS_RSC_NATIVE" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="drbd_pgdrive-monitor-4" interval="4s" name="monitor" role="Slave"/>
+ <op id="drbd_pgdrive-monitor-5" interval="5s" name="monitor" role="Master"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+ <constraints/>
+ </configuration>
+ <status>
+ <node_state id="1031448768" uname="node1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_update_resource" shutdown="0">
+ <transient_attributes id="1031448768">
+ <instance_attributes id="status-1031448768">
+ <nvpair id="status-1031448768-probe_complete" name="probe_complete" value="true"/>
+ <nvpair id="status-1031448768-master-MS_RSC_NATIVE.0" name="master-MS_RSC_NATIVE:0" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1031448768">
+ <lrm_resources>
+ <lrm_resource id="MS_RSC_NATIVE:0" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="MS_RSC_NATIVE:0_last_0" operation_key="MS_RSC_NATIVE:0_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.6" transition-key="7:27:0:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" transition-magic="0:0;7:27:0:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" call-id="135" rc-code="0" op-status="0" interval="0" last-run="1341247220" last-rc-change="0" exec-time="78" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="MS_RSC_NATIVE:0_monitor_5000" operation_key="MS_RSC_NATIVE:0_monitor_5000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.6" transition-key="9:16:8:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" transition-magic="0:8;9:16:8:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" call-id="82" rc-code="8" op-status="0" interval="5000" last-rc-change="1341247063" exec-time="33" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="1048225984" uname="node2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_update_resource" shutdown="0">
+ <transient_attributes id="1048225984">
+ <instance_attributes id="status-1048225984">
+ <nvpair id="status-1048225984-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1048225984">
+ <lrm_resources>
+ <lrm_resource id="MS_RSC_NATIVE:0" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="MS_RSC_NATIVE:0_last_0" operation_key="MS_RSC_NATIVE:0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.6" transition-key="5:3:7:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" transition-magic="0:7;5:3:7:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1341246675" last-rc-change="1341246675" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="MS_RSC_NATIVE:1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="MS_RSC_NATIVE:1_last_0" operation_key="MS_RSC_NATIVE:1_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.6" transition-key="10:26:0:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" transition-magic="0:0;10:26:0:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" call-id="126" rc-code="0" op-status="0" interval="0" last-run="1341247219" last-rc-change="0" exec-time="109" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="MS_RSC_NATIVE:1_monitor_5000" operation_key="MS_RSC_NATIVE:1_monitor_5000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.6" transition-key="9:22:8:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" transition-magic="0:8;9:22:8:316d1226-9a84-4153-b3a9-4cc2eafaaf5c" call-id="103" rc-code="8" op-status="0" interval="5000" last-rc-change="1341247145" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
\ No newline at end of file

File Metadata

Mime Type
text/x-diff
Expires
Sat, Nov 23, 7:28 AM (1 d, 4 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1018443
Default Alt Text
(154 KB)

Event Timeline