diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c index 8527e2c71c..d2df90f91e 100644 --- a/crmd/te_callbacks.c +++ b/crmd/te_callbacks.c @@ -1,508 +1,530 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include /* For ONLINESTATUS etc */ void te_update_confirm(const char *event, xmlNode *msg); extern char *te_uuid; gboolean shuttingdown = FALSE; crm_graph_t *transition_graph; crm_trigger_t *transition_trigger = NULL; /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" static const char *get_node_id(xmlNode *rsc_op) { xmlNode *node = rsc_op; while(node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return ID(node); } static void process_resource_updates(xmlXPathObject *xpathObj) { /* */ int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } void te_update_diff(const char *event, xmlNode *msg) { int rc = -1; const char *op = NULL; xmlNode *diff = NULL; xmlXPathObject *xpathObj = NULL; int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; CRM_CHECK(msg != NULL, return); crm_element_value_int(msg, F_CIB_RC, &rc); if(transition_graph == NULL) { crm_debug_3("No graph"); return; } else if(rc < cib_ok) { crm_debug_3("Filter rc=%d (%s)", rc, cib_error2string(rc)); return; } else if(transition_graph->complete == TRUE && fsa_state != S_IDLE && fsa_state != S_TRANSITION_ENGINE && fsa_state != S_POLICY_ENGINE) { crm_debug_2("Filter state=%s, complete=%d", fsa_state2string(fsa_state), transition_graph->complete); return; } op = crm_element_value(msg, F_CIB_OPERATION); diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); cib_diff_version_details( diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_debug("Processing diff (%s): %d.%d.%d -> %d.%d.%d (%s)", op, diff_del_admin_epoch,diff_del_epoch,diff_del_updates, diff_add_admin_epoch,diff_add_epoch,diff_add_updates, fsa_state2string(fsa_state)); log_cib_diff(LOG_DEBUG_2, diff, op); if(cib_config_changed(NULL, NULL, &diff)) { abort_transition(INFINITY, tg_restart, "Non-status change", diff); goto bail; /* configuration changed */ } + /* Tickets Attributes - Added/Updated */ + xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_CIB_TAG_TICKETS); + if(xpathObj && 
xpathObj->nodesetval->nodeNr > 0) { + xmlNode *aborted = getXpathResult(xpathObj, 0); + abort_transition(INFINITY, tg_restart, "Ticket attribute: update", aborted); + goto bail; + + } else if(xpathObj) { + xmlXPathFreeObject(xpathObj); + } + + /* Tickets Attributes - Removed */ + xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_REMOVED"//"XML_CIB_TAG_TICKETS); + if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { + xmlNode *aborted = getXpathResult(xpathObj, 0); + abort_transition(INFINITY, tg_restart, "Ticket attribute: removal", aborted); + goto bail; + + } else if(xpathObj) { + xmlXPathFreeObject(xpathObj); + } + /* Transient Attributes - Added/Updated */ xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_TAG_TRANSIENT_NODEATTRS"//"XML_CIB_TAG_NVPAIR); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { int lpc; for(lpc = 0; lpc < xpathObj->nodesetval->nodeNr; lpc++) { xmlNode *attr = getXpathResult(xpathObj, lpc); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = NULL; if(safe_str_eq(CRM_OP_PROBED, name)) { value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); } if(crm_is_true(value) == FALSE) { abort_transition(INFINITY, tg_restart, "Transient attribute: update", attr); crm_log_xml_trace(attr, "Abort"); goto bail; } } } else if(xpathObj) { xmlXPathFreeObject(xpathObj); } /* Transient Attributes - Removed */ xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_REMOVED"//"XML_TAG_TRANSIENT_NODEATTRS); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: removal", aborted); goto bail; } else if(xpathObj) { xmlXPathFreeObject(xpathObj); } /* Check for node state updates... 
possibly from a shutdown we requested */ xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_CIB_TAG_STATE); if(xpathObj) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *node = getXpathResult(xpathObj, lpc); const char *event_node = crm_element_value(node, XML_ATTR_ID); const char *ccm_state = crm_element_value(node, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node, XML_CIB_ATTR_HASTATE); const char *shutdown_s = crm_element_value(node, XML_CIB_ATTR_SHUTDOWN); const char *crmd_state = crm_element_value(node, XML_CIB_ATTR_CRMDSTATE); if(safe_str_eq(ccm_state, XML_BOOLEAN_FALSE) || safe_str_eq(ha_state, DEADSTATUS) || safe_str_eq(crmd_state, CRMD_JOINSTATE_DOWN)) { crm_action_t *shutdown = match_down_event(0, event_node, NULL); if(shutdown != NULL) { const char *task = crm_element_value(shutdown->xml, XML_LRM_ATTR_TASK); if(safe_str_neq(task, CRM_OP_FENCE)) { /* Wait for stonithd to tell us it is complete via tengine_stonith_callback() */ crm_debug("Confirming %s op %d", task, shutdown->id); /* match->confirmed = TRUE; */ stop_te_timer(shutdown->timer); update_graph(transition_graph, shutdown); trigger_graph(); } } else { crm_info("Stonith/shutdown of %s not matched", event_node); abort_transition(INFINITY, tg_restart, "Node failure", node); } fail_incompletable_actions(transition_graph, event_node); } if(shutdown_s) { int shutdown = crm_parse_int(shutdown_s, NULL); if(shutdown > 0) { crm_info("Aborting on "XML_CIB_ATTR_SHUTDOWN" attribute for %s", event_node); abort_transition(INFINITY, tg_restart, "Shutdown request", node); } } } xmlXPathFreeObject(xpathObj); } /* * Check for and fast-track the processing of LRM refreshes * In large clusters this can result in _huge_ speedups * * Unfortunately we can only do so when there are no pending actions * Otherwise we could miss updates we're waiting for and stall * */ xpathObj = NULL; if(transition_graph->pending == 0) { xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RESOURCE); } if(xpathObj) { int updates = xpathObj->nodesetval->nodeNr; if(updates > 1) { /* Updates by, or in response to, TE actions will never contain updates * for more than one resource at a time */ crm_info("Detected LRM refresh - %d resources updated: Skipping all resource events", updates); abort_transition(INFINITY, tg_restart, "LRM Refresh", diff); goto bail; } xmlXPathFreeObject(xpathObj); } /* Process operation updates */ xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RSC_OP); if(xpathObj) { process_resource_updates(xpathObj); xmlXPathFreeObject(xpathObj); } /* Detect deleted (as opposed to replaced or added) actions - eg. 
crm_resource -C */ xpathObj = xpath_search(diff, "//"XML_TAG_DIFF_REMOVED"//"XML_LRM_TAG_RSC_OP); if(xpathObj) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { int max = 0; const char *op_id = NULL; char *rsc_op_xpath = NULL; xmlXPathObject *op_match = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); op_id = ID(match); max = strlen(rsc_op_template) + strlen(op_id) + 1; crm_malloc0(rsc_op_xpath, max); snprintf(rsc_op_xpath, max, rsc_op_template, op_id); op_match = xpath_search(diff, rsc_op_xpath); if(op_match == NULL || op_match->nodesetval->nodeNr == 0) { /* Prevent false positives by matching cancelations too */ const char *node = get_node_id(match); crm_action_t *cancelled = get_cancel_action(op_id, node); if(cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", rsc_op_xpath, op_id, node); abort_transition(INFINITY, tg_restart, "Resource op removal", match); if(op_match) { xmlXPathFreeObject(op_match); } crm_free(rsc_op_xpath); goto bail; } else { crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", op_id, node, cancelled->id); } } if(op_match) { xmlXPathFreeObject(op_match); } crm_free(rsc_op_xpath); } } bail: if(xpathObj) { xmlXPathFreeObject(xpathObj); } } gboolean process_te_message(xmlNode *msg, xmlNode *xml_data) { const char *from = crm_element_value(msg, F_ORIG); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *ref = crm_element_value(msg, XML_ATTR_REFERENCE); const char *op = crm_element_value(msg, F_CRM_TASK); const char *type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_debug_2("Processing %s (%s) message", op, ref); crm_log_xml(LOG_DEBUG_3, "ipc", msg); if(op == NULL){ /* error */ } else if(sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_TENGINE) != 0) { crm_debug_2("Bad sys-to %s", crm_str(sys_to)); return FALSE; } else if(safe_str_eq(op, CRM_OP_INVOKE_LRM) && safe_str_eq(sys_from, CRM_SYSTEM_LRMD) /* && safe_str_eq(type, XML_ATTR_RESPONSE) */ ){ xmlXPathObject *xpathObj = NULL; crm_log_xml(LOG_DEBUG_2, "Processing (N)ACK", msg); crm_info("Processing (N)ACK %s from %s", crm_element_value(msg, XML_ATTR_REFERENCE), from); xpathObj = xpath_search(xml_data, "//"XML_LRM_TAG_RSC_OP); if(xpathObj) { process_resource_updates(xpathObj); xmlXPathFreeObject(xpathObj); xpathObj = NULL; } else { crm_log_xml(LOG_ERR, "Invalid (N)ACK", msg); return FALSE; } } else { crm_err("Unknown command: %s::%s from %s", type, op, sys_from); } crm_debug_3("finished processing message"); return TRUE; } void tengine_stonith_callback( stonith_t *stonith, const xmlNode *msg, int call_id, int rc, xmlNode *output, void *userdata) { char *uuid = NULL; int target_rc = -1; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; CRM_CHECK(userdata != NULL, return); crm_log_xml_info(output, "StonithOp"); crm_info("Stonith operation %d/%s: %s (%d)", call_id, (char*)userdata, stonith_error2string(rc), rc); if(AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, &target_rc), crm_err("Invalid event detected"); goto bail; ); if(transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id 
!= transition_id) { crm_info("Ignoring STONITH action initiated outside" " of the current transition"); goto bail; } /* this will mark the event complete if a match is found */ action = get_action(stonith_id, TRUE); if(action == NULL) { crm_err("Stonith action not matched"); goto bail; } if(rc == stonith_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); crm_info("Stonith of %s passed", target); send_stonith_update(action, target, uuid); } else { const char *target = crm_element_value_const(action->xml, XML_LRM_ATTR_TARGET); const char *allow_fail = crm_meta_value(action->params, XML_ATTR_TE_ALLOWFAIL); action->failed = TRUE; if(crm_is_true(allow_fail) == FALSE) { crm_err("Stonith of %s failed (%d)... aborting transition.", target, rc); abort_transition(INFINITY, tg_restart, "Stonith failed", NULL); } } update_graph(transition_graph, action); trigger_graph(); bail: crm_free(userdata); crm_free(uuid); return; } void cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char*)user_data, cib_error2string(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char*)user_data); } crm_free(user_data); } void cib_action_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { crm_err("Update %d FAILED: %s", call_id, cib_error2string(rc)); } } void cib_failcount_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { crm_err("Update %d FAILED: %s", call_id, cib_error2string(rc)); } } gboolean action_timer_callback(gpointer data) { crm_action_timer_t *timer = NULL; CRM_CHECK(data != NULL, return FALSE); timer = (crm_action_timer_t*)data; stop_te_timer(timer); crm_warn("Timer popped (timeout=%d, abort_level=%d, complete=%s)", timer->timeout, transition_graph->abort_priority, transition_graph->complete?"true":"false"); CRM_CHECK(timer->action != NULL, return FALSE); if(transition_graph->complete) { crm_warn("Ignoring timeout while not in transition"); } else if(timer->reason == timeout_action_warn) { print_action( LOG_WARNING,"Action missed its timeout: ", timer->action); /* Don't check the FSA state * * We might also be in S_INTEGRATION or some other state waiting for this * action so we can close the transition and continue */ } else { /* fail the action */ gboolean send_update = TRUE; const char *task = crm_element_value(timer->action->xml, XML_LRM_ATTR_TASK); print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action); timer->action->failed = TRUE; timer->action->confirmed = TRUE; abort_transition(INFINITY, tg_restart, "Action lost", NULL); update_graph(transition_graph, timer->action); trigger_graph(); if(timer->action->type != action_type_rsc) { send_update = FALSE; } else if(safe_str_eq(task, "cancel")) { /* we dont need to update the CIB with these */ send_update = FALSE; } if(send_update) { /* cib_action_update(timer->action, LRM_OP_PENDING, EXECRA_STATUS_UNKNOWN); */ cib_action_update(timer->action, LRM_OP_TIMEOUT, EXECRA_UNKNOWN_ERROR); } } return FALSE; } diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h index 56a17af795..4308e7eaf6 100644 --- a/include/crm/msg_xml.h +++ b/include/crm/msg_xml.h @@ -1,303 +1,309 @@ /* * Copyright (C) 2004 
Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef XML_TAGS__H #define XML_TAGS__H #define CIB_OPTIONS_FIRST "cib-bootstrap-options" #define F_CRM_DATA "crm_xml" #define F_CRM_TASK "crm_task" #define F_CRM_HOST_TO "crm_host_to" #define F_CRM_MSG_TYPE F_SUBTYPE #define F_CRM_SYS_TO "crm_sys_to" #define F_CRM_SYS_FROM "crm_sys_from" #define F_CRM_HOST_FROM F_ORIG #define F_CRM_REFERENCE XML_ATTR_REFERENCE #define F_CRM_VERSION XML_ATTR_VERSION #define F_CRM_ORIGIN "origin" #define F_CRM_USER "crm_user" #define F_CRM_JOIN_ID "join_id" #define F_CRM_ELECTION_ID "election-id" #define F_CRM_ELECTION_AGE_S "election-age-sec" #define F_CRM_ELECTION_AGE_US "election-age-nano-sec" #define F_CRM_ELECTION_OWNER "election-owner" #define F_CRM_TGRAPH "crm-tgraph" #define F_CRM_TGRAPH_INPUT "crm-tgraph-in" /*---- Common tags/attrs */ #define XML_DIFF_MARKER "__crm_diff_marker__" #define XML_ATTR_TAGNAME F_XML_TAGNAME #define XML_TAG_CIB "cib" #define XML_TAG_FAILED "failed" #define XML_ATTR_CRM_VERSION "crm_feature_set" #define XML_ATTR_DIGEST "digest" #define XML_ATTR_VALIDATION "validate-with" #define XML_ATTR_QUORUM_PANIC "no-quorum-panic" #define XML_ATTR_HAVE_QUORUM "have-quorum" #define XML_ATTR_EXPECTED_VOTES "expected-quorum-votes" #define XML_ATTR_GENERATION "epoch" #define XML_ATTR_GENERATION_ADMIN "admin_epoch" #define XML_ATTR_NUMUPDATES "num_updates" #define XML_ATTR_TIMEOUT "timeout" #define XML_ATTR_ORIGIN "crm-debug-origin" #define XML_ATTR_TSTAMP "crm-timestamp" #define XML_CIB_ATTR_WRITTEN "cib-last-written" #define XML_ATTR_VERSION "version" #define XML_ATTR_DESC "description" #define XML_ATTR_ID "id" #define XML_ATTR_IDREF "id-ref" #define XML_ATTR_ID_LONG "long-id" #define XML_ATTR_TYPE "type" #define XML_ATTR_FILTER_TYPE "type-filter" #define XML_ATTR_FILTER_ID "id-filter" #define XML_ATTR_FILTER_PRIORITY "priority-filter" #define XML_ATTR_VERBOSE "verbose" #define XML_ATTR_OP "op" #define XML_ATTR_DC "is_dc" #define XML_ATTR_DC_UUID "dc-uuid" #define XML_ATTR_UPDATE_ORIG "update-origin" #define XML_ATTR_UPDATE_CLIENT "update-client" #define XML_ATTR_UPDATE_USER "update-user" #define XML_BOOLEAN_TRUE "true" #define XML_BOOLEAN_FALSE "false" #define XML_BOOLEAN_YES XML_BOOLEAN_TRUE #define XML_BOOLEAN_NO XML_BOOLEAN_FALSE #define XML_TAG_OPTIONS "options" /*---- top level tags/attrs */ #define XML_MSG_TAG "crm_message" #define XML_MSG_TAG_DATA "msg_data" #define XML_ATTR_REQUEST "request" #define XML_ATTR_RESPONSE "response" #define XML_ATTR_UNAME "uname" #define XML_ATTR_UUID "id" #define XML_ATTR_REFERENCE "reference" #define XML_FAIL_TAG_RESOURCE "failed_resource" #define XML_FAILRES_ATTR_RESID "resource_id" #define XML_FAILRES_ATTR_REASON "reason" #define XML_FAILRES_ATTR_RESSTATUS "resource_status" #define XML_CRM_TAG_PING "ping_response" #define XML_PING_ATTR_STATUS "result" #define XML_PING_ATTR_SYSFROM 
"crm_subsystem" #define XML_TAG_FRAGMENT "cib_fragment" #define XML_ATTR_RESULT "result" #define XML_ATTR_SECTION "section" #define XML_FAIL_TAG_CIB "failed_update" #define XML_FAILCIB_ATTR_ID "id" #define XML_FAILCIB_ATTR_OBJTYPE "object_type" #define XML_FAILCIB_ATTR_OP "operation" #define XML_FAILCIB_ATTR_REASON "reason" /*---- CIB specific tags/attrs */ #define XML_CIB_TAG_SECTION_ALL "all" #define XML_CIB_TAG_CONFIGURATION "configuration" #define XML_CIB_TAG_STATUS "status" #define XML_CIB_TAG_RESOURCES "resources" #define XML_CIB_TAG_NODES "nodes" #define XML_CIB_TAG_DOMAINS "domains" #define XML_CIB_TAG_CONSTRAINTS "constraints" #define XML_CIB_TAG_CRMCONFIG "crm_config" #define XML_CIB_TAG_OPCONFIG "op_defaults" #define XML_CIB_TAG_RSCCONFIG "rsc_defaults" #define XML_CIB_TAG_ACLS "acls" #define XML_CIB_TAG_STATE "node_state" #define XML_CIB_TAG_NODE "node" #define XML_CIB_TAG_DOMAIN "domain" #define XML_CIB_TAG_CONSTRAINT "constraint" #define XML_CIB_TAG_NVPAIR "nvpair" #define XML_CIB_TAG_PROPSET "cluster_property_set" #define XML_TAG_ATTR_SETS "instance_attributes" #define XML_TAG_META_SETS "meta_attributes" #define XML_TAG_ATTRS "attributes" #define XML_TAG_PARAMS "parameters" #define XML_TAG_PARAM "param" #define XML_TAG_UTILIZATION "utilization" #define XML_TAG_RESOURCE_REF "resource_ref" #define XML_CIB_TAG_RESOURCE "primitive" #define XML_CIB_TAG_GROUP "group" #define XML_CIB_TAG_INCARNATION "clone" #define XML_CIB_TAG_MASTER "master" #define XML_RSC_ATTR_RESTART "restart-type" #define XML_RSC_ATTR_ORDERED "ordered" #define XML_RSC_ATTR_INTERLEAVE "interleave" #define XML_RSC_ATTR_INCARNATION "clone" #define XML_RSC_ATTR_INCARNATION_MAX "clone-max" #define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max" #define XML_RSC_ATTR_MASTER_MAX "master-max" #define XML_RSC_ATTR_MASTER_NODEMAX "master-node-max" #define XML_RSC_ATTR_STATE "clone-state" #define XML_RSC_ATTR_MANAGED "is-managed" #define XML_RSC_ATTR_TARGET_ROLE "target-role" #define XML_RSC_ATTR_UNIQUE "globally-unique" #define XML_RSC_ATTR_NOTIFY "notify" #define XML_RSC_ATTR_STICKINESS "resource-stickiness" #define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold" #define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout" #define XML_RSC_ATTR_MULTIPLE "multiple-active" #define XML_RSC_ATTR_PRIORITY "priority" #define XML_OP_ATTR_ON_FAIL "on-fail" #define XML_OP_ATTR_START_DELAY "start-delay" #define XML_OP_ATTR_ALLOW_MIGRATE "allow-migrate" #define XML_OP_ATTR_ORIGIN "interval-origin" #define XML_OP_ATTR_PENDING "record-pending" #define XML_CIB_TAG_LRM "lrm" #define XML_LRM_TAG_RESOURCES "lrm_resources" #define XML_LRM_TAG_RESOURCE "lrm_resource" #define XML_LRM_TAG_AGENTS "lrm_agents" #define XML_LRM_TAG_AGENT "lrm_agent" #define XML_LRM_TAG_RSC_OP "lrm_rsc_op" #define XML_AGENT_ATTR_CLASS "class" #define XML_AGENT_ATTR_PROVIDER "provider" #define XML_LRM_TAG_ATTRIBUTES "attributes" #define XML_CIB_ATTR_REPLACE "replace" #define XML_CIB_ATTR_SOURCE "source" #define XML_CIB_ATTR_HEALTH "health" #define XML_CIB_ATTR_WEIGHT "weight" #define XML_CIB_ATTR_PRIORITY "priority" #define XML_CIB_ATTR_CLEAR "clear_on" #define XML_CIB_ATTR_SOURCE "source" #define XML_CIB_ATTR_JOINSTATE "join" #define XML_CIB_ATTR_EXPSTATE "expected" #define XML_CIB_ATTR_INCCM "in_ccm" #define XML_CIB_ATTR_CRMDSTATE "crmd" #define XML_CIB_ATTR_HASTATE "ha" #define XML_CIB_ATTR_SHUTDOWN "shutdown" #define XML_CIB_ATTR_STONITH "stonith" #define XML_LRM_ATTR_INTERVAL "interval" #define XML_LRM_ATTR_TASK "operation" #define XML_LRM_ATTR_TASK_KEY 
"operation_key" #define XML_LRM_ATTR_TARGET "on_node" #define XML_LRM_ATTR_TARGET_UUID "on_node_uuid" #define XML_LRM_ATTR_RSCID "rsc-id" #define XML_LRM_ATTR_OPSTATUS "op-status" #define XML_LRM_ATTR_RC "rc-code" #define XML_LRM_ATTR_CALLID "call-id" #define XML_LRM_ATTR_OP_DIGEST "op-digest" #define XML_LRM_ATTR_OP_RESTART "op-force-restart" #define XML_LRM_ATTR_RESTART_DIGEST "op-restart-digest" #define XML_LRM_ATTR_MIGRATE_SOURCE "migrate_source" #define XML_LRM_ATTR_MIGRATE_TARGET "migrate_target" #define XML_TAG_GRAPH "transition_graph" #define XML_GRAPH_TAG_RSC_OP "rsc_op" #define XML_GRAPH_TAG_PSEUDO_EVENT "pseudo_event" #define XML_GRAPH_TAG_CRM_EVENT "crm_event" #define XML_TAG_RULE "rule" #define XML_RULE_ATTR_SCORE "score" #define XML_RULE_ATTR_SCORE_ATTRIBUTE "score-attribute" #define XML_RULE_ATTR_SCORE_MANGLED "score-attribute-mangled" #define XML_RULE_ATTR_ROLE "role" #define XML_RULE_ATTR_RESULT "result" #define XML_RULE_ATTR_BOOLEAN_OP "boolean-op" #define XML_TAG_EXPRESSION "expression" #define XML_EXPR_ATTR_ATTRIBUTE "attribute" #define XML_EXPR_ATTR_OPERATION "operation" #define XML_EXPR_ATTR_VALUE "value" #define XML_EXPR_ATTR_TYPE "type" #define XML_CONS_TAG_RSC_DEPEND "rsc_colocation" #define XML_CONS_TAG_RSC_ORDER "rsc_order" #define XML_CONS_TAG_RSC_LOCATION "rsc_location" +#define XML_CONS_TAG_RSC_TICKET "rsc_ticket" #define XML_CONS_TAG_RSC_SET "resource_set" #define XML_CONS_ATTR_SYMMETRICAL "symmetrical" #define XML_COLOC_ATTR_SOURCE "rsc" #define XML_COLOC_ATTR_SOURCE_ROLE "rsc-role" #define XML_COLOC_ATTR_TARGET "with-rsc" #define XML_COLOC_ATTR_TARGET_ROLE "with-rsc-role" #define XML_COLOC_ATTR_NODE_ATTR "node-attribute" #define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance" #define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance" #define XML_ORDER_ATTR_FIRST "first" #define XML_ORDER_ATTR_THEN "then" #define XML_ORDER_ATTR_FIRST_ACTION "first-action" #define XML_ORDER_ATTR_THEN_ACTION "then-action" #define XML_ORDER_ATTR_FIRST_INSTANCE "first-instance" #define XML_ORDER_ATTR_THEN_INSTANCE "then-instance" #define XML_ORDER_ATTR_KIND "kind" +#define XML_TICKET_ATTR_TICKET "ticket" +#define XML_TICKET_ATTR_LOSS_POLICY "loss-policy" + #define XML_NVPAIR_ATTR_NAME "name" #define XML_NVPAIR_ATTR_VALUE "value" #define XML_NODE_ATTR_STATE "state" #define XML_CONFIG_ATTR_DC_DEADTIME "dc-deadtime" #define XML_CONFIG_ATTR_ELECTION_FAIL "election-timeout" #define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation" #define XML_CONFIG_ATTR_RECHECK "cluster-recheck-interval" #define XML_CIB_TAG_GENERATION_TUPPLE "generation_tuple" #define XML_ATTR_TRANSITION_MAGIC "transition-magic" #define XML_ATTR_TRANSITION_KEY "transition-key" #define XML_ATTR_TE_NOWAIT "op_no_wait" #define XML_ATTR_TE_TARGET_RC "op_target_rc" #define XML_ATTR_TE_ALLOWFAIL "op_allow_fail" #define XML_ATTR_LRM_PROBE "lrm-is-probe" #define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes" #define XML_TAG_DIFF_ADDED "diff-added" #define XML_TAG_DIFF_REMOVED "diff-removed" #define XML_ACL_TAG_USER "acl_user" #define XML_ACL_TAG_ROLE "acl_role" #define XML_ACL_TAG_ROLE_REF "role_ref" #define XML_ACL_TAG_READ "read" #define XML_ACL_TAG_WRITE "write" #define XML_ACL_TAG_DENY "deny" #define XML_ACL_ATTR_REF "ref" #define XML_ACL_ATTR_TAG "tag" #define XML_ACL_ATTR_XPATH "xpath" #define XML_ACL_ATTR_ATTRIBUTE "attribute" +#define XML_CIB_TAG_TICKETS "tickets" + #include #define ID(x) crm_element_value(x, XML_ATTR_ID) #define INSTANCE(x) crm_element_value(x, XML_CIB_ATTR_INSTANCE) #define TSTAMP(x) 
crm_element_value(x, XML_ATTR_TSTAMP) #define TYPE(x) crm_element_name(x) #define NAME(x) crm_element_value(x, XML_NVPAIR_ATTR_NAME) #define VALUE(x) crm_element_value(x, XML_NVPAIR_ATTR_VALUE) #endif diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h index 355d494d9b..13cbec113e 100644 --- a/include/crm/pengine/status.h +++ b/include/crm/pengine/status.h @@ -1,299 +1,309 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PENGINE_STATUS__H #define PENGINE_STATUS__H #include #include #include typedef struct node_s node_t; typedef struct pe_action_s action_t; typedef struct pe_action_s pe_action_t; typedef struct resource_s resource_t; +typedef struct ticket_s ticket_t; typedef enum no_quorum_policy_e { no_quorum_freeze, no_quorum_stop, no_quorum_ignore, no_quorum_suicide } no_quorum_policy_t; enum node_type { node_ping, node_member }; enum pe_restart { pe_restart_restart, pe_restart_ignore }; enum pe_find { pe_find_renamed = 0x001, pe_find_partial = 0x002, pe_find_clone = 0x004, pe_find_current = 0x008, pe_find_inactive = 0x010, }; #define pe_flag_have_quorum 0x00000001ULL #define pe_flag_symmetric_cluster 0x00000002ULL #define pe_flag_is_managed_default 0x00000004ULL #define pe_flag_maintenance_mode 0x00000008ULL #define pe_flag_stonith_enabled 0x00000010ULL #define pe_flag_have_stonith_resource 0x00000020ULL #define pe_flag_stop_rsc_orphans 0x00000100ULL #define pe_flag_stop_action_orphans 0x00000200ULL #define pe_flag_stop_everything 0x00000400ULL #define pe_flag_start_failure_fatal 0x00001000ULL #define pe_flag_remove_after_stop 0x00002000ULL #define pe_flag_startup_probes 0x00010000ULL #define pe_flag_have_status 0x00020000ULL typedef struct pe_working_set_s { xmlNode *input; ha_time_t *now; /* options extracted from the input */ char *dc_uuid; node_t *dc_node; const char *stonith_action; const char *placement_strategy; unsigned long long flags; int stonith_timeout; int default_resource_stickiness; no_quorum_policy_t no_quorum_policy; GHashTable *config_hash; GHashTable *domains; + GHashTable *tickets; GListPtr nodes; GListPtr resources; GListPtr placement_constraints; GListPtr ordering_constraints; GListPtr colocation_constraints; + GListPtr ticket_constraints; GListPtr actions; xmlNode *failed; xmlNode *op_defaults; xmlNode *rsc_defaults; /* stats */ int num_synapse; int max_valid_nodes; int order_id; int action_id; /* final output */ xmlNode *graph; } pe_working_set_t; struct node_shared_s { const char *id; const char *uname; gboolean online; gboolean standby; gboolean standby_onfail; gboolean pending; gboolean unclean; gboolean shutdown; gboolean expected_up; gboolean is_dc; int num_resources; GListPtr running_rsc; /* resource_t* */ GListPtr allocated_rsc; /* resource_t* */ GHashTable *attrs; /* char* => char* */ enum node_type type; GHashTable *utilization; }; struct 
node_s { int weight; gboolean fixed; int count; struct node_shared_s *details; }; #include #define pe_rsc_orphan 0x00000001ULL #define pe_rsc_managed 0x00000002ULL #define pe_rsc_notify 0x00000010ULL #define pe_rsc_unique 0x00000020ULL #define pe_rsc_can_migrate 0x00000040ULL #define pe_rsc_provisional 0x00000100ULL #define pe_rsc_allocating 0x00000200ULL #define pe_rsc_merging 0x00000400ULL #define pe_rsc_failed 0x00010000ULL #define pe_rsc_shutdown 0x00020000ULL #define pe_rsc_runnable 0x00040000ULL #define pe_rsc_start_pending 0x00080000ULL #define pe_rsc_starting 0x00100000ULL #define pe_rsc_stopping 0x00200000ULL #define pe_rsc_failure_ignored 0x01000000ULL enum pe_graph_flags { pe_graph_none = 0x00000, pe_graph_updated_first = 0x00001, pe_graph_updated_then = 0x00002, pe_graph_disable = 0x00004, }; enum pe_action_flags { pe_action_pseudo = 0x00001, pe_action_runnable = 0x00002, pe_action_optional = 0x00004, pe_action_print_always = 0x00008, pe_action_have_node_attrs = 0x00010, pe_action_failure_is_fatal = 0x00020, pe_action_implied_by_stonith = 0x00040, pe_action_allow_reload_conversion = 0x00080, pe_action_dumped = 0x00100, pe_action_processed = 0x00200, pe_action_clear = 0x00400, pe_action_dangle = 0x00800, }; struct resource_s { char *id; char *clone_name; char *long_name; xmlNode *xml; xmlNode *ops_xml; resource_t *parent; void *variant_opaque; enum pe_obj_types variant; resource_object_functions_t *fns; resource_alloc_functions_t *cmds; enum rsc_recovery_type recovery_type; enum pe_restart restart_type; int priority; int stickiness; int sort_index; int failure_timeout; int effective_priority; int migration_threshold; unsigned long long flags; GListPtr rsc_cons_lhs; /* rsc_colocation_t* */ GListPtr rsc_cons; /* rsc_colocation_t* */ GListPtr rsc_location; /* rsc_to_node_t* */ GListPtr actions; /* action_t* */ + GListPtr rsc_tickets; /* rsc_ticket* */ node_t *allocated_to; GListPtr running_on; /* node_t* */ GHashTable *known_on; /* node_t* */ GHashTable *allowed_nodes; /* node_t* */ enum rsc_role_e role; enum rsc_role_e next_role; GHashTable *meta; GHashTable *parameters; GHashTable *utilization; GListPtr children; /* resource_t* */ GListPtr dangling_migrations; /* node_t* */ }; struct pe_action_s { int id; int priority; resource_t *rsc; node_t *node; xmlNode *op_entry; char *task; char *uuid; enum pe_action_flags flags; enum rsc_start_requirement needs; enum action_fail_response on_fail; enum rsc_role_e fail_role; action_t *pre_notify; action_t *pre_notified; action_t *post_notify; action_t *post_notified; int seen_count; GHashTable *meta; GHashTable *extra; GListPtr actions_before; /* action_warpper_t* */ GListPtr actions_after; /* action_warpper_t* */ }; typedef struct notify_data_s { GHashTable *keys; const char *action; action_t *pre; action_t *post; action_t *pre_done; action_t *post_done; GListPtr active; /* notify_entry_t* */ GListPtr inactive; /* notify_entry_t* */ GListPtr start; /* notify_entry_t* */ GListPtr stop; /* notify_entry_t* */ GListPtr demote; /* notify_entry_t* */ GListPtr promote; /* notify_entry_t* */ GListPtr master; /* notify_entry_t* */ GListPtr slave; /* notify_entry_t* */ } notify_data_t; +struct ticket_s { + char *id; + gboolean granted; + time_t last_granted; +}; + gboolean cluster_status(pe_working_set_t *data_set); extern void set_working_set_defaults(pe_working_set_t *data_set); extern void cleanup_calculations(pe_working_set_t *data_set); extern resource_t *pe_find_resource(GListPtr rsc_list, const char *id_rh); extern node_t 
*pe_find_node(GListPtr node_list, const char *uname); extern node_t *pe_find_node_id(GListPtr node_list, const char *id); extern GListPtr find_operations( const char *rsc, const char *node, gboolean active_filter, pe_working_set_t *data_set); #endif diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c index bf4821a20e..91894edb44 100644 --- a/lib/cib/cib_attrs.c +++ b/lib/cib/cib_attrs.c @@ -1,513 +1,538 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define attr_msg(level, fmt, args...) do { \ if(to_console) { \ printf(fmt"\n", ##args); \ } else { \ do_crm_log(level, fmt , ##args); \ } \ } while(0) extern enum cib_errors find_nvpair_attr_delegate( cib_t *the_cib, const char *attr, const char *section, const char *node_uuid, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, gboolean to_console, char **value, const char *user_name) { int offset = 0; static int xpath_max = 1024; enum cib_errors rc = cib_ok; char *xpath_string = NULL; xmlNode *xml_search = NULL; const char *set_type = NULL; - + const char *node_type = NULL; + if (attr_set_type) { set_type = attr_set_type; } else { set_type = XML_TAG_ATTR_SETS; } CRM_ASSERT(value != NULL); *value = NULL; if(safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { node_uuid = NULL; set_type = XML_CIB_TAG_PROPSET; } else if(safe_str_eq(section, XML_CIB_TAG_OPCONFIG) || safe_str_eq(section, XML_CIB_TAG_RSCCONFIG)) { node_uuid = NULL; set_type = XML_TAG_META_SETS; + } else if(safe_str_eq(section, XML_CIB_TAG_TICKETS)) { + node_uuid = NULL; + section = XML_CIB_TAG_STATUS; + node_type = XML_CIB_TAG_TICKETS; + } else if(node_uuid == NULL) { return cib_missing_data; } crm_malloc0(xpath_string, xpath_max); offset += snprintf(xpath_string + offset, xpath_max - offset, "%s", get_object_path(section)); - if(node_uuid) { + if(safe_str_eq(node_type, XML_CIB_TAG_TICKETS)) { + offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s", node_type); + + } else if(node_uuid) { const char *node_type = XML_CIB_TAG_NODE; if(safe_str_eq(section, XML_CIB_TAG_STATUS)) { node_type = XML_CIB_TAG_STATE; set_type = XML_TAG_TRANSIENT_NODEATTRS; } offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s[@id='%s']", node_type, node_uuid); } if(set_name) { offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s[@id='%s']", set_type, set_name); } else { offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s", set_type); } offset += snprintf(xpath_string + offset, xpath_max - offset, "//nvpair["); if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max - offset, "@id='%s'", attr_id); } if(attr_name) { if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max 
- offset, " and "); } offset += snprintf(xpath_string + offset, xpath_max - offset, "@name='%s'", attr_name); } offset += snprintf(xpath_string + offset, xpath_max - offset, "]"); rc = the_cib->cmds->delegated_variant_op( the_cib, CIB_OP_QUERY, NULL, xpath_string, NULL, &xml_search, cib_sync_call|cib_scope_local|cib_xpath, user_name); if(rc != cib_ok) { do_crm_log(LOG_DEBUG_2, "Query failed for attribute %s (section=%s, node=%s, set=%s, xpath=%s): %s", attr_name, section, crm_str(node_uuid), crm_str(set_name), xpath_string, cib_error2string(rc)); goto done; } crm_log_xml_debug(xml_search, "Match"); if(xml_has_children(xml_search)) { xmlNode *child = NULL; rc = cib_missing_data; attr_msg(LOG_WARNING, "Multiple attributes match name=%s", attr_name); for(child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { attr_msg(LOG_INFO, " Value: %s \t(id=%s)", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else { const char *tmp = crm_element_value(xml_search, attr); if(tmp) { *value = crm_strdup(tmp); } } done: crm_free(xpath_string); free_xml(xml_search); return rc; } enum cib_errors update_attr_delegate(cib_t *the_cib, int call_options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name) { const char *tag = NULL; enum cib_errors rc = cib_ok; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; char *local_attr_id = NULL; char *local_set_name = NULL; gboolean use_attributes_tag = FALSE; CRM_CHECK(section != NULL, return cib_missing); CRM_CHECK(attr_value != NULL, return cib_missing); CRM_CHECK(attr_name != NULL || attr_id != NULL, return cib_missing); rc = find_nvpair_attr_delegate(the_cib, XML_ATTR_ID, section, node_uuid, set_type, set_name, attr_id, attr_name, FALSE, &local_attr_id, user_name); if(rc == cib_ok) { attr_id = local_attr_id; goto do_modify; } else if(rc != cib_NOTEXISTS) { return rc; /* } else if(attr_id == NULL) { */ /* return cib_missing; */ } else { const char *value = NULL; + const char *node_type = NULL; xmlNode *cib_top = NULL; rc = the_cib->cmds->delegated_variant_op( the_cib, CIB_OP_QUERY, NULL, "/cib", NULL, &cib_top, cib_sync_call|cib_scope_local|cib_xpath|cib_no_children, user_name); value = crm_element_value(cib_top, "ignore_dtd"); if(value != NULL) { use_attributes_tag = TRUE; } else { value = crm_element_value(cib_top, XML_ATTR_VALIDATION); if(value && strstr(value, "-0.6")) { use_attributes_tag = TRUE; } } free_xml(cib_top); - if(safe_str_eq(section, XML_CIB_TAG_NODES)) { + if(safe_str_eq(section, XML_CIB_TAG_TICKETS)) { + node_uuid = NULL; + section = XML_CIB_TAG_STATUS; + node_type = XML_CIB_TAG_TICKETS; + + xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_STATUS); + if(xml_top == NULL) { + xml_top = xml_obj; + } + + xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_TICKETS); + + } else if(safe_str_eq(section, XML_CIB_TAG_NODES)) { tag = XML_CIB_TAG_NODE; if(node_uuid == NULL) { return cib_missing; } } else if(safe_str_eq(section, XML_CIB_TAG_STATUS)) { tag = XML_TAG_TRANSIENT_NODEATTRS; if(node_uuid == NULL) { return cib_missing; } xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_STATE); crm_xml_add(xml_obj, XML_ATTR_ID, node_uuid); if(xml_top == NULL) { xml_top = xml_obj; } } else { tag = section; node_uuid = NULL; } if(set_name == NULL) { if(safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { local_set_name = crm_strdup(CIB_OPTIONS_FIRST); + } else if(safe_str_eq(node_type, 
XML_CIB_TAG_TICKETS)) { + local_set_name = crm_concat(section, XML_CIB_TAG_TICKETS, '-'); + } else if(node_uuid) { local_set_name = crm_concat(section, node_uuid, '-'); if(set_type) { char *tmp_set_name = local_set_name; local_set_name = crm_concat(tmp_set_name, set_type, '-'); crm_free(tmp_set_name); } } else { local_set_name = crm_concat(section, "options", '-'); } set_name = local_set_name; } if(attr_id == NULL) { int lpc = 0; local_attr_id = crm_concat(set_name, attr_name, '-'); attr_id = local_attr_id; /* Minimal attempt at sanitizing automatic IDs */ for(lpc = 0; local_attr_id[lpc] != 0; lpc++) { switch(local_attr_id[lpc]) { case ':': local_attr_id[lpc] = '.'; } } } else if(attr_name == NULL) { attr_name = attr_id; } crm_debug_2("Creating %s/%s", section, tag); if(tag != NULL) { xml_obj = create_xml_node(xml_obj, tag); crm_xml_add(xml_obj, XML_ATTR_ID, node_uuid); if(xml_top == NULL) { xml_top = xml_obj; } } - if(node_uuid == NULL) { + if(node_uuid == NULL && safe_str_neq(node_type, XML_CIB_TAG_TICKETS)) { if(safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_PROPSET); } else { xml_obj = create_xml_node(xml_obj, XML_TAG_META_SETS); } } else if(set_type) { xml_obj = create_xml_node(xml_obj, set_type); } else { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); } crm_xml_add(xml_obj, XML_ATTR_ID, set_name); if(xml_top == NULL) { xml_top = xml_obj; } if(use_attributes_tag) { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTRS); } } do_modify: xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if(xml_top == NULL) { xml_top = xml_obj; } crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); crm_log_xml_debug_2(xml_top, "update_attr"); rc = the_cib->cmds->delegated_variant_op( the_cib, CIB_OP_MODIFY, NULL, section, xml_top, NULL, call_options|cib_quorum_override, user_name); if(rc < cib_ok) { attr_msg(LOG_ERR, "Error setting %s=%s (section=%s, set=%s): %s", attr_name, attr_value, section, crm_str(set_name), cib_error2string(rc)); crm_log_xml_info(xml_top, "Update"); } crm_free(local_set_name); crm_free(local_attr_id); free_xml(xml_top); return rc; } enum cib_errors read_attr_delegate(cib_t *the_cib, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **attr_value, gboolean to_console, const char *user_name) { enum cib_errors rc = cib_ok; CRM_ASSERT(attr_value != NULL); CRM_CHECK(section != NULL, return cib_missing); CRM_CHECK(attr_name != NULL || attr_id != NULL, return cib_missing); *attr_value = NULL; rc = find_nvpair_attr_delegate(the_cib, XML_NVPAIR_ATTR_VALUE, section, node_uuid, set_type, set_name, attr_id, attr_name, to_console, attr_value, user_name); if(rc != cib_ok) { do_crm_log(LOG_DEBUG_2, "Query failed for attribute %s (section=%s, node=%s, set=%s): %s", attr_name, section, crm_str(set_name), crm_str(node_uuid), cib_error2string(rc)); } return rc; } enum cib_errors delete_attr_delegate(cib_t *the_cib, int options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name) { enum cib_errors rc = cib_ok; xmlNode *xml_obj = NULL; char *local_attr_id = NULL; CRM_CHECK(section != NULL, return cib_missing); CRM_CHECK(attr_name != NULL || attr_id != NULL, return cib_missing); if(attr_id == NULL) { rc = 
find_nvpair_attr_delegate(the_cib, XML_ATTR_ID, section, node_uuid, set_type, set_name, attr_id, attr_name, to_console, &local_attr_id, user_name); if(rc != cib_ok) { return rc; } attr_id = local_attr_id; } xml_obj = create_xml_node(NULL, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); rc = the_cib->cmds->delegated_variant_op( the_cib, CIB_OP_DELETE, NULL, section, xml_obj, NULL, options|cib_quorum_override, user_name); if(rc == cib_ok) { attr_msg(LOG_DEBUG, "Deleted %s %s: id=%s%s%s%s%s\n", section, node_uuid?"attribute":"option", local_attr_id, set_name?" set=":"", set_name?set_name:"", attr_name?" name=":"", attr_name?attr_name:""); } crm_free(local_attr_id); free_xml(xml_obj); return rc; } enum cib_errors query_node_uuid(cib_t *the_cib, const char *uname, char **uuid) { enum cib_errors rc = cib_ok; xmlNode *a_child = NULL; xmlNode *xml_obj = NULL; xmlNode *fragment = NULL; const char *child_name = NULL; CRM_ASSERT(uname != NULL); CRM_ASSERT(uuid != NULL); rc = the_cib->cmds->query(the_cib, XML_CIB_TAG_NODES, &fragment, cib_sync_call|cib_scope_local); if(rc != cib_ok) { return rc; } xml_obj = fragment; CRM_CHECK(safe_str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES), return cib_output_data); CRM_ASSERT(xml_obj != NULL); crm_log_xml_debug(xml_obj, "Result section"); rc = cib_NOTEXISTS; *uuid = NULL; for(a_child = __xml_first_child(xml_obj); a_child != NULL; a_child = __xml_next(a_child)) { if(crm_str_eq((const char *)a_child->name, XML_CIB_TAG_NODE, TRUE)) { child_name = crm_element_value(a_child, XML_ATTR_UNAME); if(safe_str_eq(uname, child_name)) { child_name = ID(a_child); if(child_name != NULL) { *uuid = crm_strdup(child_name); rc = cib_ok; } break; } } } free_xml(fragment); return rc; } enum cib_errors query_node_uname(cib_t *the_cib, const char *uuid, char **uname) { enum cib_errors rc = cib_ok; xmlNode *a_child = NULL; xmlNode *xml_obj = NULL; xmlNode *fragment = NULL; const char *child_name = NULL; CRM_ASSERT(uname != NULL); CRM_ASSERT(uuid != NULL); rc = the_cib->cmds->query(the_cib, XML_CIB_TAG_NODES, &fragment, cib_sync_call|cib_scope_local); if(rc != cib_ok) { return rc; } xml_obj = fragment; CRM_CHECK(safe_str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES), return cib_output_data); CRM_ASSERT(xml_obj != NULL); crm_log_xml_debug_2(xml_obj, "Result section"); rc = cib_NOTEXISTS; *uname = NULL; for(a_child = __xml_first_child(xml_obj); a_child != NULL; a_child = __xml_next(a_child)) { if(crm_str_eq((const char *)a_child->name, XML_CIB_TAG_NODE, TRUE)) { child_name = ID(a_child); if(safe_str_eq(uuid, child_name)) { child_name = crm_element_value(a_child, XML_ATTR_UNAME); if(child_name != NULL) { *uname = crm_strdup(child_name); rc = cib_ok; } break; } } } free_xml(fragment); return rc; } enum cib_errors set_standby(cib_t *the_cib, const char *uuid, const char *scope, const char *standby_value) { enum cib_errors rc = cib_ok; int str_length = 3; char *attr_id = NULL; char *set_name = NULL; const char *attr_name = "standby"; CRM_CHECK(standby_value != NULL, return cib_missing_data); if(scope == NULL) { scope = XML_CIB_TAG_NODES; } CRM_CHECK(uuid != NULL, return cib_missing_data); str_length += strlen(attr_name); str_length += strlen(uuid); if(safe_str_eq(scope, "reboot") || safe_str_eq(scope, XML_CIB_TAG_STATUS)) { const char *extra = "transient"; scope = XML_CIB_TAG_STATUS; str_length += strlen(extra); crm_malloc0(attr_id, str_length); 
sprintf(attr_id, "%s-%s-%s", extra, attr_name, uuid); } else { scope = XML_CIB_TAG_NODES; crm_malloc0(attr_id, str_length); sprintf(attr_id, "%s-%s", attr_name, uuid); } rc = update_attr(the_cib, cib_sync_call, scope, uuid, NULL, set_name, attr_id, attr_name, standby_value, TRUE); crm_free(attr_id); crm_free(set_name); return rc; } diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c index 68aec2162e..5e1465b203 100644 --- a/lib/pengine/complex.c +++ b/lib/pengine/complex.c @@ -1,472 +1,474 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include extern xmlNode *get_object_root(const char *object_type,xmlNode *the_root); void populate_hash(xmlNode *nvpair_list, GHashTable *hash, const char **attrs, int attrs_length); resource_object_functions_t resource_class_functions[] = { { native_unpack, native_find_rsc, native_parameter, native_print, native_active, native_resource_state, native_location, native_free }, { group_unpack, native_find_rsc, native_parameter, group_print, group_active, group_resource_state, native_location, group_free }, { clone_unpack, native_find_rsc, native_parameter, clone_print, clone_active, clone_resource_state, native_location, clone_free }, { master_unpack, native_find_rsc, native_parameter, clone_print, clone_active, clone_resource_state, native_location, clone_free } }; enum pe_obj_types get_resource_type(const char *name) { if(safe_str_eq(name, XML_CIB_TAG_RESOURCE)) { return pe_native; } else if(safe_str_eq(name, XML_CIB_TAG_GROUP)) { return pe_group; } else if(safe_str_eq(name, XML_CIB_TAG_INCARNATION)) { return pe_clone; } else if(safe_str_eq(name, XML_CIB_TAG_MASTER)) { return pe_master; } return pe_unknown; } const char *get_resource_typename(enum pe_obj_types type) { switch(type) { case pe_native: return XML_CIB_TAG_RESOURCE; case pe_group: return XML_CIB_TAG_GROUP; case pe_clone: return XML_CIB_TAG_INCARNATION; case pe_master: return XML_CIB_TAG_MASTER; case pe_unknown: return "unknown"; } return ""; } static void dup_attr(gpointer key, gpointer value, gpointer user_data) { add_hash_param(user_data, key, value); } void get_meta_attributes(GHashTable *meta_hash, resource_t *rsc, node_t *node, pe_working_set_t *data_set) { GHashTable *node_hash = NULL; if(node) { node_hash = node->details->attrs; } xml_prop_iter(rsc->xml, prop_name, prop_value, add_hash_param(meta_hash, prop_name, prop_value); ); unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_META_SETS, node_hash, meta_hash, NULL, FALSE, data_set->now); /* populate from the regular attributes until the GUI can create * meta attributes */ unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash, meta_hash, NULL, FALSE, data_set->now); /* set anything else based on the parent */ if(rsc->parent != NULL) { 
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash); } /* and finally check the defaults */ unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_META_SETS, node_hash, meta_hash, NULL, FALSE, data_set->now); } void get_rsc_attributes(GHashTable *meta_hash, resource_t *rsc, node_t *node, pe_working_set_t *data_set) { GHashTable *node_hash = NULL; if(node) { node_hash = node->details->attrs; } unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash, meta_hash, NULL, FALSE, data_set->now); /* set anything else based on the parent */ if(rsc->parent != NULL) { get_rsc_attributes(meta_hash, rsc->parent, node, data_set); } else { /* and finally check the defaults */ unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS, node_hash, meta_hash, NULL, FALSE, data_set->now); } } gboolean common_unpack(xmlNode * xml_obj, resource_t **rsc, resource_t *parent, pe_working_set_t *data_set) { xmlNode *ops = NULL; resource_t *top = NULL; const char *value = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *class = crm_element_value(xml_obj, XML_AGENT_ATTR_CLASS); crm_log_xml_debug_3(xml_obj, "Processing resource input..."); if(id == NULL) { pe_err("Must specify id tag in "); return FALSE; } else if(rsc == NULL) { pe_err("Nowhere to unpack resource into"); return FALSE; } crm_malloc0(*rsc, sizeof(resource_t)); ops = find_xml_node(xml_obj, "operations", FALSE); (*rsc)->xml = xml_obj; (*rsc)->parent = parent; (*rsc)->ops_xml = expand_idref(ops, data_set->input); (*rsc)->variant = get_resource_type(crm_element_name(xml_obj)); if((*rsc)->variant == pe_unknown) { pe_err("Unknown resource type: %s", crm_element_name(xml_obj)); crm_free(*rsc); return FALSE; } (*rsc)->parameters = g_hash_table_new_full( crm_str_hash,g_str_equal, g_hash_destroy_str,g_hash_destroy_str); (*rsc)->meta = g_hash_table_new_full( crm_str_hash,g_str_equal, g_hash_destroy_str,g_hash_destroy_str); (*rsc)->allowed_nodes = g_hash_table_new_full( crm_str_hash,g_str_equal, NULL, g_hash_destroy_str); (*rsc)->known_on = g_hash_table_new_full( crm_str_hash,g_str_equal, NULL, g_hash_destroy_str); value = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION); if(value) { (*rsc)->id = crm_concat(id, value, ':'); add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value); } else { (*rsc)->id = crm_strdup(id); } if(parent) { (*rsc)->long_name = crm_concat(parent->long_name, (*rsc)->id, ':'); } else { (*rsc)->long_name = crm_strdup((*rsc)->id); } (*rsc)->fns = &resource_class_functions[(*rsc)->variant]; crm_debug_3("Unpacking resource..."); get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set); (*rsc)->flags = 0; set_bit((*rsc)->flags, pe_rsc_runnable); set_bit((*rsc)->flags, pe_rsc_provisional); if(is_set(data_set->flags, pe_flag_is_managed_default)) { set_bit((*rsc)->flags, pe_rsc_managed); } (*rsc)->rsc_cons = NULL; + (*rsc)->rsc_tickets = NULL; (*rsc)->actions = NULL; (*rsc)->role = RSC_ROLE_STOPPED; (*rsc)->next_role = RSC_ROLE_UNKNOWN; (*rsc)->recovery_type = recovery_stop_start; (*rsc)->stickiness = data_set->default_resource_stickiness; (*rsc)->migration_threshold= INFINITY; (*rsc)->failure_timeout = 0; value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY); (*rsc)->priority = crm_parse_int(value, "0"); (*rsc)->effective_priority = (*rsc)->priority; value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY); if(crm_is_true(value)) { set_bit((*rsc)->flags, pe_rsc_notify); } value = g_hash_table_lookup((*rsc)->meta, 
XML_RSC_ATTR_MANAGED); if(value != NULL && safe_str_neq("default", value)) { gboolean bool_value = TRUE; crm_str_to_boolean(value, &bool_value); if(bool_value == FALSE) { clear_bit((*rsc)->flags, pe_rsc_managed); } else { set_bit((*rsc)->flags, pe_rsc_managed); } } if(is_set(data_set->flags, pe_flag_maintenance_mode)) { clear_bit((*rsc)->flags, pe_rsc_managed); } crm_debug_2("Options for %s", (*rsc)->id); value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE); top = uber_parent(*rsc); if(crm_is_true(value) || top->variant < pe_clone) { set_bit((*rsc)->flags, pe_rsc_unique); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART); if(safe_str_eq(value, "restart")) { (*rsc)->restart_type = pe_restart_restart; crm_debug_2("\tDependency restart handling: restart"); } else { (*rsc)->restart_type = pe_restart_ignore; crm_debug_2("\tDependency restart handling: ignore"); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE); if(safe_str_eq(value, "stop_only")) { (*rsc)->recovery_type = recovery_stop_only; crm_debug_2("\tMultiple running resource recovery: stop only"); } else if(safe_str_eq(value, "block")) { (*rsc)->recovery_type = recovery_block; crm_debug_2("\tMultiple running resource recovery: block"); } else { (*rsc)->recovery_type = recovery_stop_start; crm_debug_2("\tMultiple running resource recovery: stop/start"); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS); if(value != NULL && safe_str_neq("default", value)) { (*rsc)->stickiness = char2score(value); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS); if(value != NULL && safe_str_neq("default", value)) { (*rsc)->migration_threshold = char2score(value); } else if(value == NULL) { /* Make a best-effort guess at a migration threshold for people with 0.6 configs * try with underscores and hyphens, from both the resource and global defaults section */ value = g_hash_table_lookup((*rsc)->meta, "resource-failure-stickiness"); if(value == NULL) { value = g_hash_table_lookup((*rsc)->meta, "resource_failure_stickiness"); } if(value == NULL) { value = g_hash_table_lookup(data_set->config_hash, "default-resource-failure-stickiness"); } if(value == NULL) { value = g_hash_table_lookup(data_set->config_hash, "default_resource_failure_stickiness"); } if(value) { int fail_sticky = char2score(value); if(fail_sticky == -INFINITY) { (*rsc)->migration_threshold = 1; crm_info("Set a migration threshold of %d for %s based on a failure-stickiness of %s", (*rsc)->migration_threshold, (*rsc)->id, value); } else if((*rsc)->stickiness != 0 && fail_sticky != 0) { (*rsc)->migration_threshold = (*rsc)->stickiness / fail_sticky; if((*rsc)->migration_threshold < 0) { /* Make sure it's positive */ (*rsc)->migration_threshold = 0 - (*rsc)->migration_threshold; } (*rsc)->migration_threshold += 1; crm_info("Calculated a migration threshold for %s of %d based on a stickiness of %d/%s", (*rsc)->id, (*rsc)->migration_threshold, (*rsc)->stickiness, value); } } } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT); if(value != NULL) { /* call crm_get_msec() and convert back to seconds */ (*rsc)->failure_timeout = (crm_get_msec(value) / 1000); } get_target_role(*rsc, &((*rsc)->next_role)); crm_debug_2("\tDesired next state: %s", (*rsc)->next_role!=RSC_ROLE_UNKNOWN?role2text((*rsc)->next_role):"default"); if((*rsc)->fns->unpack(*rsc, data_set) == FALSE) { return FALSE; } if(is_set(data_set->flags, pe_flag_symmetric_cluster)) { resource_location(*rsc, NULL, 0, "symmetric_default", 
data_set); } crm_debug_2("\tAction notification: %s", is_set((*rsc)->flags, pe_rsc_notify)?"required":"not required"); if(safe_str_eq(class, "stonith")) { set_bit_inplace(data_set->flags, pe_flag_have_stonith_resource); } (*rsc)->utilization = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); unpack_instance_attributes(data_set->input, (*rsc)->xml, XML_TAG_UTILIZATION, NULL, (*rsc)->utilization, NULL, FALSE, data_set->now); /* data_set->resources = g_list_append(data_set->resources, (*rsc)); */ return TRUE; } void common_update_score(resource_t *rsc, const char *id, int score) { node_t *node = NULL; node = pe_hash_table_lookup(rsc->allowed_nodes, id); if(node != NULL) { crm_debug_2("Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score); node->weight = merge_weights(node->weight, score); } if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; common_update_score(child_rsc, id, score); } } } resource_t *uber_parent(resource_t *rsc) { resource_t *parent = rsc; if(parent == NULL) { return NULL; } while(parent->parent != NULL) { parent = parent->parent; } return parent; } void common_free(resource_t *rsc) { if(rsc == NULL) { return; } crm_debug_5("Freeing %s %d", rsc->id, rsc->variant); g_list_free(rsc->rsc_cons); g_list_free(rsc->rsc_cons_lhs); + g_list_free(rsc->rsc_tickets); g_list_free(rsc->dangling_migrations); if(rsc->parameters != NULL) { g_hash_table_destroy(rsc->parameters); } if(rsc->meta != NULL) { g_hash_table_destroy(rsc->meta); } if(rsc->utilization != NULL) { g_hash_table_destroy(rsc->utilization); } if(rsc->parent == NULL && is_set(rsc->flags, pe_rsc_orphan)) { free_xml(rsc->xml); } if(rsc->running_on) { g_list_free(rsc->running_on); rsc->running_on = NULL; } if(rsc->known_on) { g_hash_table_destroy(rsc->known_on); rsc->known_on = NULL; } if(rsc->actions) { g_list_free(rsc->actions); rsc->actions = NULL; } if(rsc->allowed_nodes) { g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = NULL; } g_list_free(rsc->rsc_location); crm_free(rsc->id); crm_free(rsc->long_name); crm_free(rsc->clone_name); crm_free(rsc->allocated_to); crm_free(rsc->variant_opaque); crm_free(rsc); crm_debug_5("Resource freed"); } diff --git a/lib/pengine/status.c b/lib/pengine/status.c index 9641d72867..3e210af0b4 100644 --- a/lib/pengine/status.c +++ b/lib/pengine/status.c @@ -1,285 +1,289 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include extern xmlNode*get_object_root( const char *object_type, xmlNode *the_root); #define MEMCHECK_STAGE_0 0 #define check_and_exit(stage) cleanup_calculations(data_set); \ crm_mem_stats(NULL); \ crm_err("Exiting: stage %d", stage); \ exit(1); /* * Unpack everything * At the end you'll have: * - A list of nodes * - A list of resources (each with any dependencies on other resources) * - A list of constraints between resources and nodes * - A list of constraints between start/stop actions * - A list of nodes that need to be stonith'd * - A list of nodes that need to be shutdown * - A list of the possible stop/start actions (without dependencies) */ gboolean cluster_status(pe_working_set_t *data_set) { xmlNode * config = get_object_root( XML_CIB_TAG_CRMCONFIG, data_set->input); xmlNode * cib_nodes = get_object_root( XML_CIB_TAG_NODES, data_set->input); xmlNode * cib_resources = get_object_root( XML_CIB_TAG_RESOURCES, data_set->input); xmlNode * cib_status = get_object_root( XML_CIB_TAG_STATUS, data_set->input); xmlNode * cib_domains = get_object_root( XML_CIB_TAG_DOMAINS, data_set->input); const char *value = crm_element_value( data_set->input, XML_ATTR_HAVE_QUORUM); crm_debug_3("Beginning unpack"); pe_dataset = data_set; /* reset remaining global variables */ data_set->failed = create_xml_node(NULL, "failed-ops"); if(data_set->input == NULL) { return FALSE; } if(data_set->now == NULL) { data_set->now = new_ha_date(TRUE); } if(data_set->input != NULL && crm_element_value(data_set->input, XML_ATTR_DC_UUID) != NULL) { /* this should always be present */ data_set->dc_uuid = crm_element_value_copy( data_set->input, XML_ATTR_DC_UUID); } clear_bit_inplace(data_set->flags, pe_flag_have_quorum); if(crm_is_true(value)) { set_bit_inplace(data_set->flags, pe_flag_have_quorum); } data_set->op_defaults = get_object_root(XML_CIB_TAG_OPCONFIG, data_set->input); data_set->rsc_defaults = get_object_root(XML_CIB_TAG_RSCCONFIG, data_set->input); unpack_config(config, data_set); if(is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy != no_quorum_ignore) { crm_warn("We do not have quorum" " - fencing and resource management disabled"); } unpack_nodes(cib_nodes, data_set); unpack_domains(cib_domains, data_set); unpack_resources(cib_resources, data_set); unpack_status(cib_status, data_set); set_bit_inplace(data_set->flags, pe_flag_have_status); return TRUE; } static void pe_free_resources(GListPtr resources) { resource_t *rsc = NULL; GListPtr iterator = resources; while(iterator != NULL) { iterator = iterator; rsc = (resource_t *)iterator->data; iterator = iterator->next; rsc->fns->free(rsc); } if(resources != NULL) { g_list_free(resources); } } static void pe_free_actions(GListPtr actions) { GListPtr iterator = actions; while(iterator != NULL) { pe_free_action(iterator->data); iterator = iterator->next; } if(actions != NULL) { g_list_free(actions); } } static void pe_free_nodes(GListPtr nodes) { GListPtr iterator = nodes; while(iterator != NULL) { node_t *node = (node_t*)iterator->data; struct node_shared_s *details = node->details; iterator = iterator->next; crm_debug_5("deleting node"); crm_debug_5("%s is being deleted", details->uname); print_node("delete", node, FALSE); if(details 
!= NULL) {
            if(details->attrs != NULL) {
                g_hash_table_destroy(details->attrs);
            }
            if(details->utilization != NULL) {
                g_hash_table_destroy(details->utilization);
            }
            g_list_free(details->running_rsc);
            g_list_free(details->allocated_rsc);
            crm_free(details);
        }
        crm_free(node);
    }
    if(nodes != NULL) {
        g_list_free(nodes);
    }
}

void
cleanup_calculations(pe_working_set_t *data_set)
{
    pe_dataset = NULL;
    if(data_set == NULL) {
        return;
    }

    clear_bit_inplace(data_set->flags, pe_flag_have_status);
    if(data_set->config_hash != NULL) {
        g_hash_table_destroy(data_set->config_hash);
    }

+   if(data_set->tickets) {
+       g_hash_table_destroy(data_set->tickets);
+   }
+
    crm_free(data_set->dc_uuid);

    crm_debug_3("deleting resources");
    pe_free_resources(data_set->resources);

    crm_debug_3("deleting actions");
    pe_free_actions(data_set->actions);

    if(data_set->domains) {
        g_hash_table_destroy(data_set->domains);
    }

    crm_debug_3("deleting nodes");
    pe_free_nodes(data_set->nodes);

    free_xml(data_set->graph);
    free_ha_date(data_set->now);
    free_xml(data_set->input);
    free_xml(data_set->failed);

    set_working_set_defaults(data_set);
    CRM_CHECK(data_set->ordering_constraints == NULL, ;);
    CRM_CHECK(data_set->placement_constraints == NULL, ;);
}

void
set_working_set_defaults(pe_working_set_t *data_set)
{
    pe_dataset = data_set;
    memset(data_set, 0, sizeof(pe_working_set_t));

    data_set->order_id = 1;
    data_set->action_id = 1;
    data_set->no_quorum_policy = no_quorum_freeze;

    data_set->flags = 0x0ULL;
    set_bit_inplace(data_set->flags, pe_flag_stop_rsc_orphans);
    set_bit_inplace(data_set->flags, pe_flag_symmetric_cluster);
    set_bit_inplace(data_set->flags, pe_flag_is_managed_default);
    set_bit_inplace(data_set->flags, pe_flag_stop_action_orphans);
}

resource_t *
pe_find_resource(GListPtr rsc_list, const char *id)
{
    unsigned lpc = 0;
    resource_t *rsc = NULL;
    resource_t *match = NULL;

    if(id == NULL) {
        return NULL;
    }

    for(lpc = 0; lpc < g_list_length(rsc_list); lpc++) {
        rsc = g_list_nth_data(rsc_list, lpc);
        match = rsc->fns->find_rsc(rsc, id, NULL, pe_find_renamed|pe_find_current);
        if(match != NULL) {
            return match;
        }
    }
    crm_debug_2("No match for %s", id);
    return NULL;
}

node_t *
pe_find_node_id(GListPtr nodes, const char *id)
{
    GListPtr gIter = nodes;
    for(; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t*)gIter->data;
        if(node && safe_str_eq(node->details->id, id)) {
            return node;
        }
    }
    /* error */
    return NULL;
}

node_t *
pe_find_node(GListPtr nodes, const char *uname)
{
    GListPtr gIter = nodes;
    for(; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t*)gIter->data;
        if(node && safe_str_eq(node->details->uname, uname)) {
            return node;
        }
    }
    /* error */
    return NULL;
}
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 8ecd1a27b1..b6b6fd1516 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,2144 +1,2243 @@
/*
 * Copyright (C) 2004 Andrew Beekhof
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); #define set_config_flag(data_set, option, flag) do { \ const char *tmp = pe_pref(data_set->config_hash, option); \ if(tmp) { \ if(crm_is_true(tmp)) { \ set_bit_inplace(data_set->flags, flag); \ } else { \ clear_bit_inplace(data_set->flags, flag); \ } \ } \ } while(0) gboolean unpack_rsc_op( resource_t *rsc, node_t *node, xmlNode *xml_op, GListPtr next, enum action_fail_response *failed, pe_working_set_t *data_set); static void pe_fence_node(pe_working_set_t *data_set, node_t *node, const char *reason) { CRM_CHECK(node, return); if(node->details->unclean == FALSE) { if(is_set(data_set->flags, pe_flag_stonith_enabled)) { crm_warn("Node %s will be fenced %s", node->details->uname, reason); } else { crm_warn("Node %s is unclean %s", node->details->uname, reason); } } node->details->unclean = TRUE; } gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set) { const char *value = NULL; GHashTable *config_hash = g_hash_table_new_full( crm_str_hash,g_str_equal, g_hash_destroy_str,g_hash_destroy_str); data_set->config_hash = config_hash; unpack_instance_attributes( data_set->input, config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set->now); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); crm_info("Startup probes: %s", is_set(data_set->flags, pe_flag_startup_probes)?"enabled":"disabled (dangerous)"); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = crm_get_msec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", is_set(data_set->flags, pe_flag_stonith_enabled)?"enabled":"disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); crm_debug_2("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything); crm_debug("Stop all active resources: %s", is_set(data_set->flags, pe_flag_stop_everything)?"true":"false"); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if(is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "default-resource-stickiness"); data_set->default_resource_stickiness = char2score(value); crm_debug("Default stickiness: %d", data_set->default_resource_stickiness); value = pe_pref(data_set->config_hash, "no-quorum-policy"); if(safe_str_eq(value, "ignore")) { data_set->no_quorum_policy = no_quorum_ignore; } else if(safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; } else if(safe_str_eq(value, "suicide")) { gboolean do_panic = FALSE; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE){ crm_config_err("Setting no-quorum-policy=suicide makes no sense if stonith-enabled=false"); } if(do_panic && is_set(data_set->flags, pe_flag_stonith_enabled)) { data_set->no_quorum_policy = no_quorum_suicide; } else 
if(is_set(data_set->flags, pe_flag_have_quorum) == FALSE && do_panic == FALSE) { crm_notice("Resetting no-quorum-policy to 'stop': The cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of CCM Quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of CCM Quorum: Stop ALL resources"); break; case no_quorum_suicide: crm_notice("On loss of CCM Quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of CCM Quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_debug_2("Orphan resources are %s", is_set(data_set->flags, pe_flag_stop_rsc_orphans)?"stopped":"ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_debug_2("Orphan resource actions are %s", is_set(data_set->flags, pe_flag_stop_action_orphans)?"stopped":"ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_debug_2("Stopped resources are removed from the status section: %s", is_set(data_set->flags, pe_flag_remove_after_stop)?"true":"false"); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_debug_2("Maintenance mode: %s", is_set(data_set->flags, pe_flag_maintenance_mode)?"true":"false"); if(is_set(data_set->flags, pe_flag_maintenance_mode)) { clear_bit(data_set->flags, pe_flag_is_managed_default); } else { set_config_flag(data_set, "is-managed-default", pe_flag_is_managed_default); } crm_debug_2("By default resources are %smanaged", is_set(data_set->flags, pe_flag_is_managed_default)?"":"not "); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_debug_2("Start failures are %s", is_set(data_set->flags, pe_flag_start_failure_fatal)?"always fatal":"handled by failcount"); node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_info("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_debug_2("Placement strategy: %s", data_set->placement_strategy); return TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; gboolean unseen_are_unclean = TRUE; const char *blind_faith = pe_pref( data_set->config_hash, "startup-fencing"); if(crm_is_true(blind_faith) == FALSE) { unseen_are_unclean = FALSE; crm_warn("Blind faith: not fencing unseen nodes"); } for(xml_obj = __xml_first_child(xml_nodes); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) { if(crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_debug_3("Processing node %s/%s", uname, id); if(id == NULL) { crm_config_err("Must specify id tag in "); 
continue; } if(type == NULL) { crm_config_err("Must specify type tag in "); continue; } if(pe_find_node(data_set->nodes, uname) != NULL) { crm_config_warn("Detected multiple node entries with uname=%s" " - this is rarely intended", uname); } crm_malloc0(new_node, sizeof(node_t)); if(new_node == NULL) { return FALSE; } new_node->weight = char2score(score); new_node->fixed = FALSE; crm_malloc0(new_node->details, sizeof(struct node_shared_s)); if(new_node->details == NULL) { crm_free(new_node); return FALSE; } crm_debug_3("Creaing node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->type = node_ping; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->running_rsc = NULL; new_node->details->attrs = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); new_node->details->utilization = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE || unseen_are_unclean == FALSE) { /* blind faith... */ new_node->details->unclean = FALSE; } else { /* all nodes are unclean until we've seen their * status entry */ new_node->details->unclean = TRUE; } if(type == NULL || safe_str_eq(type, "member") || safe_str_eq(type, NORMALNODE)) { new_node->details->type = node_member; } add_node_attrs(xml_obj, new_node, FALSE, data_set); unpack_instance_attributes( data_set->input, xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set->now); data_set->nodes = g_list_append(data_set->nodes, new_node); crm_debug_3("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } return TRUE; } static void g_hash_destroy_node_list(gpointer data) { GListPtr domain = data; slist_basic_destroy(domain); } gboolean unpack_domains(xmlNode *xml_domains, pe_working_set_t *data_set) { const char *id = NULL; GListPtr domain = NULL; xmlNode *xml_node = NULL; xmlNode *xml_domain = NULL; crm_info("Unpacking domains"); data_set->domains = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_node_list); for(xml_domain = __xml_first_child(xml_domains); xml_domain != NULL; xml_domain = __xml_next(xml_domain)) { if(crm_str_eq((const char *)xml_domain->name, XML_CIB_TAG_DOMAIN, TRUE)) { domain = NULL; id = crm_element_value(xml_domain, XML_ATTR_ID); for(xml_node = __xml_first_child(xml_domain); xml_node != NULL; xml_node = __xml_next(xml_node)) { if(crm_str_eq((const char *)xml_node->name, XML_CIB_TAG_NODE, TRUE)) { node_t *copy = NULL; node_t *node = NULL; const char *uname = crm_element_value(xml_node, "name"); const char *score = crm_element_value(xml_node, XML_RULE_ATTR_SCORE); if(uname == NULL) { crm_config_err("Invalid domain %s: Must specify id tag in ", id); continue; } node = pe_find_node(data_set->nodes, uname); if(node == NULL) { node = pe_find_node_id(data_set->nodes, uname); } if(node == NULL) { crm_config_warn("Invalid domain %s: Node %s does not exist", id, uname); continue; } copy = node_copy(node); copy->weight = char2score(score); crm_debug("Adding %s to domain %s with score %s", node->details->uname, id, score); domain = g_list_prepend(domain, copy); } } if(domain) { crm_debug("Created domain %s with %d members", id, g_list_length(domain)); 
                g_hash_table_replace(data_set->domains, crm_strdup(id), domain);
            }
        }
    }

    return TRUE;
}

gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t *data_set)
{
    xmlNode *xml_obj = NULL;

    for(xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) {
        resource_t *new_rsc = NULL;
        crm_debug_3("Beginning unpack... <%s id=%s... >",
                    crm_element_name(xml_obj), ID(xml_obj));
        if(common_unpack(xml_obj, &new_rsc, NULL, data_set)) {
            data_set->resources = g_list_append(
                data_set->resources, new_rsc);

            print_resource(LOG_DEBUG_3, "Added", new_rsc, FALSE);

        } else {
            crm_config_err("Failed unpacking %s %s",
                           crm_element_name(xml_obj),
                           crm_element_value(xml_obj, XML_ATTR_ID));
            if(new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
        }
    }

    data_set->resources = g_list_sort(
        data_set->resources, sort_rsc_priority);

    if(is_set(data_set->flags, pe_flag_stonith_enabled)
       && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        crm_config_err("Resource start-up disabled since no STONITH resources have been defined");
        crm_config_err("Either configure some or disable STONITH with the stonith-enabled option");
        crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
    }

    return TRUE;
}

+static void
+get_ticket_state(gpointer key, gpointer value, gpointer user_data)
+{
+    const char *attr_key = key;
+
+    /* Ticket state arrives as attribute pairs named "granted-ticket-<id>"
+     * and "last-granted-ticket-<id>"; the rest of the key is the ticket id */
+    const char *granted_prefix = "granted-ticket-";
+    const char *last_granted_prefix = "last-granted-ticket-";
+    static int granted_prefix_strlen = 0;
+    static int last_granted_prefix_strlen = 0;
+
+    const char *ticket_id = NULL;
+    const char *is_granted = NULL;
+    const char *last_granted = NULL;
+
+    ticket_t *ticket = NULL;
+    GHashTable *tickets = user_data;
+
+    if(granted_prefix_strlen == 0) {
+        granted_prefix_strlen = strlen(granted_prefix);
+    }
+
+    if(last_granted_prefix_strlen == 0) {
+        last_granted_prefix_strlen = strlen(last_granted_prefix);
+    }
+
+    if (strstr(attr_key, granted_prefix) == attr_key) {
+        ticket_id = attr_key + granted_prefix_strlen;
+        if(strlen(ticket_id)) {
+            is_granted = value;
+        }
+    } else if (strstr(attr_key, last_granted_prefix) == attr_key) {
+        ticket_id = attr_key + last_granted_prefix_strlen;
+        if(strlen(ticket_id)) {
+            last_granted = value;
+        }
+    }
+
+    if(ticket_id == NULL || strlen(ticket_id) == 0) {
+        return;
+    }
+
+    /* Create the ticket entry on first reference */
+    ticket = g_hash_table_lookup(tickets, ticket_id);
+    if(ticket == NULL) {
+        crm_malloc0(ticket, sizeof(ticket_t));
+        if(ticket == NULL) {
+            crm_config_err("Cannot allocate ticket '%s'", ticket_id);
+            return;
+        }
+
+        ticket->id = crm_strdup(ticket_id);
+        ticket->granted = FALSE;
+        ticket->last_granted = -1;
+
+        g_hash_table_insert(tickets, crm_strdup(ticket->id), ticket);
+    }
+    if(is_granted) {
+        if(crm_is_true(is_granted)) {
+            ticket->granted = TRUE;
+            crm_info("We have ticket '%s'", ticket->id);
+        } else {
+            ticket->granted = FALSE;
+            crm_info("We do not have ticket '%s'", ticket->id);
+        }
+
+    } else if(last_granted) {
+        ticket->last_granted = crm_parse_int(last_granted, 0);
+    }
+}
+
+static void destroy_ticket(gpointer data)
+{
+    ticket_t *ticket = data;
+
+    crm_free(ticket->id);
+    crm_free(ticket);
+}

/* remove nodes that are down, stopping */
/* create +ve rsc_to_node constraints between resources and the nodes they are running on */
/* anything else?
 */
gboolean
unpack_status(xmlNode * status, pe_working_set_t *data_set)
{
    const char *id = NULL;
    const char *uname = NULL;

    xmlNode * lrm_rsc = NULL;
    xmlNode * attrs = NULL;
+   xmlNode * state = NULL;
    xmlNode * node_state = NULL;
    node_t *this_node = NULL;

    crm_debug_3("Beginning unpack");
-   for(node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next(node_state)) {
-       if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
+
+   data_set->tickets = g_hash_table_new_full(
+       crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
+
+   for(state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
+       if(crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) {
+           xmlNode *tickets = state;
+           GHashTable *attrs_hash = g_hash_table_new_full(
+               crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
+
+           unpack_instance_attributes(
+               data_set->input, tickets, XML_TAG_ATTR_SETS, NULL,
+               attrs_hash, NULL, TRUE, data_set->now);
+
+           g_hash_table_foreach(attrs_hash, get_ticket_state, data_set->tickets);
+
+           if(attrs_hash) {
+               g_hash_table_destroy(attrs_hash);
+           }
+       }
+
+       if(crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) {
+           node_state = state;
+
+           id = crm_element_value(node_state, XML_ATTR_ID);
            uname = crm_element_value(node_state, XML_ATTR_UNAME);
            attrs = find_xml_node(node_state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);

            crm_debug_3("Processing node id=%s, uname=%s", id, uname);
            this_node = pe_find_node_id(data_set->nodes, id);

            if(uname == NULL) {
                /* error */
                continue;

            } else if(this_node == NULL) {
                crm_config_warn("Node %s in status section no longer exists", uname);
                continue;
            }

            /* Mark the node as provisionally clean
             * - at least we have seen it in the current cluster's lifetime
             */
            this_node->details->unclean = FALSE;
            add_node_attrs(attrs, this_node, TRUE, data_set);

            if(crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
                crm_info("Node %s is in standby-mode", this_node->details->uname);
                this_node->details->standby = TRUE;
            }

            crm_debug_3("determining node state");
            determine_online_status(node_state, this_node, data_set);

            if(this_node->details->online && data_set->no_quorum_policy == no_quorum_suicide) {
                /* Everything else should flow from this automatically
                 * At least until the PE becomes able to migrate off healthy resources
                 */
                pe_fence_node(data_set, this_node, "because the cluster does not have quorum");
            }
        }
    }

    /* Now that we know all node states, we can safely handle migration ops
     * But, for now, only process healthy nodes
     *  - this is necessary for the logic in bug lf#2508 to function correctly
     */
    for(node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next(node_state)) {
        if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
            continue;
        }

        id = crm_element_value(node_state, XML_ATTR_ID);
        this_node = pe_find_node_id(data_set->nodes, id);
        if(this_node == NULL) {
            crm_info("Node %s is unknown", id);
            continue;

        } else if(this_node->details->online) {
            crm_trace("Processing lrm resource entries on healthy node: %s", this_node->details->uname);
            lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
            unpack_lrm_resources(this_node, lrm_rsc, data_set);
        }
    }

    /* Now handle failed nodes - but only if stonith is enabled
     *
     * By definition, offline nodes run no resources so there is nothing to do.
* Only when stonith is enabled do we need to know what is on the node to * ensure rsc start events happen after the stonith */ for(node_state = __xml_first_child(status); node_state != NULL && is_set(data_set->flags, pe_flag_stonith_enabled); node_state = __xml_next(node_state)) { if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { continue; } id = crm_element_value(node_state, XML_ATTR_ID); this_node = pe_find_node_id(data_set->nodes, id); if(this_node == NULL || this_node->details->online) { continue; } else { crm_trace("Processing lrm resource entries on unhealthy node: %s", this_node->details->uname); lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); unpack_lrm_resources(this_node, lrm_rsc, data_set); } } return TRUE; } static gboolean determine_online_status_no_fencing(pe_working_set_t *data_set, xmlNode * node_state, node_t *this_node) { gboolean online = FALSE; const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE); const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE); const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE); const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); if(ha_state == NULL) { ha_state = DEADSTATUS; } if(!crm_is_true(ccm_state) || safe_str_eq(ha_state, DEADSTATUS)){ crm_debug_2("Node is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state)); } else if(safe_str_eq(crm_state, ONLINESTATUS)) { if(safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) { online = TRUE; } else { crm_debug("Node is not ready to run resources: %s", join_state); } } else if(this_node->details->expected_up == FALSE) { crm_debug_2("CRMd is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state)); crm_debug_2("\tcrm_state=%s, join_state=%s, expected=%s", crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else { /* mark it unclean */ pe_fence_node(data_set, this_node, "because it is partially and/or un-expectedly down"); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } return online; } static gboolean determine_online_status_fencing(pe_working_set_t *data_set, xmlNode * node_state, node_t *this_node) { gboolean online = FALSE; gboolean do_terminate = FALSE; const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE); const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE); const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE); const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); const char *terminate = g_hash_table_lookup(this_node->details->attrs, "terminate"); if(ha_state == NULL) { ha_state = DEADSTATUS; } if(crm_is_true(terminate)) { do_terminate = TRUE; } else if(terminate != NULL && strlen(terminate) > 0) { /* could be a time() value */ char t = terminate[0]; if(t != '0' && isdigit(t)) { do_terminate = TRUE; } } if(crm_is_true(ccm_state) && safe_str_eq(ha_state, ACTIVESTATUS) && safe_str_eq(crm_state, ONLINESTATUS)) { if(safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) { online = TRUE; if(do_terminate) { pe_fence_node(data_set, this_node, "because termination was requested"); } } 
else if(join_state == exp_state /* == NULL */) { crm_info("Node %s is coming up", this_node->details->uname); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else if(safe_str_eq(join_state, CRMD_JOINSTATE_PENDING)) { crm_info("Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } else if(safe_str_eq(join_state, CRMD_JOINSTATE_NACK)) { crm_warn("Node %s is not part of the cluster", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } else if(safe_str_eq(join_state, exp_state)) { crm_info("Node %s is still coming up: %s", this_node->details->uname, join_state); crm_info("\tha_state=%s, ccm_state=%s, crm_state=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state)); this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } else { crm_warn("Node %s (%s) is un-expectedly down", this_node->details->uname, this_node->details->id); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); pe_fence_node(data_set, this_node, "because it is un-expectedly down"); } } else if(crm_is_true(ccm_state) == FALSE && safe_str_eq(ha_state, DEADSTATUS) && safe_str_eq(crm_state, OFFLINESTATUS) && this_node->details->expected_up == FALSE) { crm_debug("Node %s is down: join_state=%s, expected=%s", this_node->details->uname, crm_str(join_state), crm_str(exp_state)); #if 0 /* While a nice optimization, it causes the cluster to block until the node * comes back online. Which is a serious problem if the cluster software * is not configured to start at boot or stonith is configured to merely * stop the node instead of restart it. 
* Easily triggered by setting terminate=true for the DC */ } else if(do_terminate) { crm_info("Node %s is %s after forced termination", this_node->details->uname, crm_is_true(ccm_state)?"coming up":"going down"); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); if(crm_is_true(ccm_state) == FALSE) { this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } #endif } else if(this_node->details->expected_up) { /* mark it unclean */ pe_fence_node(data_set, this_node, "because it is un-expectedly down"); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else { crm_info("Node %s is down", this_node->details->uname); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } return online; } gboolean determine_online_status( xmlNode * node_state, node_t *this_node, pe_working_set_t *data_set) { gboolean online = FALSE; const char *shutdown = NULL; const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); if(this_node == NULL) { crm_config_err("No node to check"); return online; } this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN); if(shutdown != NULL && safe_str_neq("0", shutdown)) { this_node->details->shutdown = TRUE; } else if(safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) { this_node->details->expected_up = TRUE; } if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { online = determine_online_status_no_fencing( data_set, node_state, this_node); } else { online = determine_online_status_fencing( data_set, node_state, this_node); } if(online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if(online && this_node->details->shutdown) { /* dont run resources here */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if(this_node->details->unclean) { pe_proc_warn("Node %s is unclean", this_node->details->uname); } else if(this_node->details->online) { crm_info("Node %s is %s", this_node->details->uname, this_node->details->shutdown?"shutting down": this_node->details->pending?"pending": this_node->details->standby?"standby":"online"); } else { crm_debug_2("Node %s is offline", this_node->details->uname); } return online; } #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; char * clone_zero(const char *last_rsc_id) { int lpc = 0; char *zero = NULL; CRM_CHECK(last_rsc_id != NULL, return NULL); if(last_rsc_id != NULL) { lpc = strlen(last_rsc_id); } while(--lpc > 0) { switch(last_rsc_id[lpc]) { case 0: return NULL; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': crm_malloc0(zero, lpc + 3); memcpy(zero, last_rsc_id, lpc); zero[lpc] = ':'; zero[lpc+1] = '0'; zero[lpc+2] = 0; return zero; } } return NULL; } char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if(last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len-1; while(complete == FALSE && lpc 
> 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; crm_malloc0(last_rsc_id, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len+1] = 0; complete = TRUE; crm_free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); break; } } return last_rsc_id; } static int get_clone(char *last_rsc_id) { int clone = 0; int lpc = 0; int len = 0; CRM_CHECK(last_rsc_id != NULL, return -1); if(last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len-1; while(lpc > 0) { switch (last_rsc_id[lpc]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': clone += (int)(last_rsc_id[lpc] - '0') * (len - lpc); lpc--; break; case ':': return clone; break; default: crm_err("Unexpected char: %d (%c)", lpc, last_rsc_id[lpc]); return clone; break; } } return -1; } static resource_t * create_fake_resource(const char *rsc_id, xmlNode *rsc_entry, pe_working_set_t *data_set) { resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if(!common_unpack(xml_rsc, &rsc, NULL, data_set)) { return NULL; } set_bit(rsc->flags, pe_rsc_orphan); data_set->resources = g_list_append(data_set->resources, rsc); return rsc; } extern resource_t *create_child_clone(resource_t *rsc, int sub_id, pe_working_set_t *data_set); static resource_t *find_clone(pe_working_set_t *data_set, node_t *node, resource_t *parent, const char *rsc_id) { int len = 0; resource_t *rsc = NULL; char *base = clone_zero(rsc_id); char *alt_rsc_id = crm_strdup(rsc_id); CRM_ASSERT(parent != NULL); CRM_ASSERT(parent->variant == pe_clone || parent->variant == pe_master); if(base) { len = strlen(base); } if(len > 0) { base[len-1] = 0; } crm_debug_3("Looking for %s on %s in %s %d", rsc_id, node->details->uname, parent->id, is_set(parent->flags, pe_rsc_unique)); if(is_set(parent->flags, pe_rsc_unique)) { crm_debug_3("Looking for %s", rsc_id); rsc = parent->fns->find_rsc(parent, rsc_id, NULL, pe_find_current); } else { crm_trace("Looking for %s on %s", base, node->details->uname); rsc = parent->fns->find_rsc(parent, base, node, pe_find_partial|pe_find_current); if(rsc != NULL && rsc->running_on) { GListPtr gIter = parent->children; rsc = NULL; crm_debug_3("Looking for an existing orphan for %s: %s on %s", parent->id, rsc_id, node->details->uname); /* There is already an instance of this _anonymous_ clone active on "node". * * If there is a partially active orphan (only applies to clone groups) on * the same node, use that. * Otherwise create a new (orphaned) instance at "orphan_check:". 
*/ for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; node_t *loc = child->fns->location(child, NULL, TRUE); if(loc && loc->details == node->details) { resource_t *tmp = child->fns->find_rsc(child, base, NULL, pe_find_partial|pe_find_current); if(tmp && tmp->running_on == NULL) { rsc = tmp; break; } } } goto orphan_check; } else if(((resource_t*)parent->children->data)->variant == pe_group) { /* If we're grouped, we need to look for a peer thats active on $node * and use their clone instance number */ resource_t *peer = parent->fns->find_rsc(parent, NULL, node, pe_find_clone|pe_find_current); if(peer && peer->running_on) { char buffer[256]; int clone_num = get_clone(peer->id); snprintf(buffer, 256, "%s%d", base, clone_num); rsc = parent->fns->find_rsc(parent, buffer, node, pe_find_current|pe_find_inactive); if(rsc) { crm_trace("Found someone active: %s on %s, becoming %s", peer->id, ((node_t*)peer->running_on->data)->details->uname, buffer); } } } while(rsc == NULL) { rsc = parent->fns->find_rsc(parent, alt_rsc_id, NULL, pe_find_current); if(rsc == NULL) { crm_trace("Unknown resource: %s", alt_rsc_id); break; } if(rsc->running_on == NULL) { crm_trace("Resource %s: just right", alt_rsc_id); break; } crm_trace("Resource %s: already active", alt_rsc_id); alt_rsc_id = increment_clone(alt_rsc_id); rsc = NULL; } } orphan_check: if(rsc == NULL) { /* Create an extra orphan */ resource_t *top = create_child_clone(parent, -1, data_set); crm_debug("Created orphan for %s: %s on %s", parent->id, rsc_id, node->details->uname); rsc = top->fns->find_rsc(top, base, NULL, pe_find_current|pe_find_partial); CRM_ASSERT(rsc != NULL); } crm_free(rsc->clone_name); rsc->clone_name = NULL; if(safe_str_neq(rsc_id, rsc->id)) { crm_info("Internally renamed %s on %s to %s%s", rsc_id, node->details->uname, rsc->id, is_set(rsc->flags, pe_rsc_orphan)?" 
(ORPHAN)":""); rsc->clone_name = crm_strdup(rsc_id); } crm_free(alt_rsc_id); crm_free(base); return rsc; } static resource_t * unpack_find_resource( pe_working_set_t *data_set, node_t *node, const char *rsc_id, xmlNode *rsc_entry) { resource_t *rsc = NULL; resource_t *clone_parent = NULL; char *alt_rsc_id = crm_strdup(rsc_id); crm_debug_2("looking for %s", rsc_id); rsc = pe_find_resource(data_set->resources, alt_rsc_id); /* no match */ if(rsc == NULL) { /* Even when clone-max=0, we still create a single :0 orphan to match against */ char *tmp = clone_zero(alt_rsc_id); resource_t *clone0 = pe_find_resource(data_set->resources, tmp); clone_parent = uber_parent(clone0); crm_free(tmp); crm_debug_2("%s not found: %s", alt_rsc_id, clone_parent?clone_parent->id:"orphan"); } else { clone_parent = uber_parent(rsc); } if(clone_parent && clone_parent->variant > pe_group) { rsc = find_clone(data_set, node, clone_parent, rsc_id); CRM_ASSERT(rsc != NULL); } crm_free(alt_rsc_id); return rsc; } static resource_t * process_orphan_resource(xmlNode *rsc_entry, node_t *node, pe_working_set_t *data_set) { resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname); rsc = create_fake_resource(rsc_id, rsc_entry, data_set); if(is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } else { print_resource(LOG_DEBUG_3, "Added orphan", rsc, FALSE); CRM_CHECK(rsc != NULL, return NULL); resource_location(rsc, NULL, -INFINITY, "__orphan_dont_run__", data_set); } return rsc; } static void process_rsc_state(resource_t *rsc, node_t *node, enum action_fail_response on_fail, xmlNode *migrate_op, pe_working_set_t *data_set) { crm_debug_2("Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail)); /* process current state */ if(rsc->role != RSC_ROLE_UNKNOWN) { resource_t *iter = rsc; while(iter) { if(g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { node_t *n = node_copy(node); g_hash_table_insert(iter->known_on, (gpointer)n->details->id, n); } if(is_set(iter->flags, pe_rsc_unique)) { break; } iter = iter->parent; } } if(node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = action_fail_ignore; } switch(on_fail) { case action_fail_ignore: /* nothing to do */ break; case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean */ pe_fence_node(data_set, node, "to recover from resource failure(s)"); break; case action_fail_standby: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case action_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ clear_bit(rsc->flags, pe_rsc_managed); break; case action_fail_migrate: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__",data_set); break; case action_fail_stop: rsc->next_role = RSC_ROLE_STOPPED; break; case action_fail_recover: if(rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); stop_action(rsc, node, FALSE); } break; } if(rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { if(is_set(rsc->flags, pe_rsc_orphan)) { if(is_set(rsc->flags, pe_rsc_managed)) { crm_config_warn("Detected active orphan %s running on %s", rsc->id, node->details->uname); } else 
{ crm_config_warn("Cluster configured not to stop active orphans." " %s must be stopped manually on %s", rsc->id, node->details->uname); } } native_add_running(rsc, node, data_set); if(on_fail != action_fail_ignore) { set_bit(rsc->flags, pe_rsc_failed); } } else if(rsc->clone_name) { crm_debug_2("Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); crm_free(rsc->clone_name); rsc->clone_name = NULL; } else { char *key = stop_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, node); GListPtr gIter = possible_matches; for(; gIter != NULL; gIter = gIter->next) { action_t *stop = (action_t*)gIter->data; stop->flags |= pe_action_optional; } crm_free(key); } } /* create active recurring operations as optional */ static void process_recurring(node_t *node, resource_t *rsc, int start_index, int stop_index, GListPtr sorted_op_list, pe_working_set_t *data_set) { int counter = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; crm_debug_3("%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for(; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode*)gIter->data; int interval = 0; char *key = NULL; const char *id = ID(rsc_op); const char *interval_s = NULL; counter++; if(node->details->online == FALSE) { crm_debug_4("Skipping %s/%s: node is offline", rsc->id, node->details->uname); break; /* Need to check if there's a monitor for role="Stopped" */ } else if(start_index < stop_index && counter <= stop_index) { crm_debug_4("Skipping %s/%s: resource is not active", id, node->details->uname); continue; } else if(counter < start_index) { crm_debug_4("Skipping %s/%s: old %d", id, node->details->uname, counter); continue; } interval_s = crm_element_value(rsc_op,XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if(interval == 0) { crm_debug_4("Skipping %s/%s: non-recurring", id, node->details->uname); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if(safe_str_eq(status, "-1")) { crm_debug_4("Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = generate_op_key(rsc->id, task, interval); crm_debug_3("Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); } } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { int counter = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; *stop_index = -1; *start_index = -1; for(; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode*)gIter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if(safe_str_eq(task, CRMD_ACTION_STOP) && safe_str_eq(status, "0")) { *stop_index = counter; } else if(safe_str_eq(task, CRMD_ACTION_START)) { *start_index = counter; } else if(*start_index <= *stop_index && safe_str_eq(task, CRMD_ACTION_STATUS)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if(safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) { *start_index = counter; } } } } static void unpack_lrm_rsc_state( node_t *node, xmlNode * rsc_entry, pe_working_set_t *data_set) { GListPtr gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; const char *task = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = NULL; GListPtr op_list = NULL; GListPtr 
sorted_op_list = NULL; xmlNode *migrate_op = NULL; xmlNode *rsc_op = NULL; enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_debug_3("[%s] Processing %s on %s", crm_element_name(rsc_entry), rsc_id, node->details->uname); /* extract operations */ op_list = NULL; sorted_op_list = NULL; for(rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { if(crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } if(op_list == NULL) { /* if there are no operations, there is nothing to do */ return; } /* find the resource */ rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); if(rsc == NULL) { rsc = process_orphan_resource(rsc_entry, node, data_set); } CRM_ASSERT(rsc != NULL); /* process operations */ saved_role = rsc->role; on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for(gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode*)gIter->data; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); if(safe_str_eq(task, CRMD_ACTION_MIGRATED)) { migrate_op = rsc_op; } unpack_rsc_op(rsc, node, rsc_op, gIter->next, &on_fail, data_set); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail, migrate_op, data_set); if(get_target_role(rsc, &req_role)) { if(rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { crm_debug("%s: Overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); rsc->next_role = req_role; } else if(req_role > rsc->next_role) { crm_info("%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if(saved_role > rsc->role) { rsc->role = saved_role; } } gboolean unpack_lrm_resources(node_t *node, xmlNode * lrm_rsc_list, pe_working_set_t *data_set) { xmlNode *rsc_entry = NULL; CRM_CHECK(node != NULL, return FALSE); crm_debug_3("Unpacking resources on %s", node->details->uname); for(rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next(rsc_entry)) { if(crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { unpack_lrm_rsc_state(node, rsc_entry, data_set); } } return TRUE; } static void set_active(resource_t *rsc) { resource_t *top = uber_parent(rsc); if(top && top->variant == pe_master) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { node_t *node = value; int *score = user_data; node->weight = *score; } #define STATUS_PATH_MAX 1024 static xmlNode *find_lrm_op(const char *resource, const char *op, const char *node, const char *source, pe_working_set_t *data_set) { int offset = 0; char xpath[STATUS_PATH_MAX]; offset += snprintf(xpath+offset, STATUS_PATH_MAX-offset, "//node_state[@uname='%s']", node); offset += snprintf(xpath+offset, STATUS_PATH_MAX-offset, "//"XML_LRM_TAG_RESOURCE"[@id='%s']", resource); /* Need to check against transition_magic too? 
*/ if(source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) { offset += snprintf(xpath+offset, STATUS_PATH_MAX-offset, "/"XML_LRM_TAG_RSC_OP"[@operation='%s' and @migrate_target='%s']", op, source); } else if(source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) { offset += snprintf(xpath+offset, STATUS_PATH_MAX-offset, "/"XML_LRM_TAG_RSC_OP"[@operation='%s' and @migrate_source='%s']", op, source); } else { offset += snprintf(xpath+offset, STATUS_PATH_MAX-offset, "/"XML_LRM_TAG_RSC_OP"[@operation='%s']", op); } return get_xpath_object(xpath, data_set->input, LOG_DEBUG); } gboolean unpack_rsc_op(resource_t *rsc, node_t *node, xmlNode *xml_op, GListPtr next, enum action_fail_response *on_fail, pe_working_set_t *data_set) { int task_id = 0; const char *id = NULL; const char *key = NULL; const char *task = NULL; const char *magic = NULL; const char *actual_rc = NULL; /* const char *target_rc = NULL; */ const char *task_status = NULL; const char *interval_s = NULL; const char *op_version = NULL; int interval = 0; int task_status_i = -2; int actual_rc_i = 0; int target_rc = -1; int last_failure = 0; action_t *action = NULL; node_t *effective_node = NULL; resource_t *failed = NULL; gboolean expired = FALSE; gboolean is_probe = FALSE; gboolean clear_past_failure = FALSE; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(node != NULL, return FALSE); CRM_CHECK(xml_op != NULL, return FALSE); id = ID(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); task_status = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC); key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); CRM_CHECK(id != NULL, return FALSE); CRM_CHECK(task != NULL, return FALSE); CRM_CHECK(task_status != NULL, return FALSE); task_status_i = crm_parse_int(task_status, NULL); CRM_CHECK(task_status_i <= LRM_OP_ERROR, return FALSE); CRM_CHECK(task_status_i >= LRM_OP_PENDING, return FALSE); if(safe_str_eq(task, CRMD_ACTION_NOTIFY)) { /* safe to ignore these */ return TRUE; } if(rsc->failure_timeout > 0) { int last_run = 0; if(crm_element_value_int(xml_op, "last-rc-change", &last_run) == 0) { time_t now = get_timet_now(data_set); if(now > (last_run + rsc->failure_timeout)) { expired = TRUE; } } } crm_debug_2("Unpacking task %s/%s (call_id=%d, status=%s) on %s (role=%s)", id, task, task_id, task_status, node->details->uname, role2text(rsc->role)); interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if(interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) { is_probe = TRUE; } if(node->details->unclean) { crm_debug_2("Node %s (where %s is running) is unclean." 
" Further action depends on the value of the stop's on-fail attribue", node->details->uname, rsc->id); } actual_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); CRM_CHECK(actual_rc != NULL, return FALSE); actual_rc_i = crm_parse_int(actual_rc, NULL); if(key) { int dummy = 0; char *dummy_string = NULL; decode_transition_key(key, &dummy_string, &dummy, &dummy, &target_rc); crm_free(dummy_string); } if(task_status_i == LRM_OP_DONE && target_rc >= 0) { if(target_rc == actual_rc_i) { task_status_i = LRM_OP_DONE; } else { task_status_i = LRM_OP_ERROR; crm_debug("%s on %s returned %d (%s) instead of the expected value: %d (%s)", id, node->details->uname, actual_rc_i, execra_code2string(actual_rc_i), target_rc, execra_code2string(target_rc)); } } else if(task_status_i == LRM_OP_ERROR) { /* let us decide that */ task_status_i = LRM_OP_DONE; } if(task_status_i == LRM_OP_NOTSUPPORTED) { actual_rc_i = EXECRA_UNIMPLEMENT_FEATURE; } if(task_status_i != actual_rc_i && rsc->failure_timeout > 0 && get_failcount(node, rsc, &last_failure, data_set) == 0) { if(last_failure > 0) { action_t *clear_op = NULL; clear_op = custom_action( rsc, crm_concat(rsc->id, CRM_OP_CLEAR_FAILCOUNT, '_'), CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); crm_notice("Clearing expired failcount for %s on %s", rsc->id, node->details->uname); } } if(expired && actual_rc_i != EXECRA_NOT_RUNNING && actual_rc_i != EXECRA_RUNNING_MASTER && actual_rc_i != EXECRA_OK) { crm_notice("Ignoring expired failure %s (rc=%d, magic=%s) on %s", id, actual_rc_i, magic, node->details->uname); goto done; } /* we could clean this up significantly except for old LRMs and CRMs that * didnt include target_rc and liked to remap status */ switch(actual_rc_i) { case EXECRA_NOT_RUNNING: if(is_probe || target_rc == actual_rc_i) { task_status_i = LRM_OP_DONE; rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } else if(safe_str_neq(task, CRMD_ACTION_STOP)) { task_status_i = LRM_OP_ERROR; } break; case EXECRA_RUNNING_MASTER: if(is_probe) { task_status_i = LRM_OP_DONE; crm_notice("Operation %s found resource %s active in master mode on %s", id, rsc->id, node->details->uname); } else if(target_rc == actual_rc_i) { /* nothing to do */ } else if(target_rc >= 0) { task_status_i = LRM_OP_ERROR; /* legacy code for pre-0.6.5 operations */ } else if(safe_str_neq(task, CRMD_ACTION_STATUS) || rsc->role != RSC_ROLE_MASTER) { task_status_i = LRM_OP_ERROR; if(rsc->role != RSC_ROLE_MASTER) { crm_err("%s reported %s in master mode on %s", id, rsc->id, node->details->uname); } } rsc->role = RSC_ROLE_MASTER; break; case EXECRA_FAILED_MASTER: rsc->role = RSC_ROLE_MASTER; task_status_i = LRM_OP_ERROR; break; case EXECRA_UNIMPLEMENT_FEATURE: if(interval > 0) { task_status_i = LRM_OP_NOTSUPPORTED; break; } /* else: fall through */ case EXECRA_INSUFFICIENT_PRIV: case EXECRA_NOT_INSTALLED: case EXECRA_INVALID_PARAM: effective_node = node; /* fall through */ case EXECRA_NOT_CONFIGURED: failed = rsc; if(is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(failed); } do_crm_log(actual_rc_i==EXECRA_NOT_INSTALLED?LOG_NOTICE:LOG_ERR, "Hard error - %s failed with rc=%d: Preventing %s from re-starting %s %s", id, actual_rc_i, failed->id, effective_node?"on":"anywhere", effective_node?effective_node->details->uname:"in the cluster"); resource_location(failed, effective_node, -INFINITY, "hard-error", data_set); if(is_probe) { /* 
treat these like stops */ task = CRMD_ACTION_STOP; task_status_i = LRM_OP_DONE; crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); if(actual_rc_i != EXECRA_NOT_INSTALLED || is_set(data_set->flags, pe_flag_symmetric_cluster)) { if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) { add_node_copy(data_set->failed, xml_op); } } } break; case EXECRA_OK: if(is_probe && target_rc == 7) { task_status_i = LRM_OP_DONE; crm_notice("Operation %s found resource %s active on %s", id, rsc->id, node->details->uname); /* legacy code for pre-0.6.5 operations */ } else if(target_rc < 0 && interval > 0 && rsc->role == RSC_ROLE_MASTER) { /* catch status ops that return 0 instead of 8 while they * are supposed to be in master mode */ task_status_i = LRM_OP_ERROR; } break; default: if(task_status_i == LRM_OP_DONE) { crm_info("Remapping %s (rc=%d) on %s to an ERROR", id, actual_rc_i, node->details->uname); task_status_i = LRM_OP_ERROR; } } if(task_status_i == LRM_OP_ERROR || task_status_i == LRM_OP_TIMEOUT || task_status_i == LRM_OP_NOTSUPPORTED) { action = custom_action(rsc, crm_strdup(id), task, NULL, TRUE, FALSE, data_set); if(expired) { crm_notice("Ignoring expired failure (calculated) %s (rc=%d, magic=%s) on %s", id, actual_rc_i, magic, node->details->uname); goto done; } else if(action->on_fail == action_fail_ignore) { crm_warn("Remapping %s (rc=%d) on %s to DONE: ignore", id, actual_rc_i, node->details->uname); task_status_i = LRM_OP_DONE; set_bit(rsc->flags, pe_rsc_failure_ignored); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) { add_node_copy(data_set->failed, xml_op); } } } switch(task_status_i) { case LRM_OP_PENDING: if(safe_str_eq(task, CRMD_ACTION_START)) { set_bit(rsc->flags, pe_rsc_start_pending); set_active(rsc); } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } /* * Intentionally ignoring pending migrate ops here; * haven't decided if we need to do anything special * with them yet... */ break; case LRM_OP_DONE: crm_debug_3("%s/%s completed on %s", rsc->id, task, node->details->uname); if(actual_rc_i == EXECRA_NOT_RUNNING) { clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_START)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_STOP)) { rsc->role = RSC_ROLE_STOPPED; clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_DEMOTE)) { rsc->role = RSC_ROLE_SLAVE; clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_MIGRATED)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if(safe_str_eq(task, CRMD_ACTION_MIGRATE)) { /* * The normal sequence is (now): migrate_to(Src) -> migrate_from(Tgt) -> stop(Src) * * So if a migrate_to is followed by a stop, then we dont need to care what * happended on the target node * * Without the stop, we need to look for a successful migrate_from. 
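 *   (i.e. a CRMD_ACTION_MIGRATED entry recorded against the migration target
 *    with rc == EXECRA_OK and op-status == LRM_OP_DONE, which is exactly what
 *    the find_lrm_op() lookup and the from_rc/from_status checks below test)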
* This would also imply we're no longer running on the source * * Without the stop, and without a migrate_from op we make sure the resource * gets stopped on both source and target (assuming the target is up) * */ int stop_id = 0; xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->id, NULL, data_set); if(stop_op) { crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); } if(stop_op == NULL || stop_id < task_id) { int from_rc = 0, from_status = 0; const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); node_t *target = pe_find_node(data_set->nodes, migrate_target); xmlNode *migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source, data_set); rsc->role = RSC_ROLE_STARTED; /* can be master? */ if(migrate_from) { crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); crm_trace("%s op on %s exited with status=%d, rc=%d", ID(migrate_from), migrate_target, from_status, from_rc); } if(migrate_from && from_rc == EXECRA_OK && from_status == LRM_OP_DONE) { crm_trace("Detected dangling migration op: %s on %s", ID(xml_op), migrate_source); /* all good * just need to arrange for the stop action to get sent * but _without_ affecting the target somehow */ rsc->role = RSC_ROLE_STOPPED; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } else if(migrate_from) { /* Failed */ crm_trace("Marking active on %s %p %d", migrate_target, target, target->details->online); if(target && target->details->online) { native_add_running(rsc, target, data_set); } } else { /* Pending or complete but erased */ node_t *target = pe_find_node_id(data_set->nodes, migrate_target); crm_trace("Marking active on %s %p %d", migrate_target, target, target->details->online); if(target && target->details->online) { /* TODO: One day, figure out how to complete the migration * For now, consider it active in both locations so it gets stopped everywhere */ native_add_running(rsc, target, data_set); } else { /* Consider it failed here - forces a restart, prevents migration */ set_bit_inplace(rsc->flags, pe_rsc_failed); } } } } else if(rsc->role < RSC_ROLE_STARTED) { /* start, migrate_to and migrate_from will land here */ crm_debug_3("%s active on %s", rsc->id, node->details->uname); set_active(rsc); } /* clear any previous failure actions */ if(clear_past_failure) { switch(*on_fail) { case action_fail_block: case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: crm_debug_2("%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_ignore: case action_fail_recover: *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } } break; case LRM_OP_ERROR: case LRM_OP_TIMEOUT: case LRM_OP_NOTSUPPORTED: crm_warn("Processing failed op %s on %s: %s (%d)", id, node->details->uname, execra_code2string(actual_rc_i), actual_rc_i); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) { add_node_copy(data_set->failed, xml_op); } if(*on_fail < action->on_fail) { *on_fail = action->on_fail; } if(safe_str_eq(task, CRMD_ACTION_STOP)) { resource_location( rsc, node, -INFINITY, "__stop_fail__", data_set); } else if(safe_str_eq(task, CRMD_ACTION_MIGRATED)) { int stop_id = 0; int migrate_id = 0; const char 
*migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_source, NULL, data_set); xmlNode *migrate_op = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, migrate_source, migrate_target, data_set); if(stop_op) { crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); } if(migrate_op) { crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id); } /* Get our state right */ rsc->role = RSC_ROLE_STARTED; /* can be master? */ if(stop_op == NULL || stop_id < migrate_id) { node_t *source = pe_find_node(data_set->nodes, migrate_source); if(source && source->details->online) { native_add_running(rsc, source, data_set); } } } else if(safe_str_eq(task, CRMD_ACTION_MIGRATE)) { int stop_id = 0; int migrate_id = 0; const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_target, NULL, data_set); xmlNode *migrate_op = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source, data_set); if(stop_op) { crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); } if(migrate_op) { crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id); } /* Get our state right */ rsc->role = RSC_ROLE_STARTED; /* can be master? */ if(stop_op == NULL || stop_id < migrate_id) { node_t *target = pe_find_node(data_set->nodes, migrate_target); crm_trace("Stop: %p %d, Migrated: %p %d", stop_op, stop_id, migrate_op, migrate_id); if(target && target->details->online) { native_add_running(rsc, target, data_set); } } else if(migrate_op == NULL) { /* Make sure it gets cleaned up, the stop may pre-date the migrate_from */ rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if(safe_str_eq(task, CRMD_ACTION_DEMOTE)) { /* * staying in role=master ends up putting the PE/TE into a loop * setting role=slave is not dangerous because no master will be * promoted until the failed resource has been fully stopped */ crm_warn("Forcing %s to stop after a failed demote action", rsc->id); rsc->next_role = RSC_ROLE_STOPPED; rsc->role = RSC_ROLE_SLAVE; } else if(compare_version("2.0", op_version) > 0 && safe_str_eq(task, CRMD_ACTION_START)) { crm_warn("Compatibility handling for failed op %s on %s", id, node->details->uname); resource_location( rsc, node, -INFINITY, "__legacy_start__", data_set); } if(rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } crm_debug_2("Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), node->details->unclean?"true":"false", fail2text(action->on_fail), role2text(action->fail_role)); if(action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { rsc->next_role = action->fail_role; } if(action->fail_role == RSC_ROLE_STOPPED) { int score = -INFINITY; crm_err("Making sure %s doesn't come up again", rsc->id); /* make sure it doesnt come up again */ g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_foreach(rsc->allowed_nodes, set_node_score, &score); } pe_free_action(action); action = NULL; break; case LRM_OP_CANCELLED: /* do nothing?? 
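 * (at the moment the code below just logs the cancelled op via pe_err() and
 *  falls out of the switch without touching the resource state)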
*/ pe_err("Dont know what to do for cancelled ops yet"); break; } done: crm_debug_3("Resource %s after %s: role=%s", rsc->id, task, role2text(rsc->role)); pe_free_action(action); return TRUE; } gboolean add_node_attrs(xmlNode *xml_obj, node_t *node, gboolean overwrite, pe_working_set_t *data_set) { g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_UNAME), crm_strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_ID), crm_strdup(node->details->id)); if(safe_str_eq(node->details->id, data_set->dc_uuid)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_DC), crm_strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_DC), crm_strdup(XML_BOOLEAN_FALSE)); } unpack_instance_attributes( data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set->now); return TRUE; } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode *rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GListPtr gIter = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for(rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { if(crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if(op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if(active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for(gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode*)gIter->data; counter++; if(start_index < stop_index) { crm_debug_4("Skipping %s: not active", ID(rsc_entry)); break; } else if(counter < start_index) { crm_debug_4("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GListPtr find_operations( const char *rsc, const char *node, gboolean active_filter, pe_working_set_t *data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); const char *uname = NULL; node_t *this_node = NULL; xmlNode *node_state = NULL; for(node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next(node_state)) { if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { uname = crm_element_value(node_state, XML_ATTR_UNAME); if(node != NULL && safe_str_neq(uname, node)) { continue; } this_node = pe_find_node(data_set->nodes, uname); CRM_CHECK(this_node != NULL, continue); determine_online_status(node_state, this_node, data_set); if(this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... 
* unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE); for(lrm_rsc = __xml_first_child(tmp); lrm_rsc != NULL; lrm_rsc = __xml_next(lrm_rsc)) { if(crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) { const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID); if(rsc != NULL && safe_str_neq(rsc_id, rsc)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } } } return output; } diff --git a/pacemaker.spec.in b/pacemaker.spec.in index 9d4432e55f..63f1f8d1b4 100644 --- a/pacemaker.spec.in +++ b/pacemaker.spec.in @@ -1,703 +1,704 @@ %global gname haclient %global uname hacluster %global pcmk_docdir %{_docdir}/%{name} %global specversion 0 %global upstream_version tip %global upstream_prefix pacemaker # Compatibility macros for distros (fedora) that don't provide Python macros by default # Do this instead of trying to conditionally include {_rpmconfigdir}/macros.python %{!?py_ver: %{expand: %%global py_ver %%(echo `python -c "import sys; print sys.version[:3]"`)}} %{!?py_prefix: %{expand: %%global py_prefix %%(echo `python -c "import sys; print sys.prefix"`)}} %{!?py_libdir: %{expand: %%global py_libdir %%{expand:%%%%{py_prefix}/%%%%{_lib}/python%%%%{py_ver}}}} %{!?py_sitedir: %{expand: %%global py_sitedir %%{expand:%%%%{py_libdir}/site-packages}}} # Compatibility macro wrappers for legacy RPM versions that do not # support conditional builds %{!?bcond_without: %{expand: %%global bcond_without() %%{expand:%%%%{!?_without_%%{1}:%%%%global with_%%{1} 1}}}} %{!?bcond_with: %{expand: %%global bcond_with() %%{expand:%%%%{?_with_%%{1}:%%%%global with_%%{1} 1}}}} %{!?with: %{expand: %%global with() %%{expand:%%%%{?with_%%{1}:1}%%%%{!?with_%%{1}:0}}}} %{!?without: %{expand: %%global without() %%{expand:%%%%{?with_%%{1}:0}%%%%{!?with_%%{1}:1}}}} # Conditionals # Invoke "rpmbuild --without " or "rpmbuild --with " # to disable or enable specific features # Supported cluster stacks, must support at least one %bcond_with cman %bcond_without doc %bcond_without ais %bcond_without heartbeat # ESMTP is not available in RHEL, only in EPEL. 
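# (e.g. "rpmbuild -ba pacemaker.spec --without esmtp --without snmp" would be
#  one plausible invocation on such a system; the feature names accepted by
#  --with/--without correspond to the %bcond_* declarations in this file)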
Allow people to build # the RPM without ESMTP in case they choose not to use EPEL packages %bcond_without esmtp %bcond_without snmp # Build with/without support for profiling tools # 'profiling' also disables -debuginfo package creation and also the stripping binaries/libraries # Useful if you want sane profile data %bcond_with profiling %bcond_with gcov # Support additional trace logging %bcond_without tracedata # We generate some docs using Publican, but its not available everywhere %bcond_without publican # Use a different versioning scheme %bcond_with pre_release %if %{with profiling} %global debug_package %{nil} %endif %if %{with pre_release} %global pcmk_release 0.%{specversion}.%{upstream_version}.hg %else %global pcmk_release %{specversion} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: 1.1.6 Release: %{pcmk_release}%{?dist} License: GPLv2+ and LGPLv2+ Url: http://www.clusterlabs.org Group: System Environment/Daemons Source0: pacemaker-%{upstream_version}.tar.bz2 BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on Requires(pre): cluster-glue Requires: resource-agents Requires: python >= 2.4 Conflicts: heartbeat < 2.99 Requires: %{name}-cli = %{version}-%{release} Requires: %{name}-libs = %{version}-%{release} %if 0%{?fedora} || 0%{?centos} > 4 || 0%{?rhel} > 4 Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) BuildRequires: help2man libtool-ltdl-devel %endif %if 0%{?suse_version} # net-snmp-devel on SLES10 does not suck in tcpd-devel automatically BuildRequires: help2man tcpd-devel # Suse splits this off into a separate package Requires: python-curses python-xml BuildRequires: python-curses python-xml %endif # Required for core functionality BuildRequires: automake autoconf libtool pkgconfig python BuildRequires: glib2-devel cluster-glue-libs-devel libxml2-devel libxslt-devel BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel gnutls-devel pam-devel libtool-ltdl-devel # Enables optional functionality BuildRequires: ncurses-devel openssl-devel libselinux-devel docbook-style-xsl resource-agents %ifarch alpha %{ix86} x86_64 BuildRequires: lm_sensors-devel %endif %if %{with cman} BuildRequires: clusterlib-devel %endif %if %{with esmtp} BuildRequires: libesmtp-devel %endif %if %{with snmp} BuildRequires: net-snmp-devel %endif %if %{with ais} # Do not require corosync, the admin should select which stack to use and install it BuildRequires: corosynclib-devel %endif %if %{with heartbeat} # Do not require heartbeat, the admin should select which stack to use and install it BuildRequires: heartbeat-devel heartbeat-libs >= 3.0.0 %endif %if %{with doc} BuildRequires: asciidoc help2man %if %{with publican} BuildRequires: publican inkscape %endif %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. 
Available rpmbuild rebuild options: --without : heartbeat ais snmp esmtp publican %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling the Pacemaker cluster resource manager Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. %package -n %{name}-libs License: GPLv2+ and LGPLv2+ Summary: Libraries used by the Pacemaker cluster resource manager and its clients Group: System Environment/Daemons %description -n %{name}-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. %package -n %{name}-libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-libs = %{version}-%{release} Requires: cluster-glue-libs-devel libtool-ltdl-devel Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel %if %{with ais} Requires: corosynclib-devel %endif %if %{with heartbeat} Requires: heartbeat-devel %endif %description -n %{name}-libs-devel Headers and shared libraries for developing tools for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: python %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: GPLv2+ and LGPLv2+ Summary: Documentation for Pacemaker Group: Documentation %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. %prep %setup -q -n %{upstream_prefix}-%{upstream_version} # Force the local time # # 'hg archive' sets the file date to the date of the last commit. # This can result in files having been created in the future # when building on machines in timezones 'behind' the one the # commit occurred in - which seriously confuses 'make' find . 
-exec touch \{\} \; %build ./autogen.sh %if %{with snmp} eval `objdump --headers --private-headers /usr/bin/perl | grep RPATH | awk '{print "export LD_LIBRARY_PATH="$2}'` %endif # RHEL <= 5 does not support --docdir docdir=%{pcmk_docdir} %{configure} \ %{!?with_heartbeat: --without-heartbeat} \ %{!?with_ais: --without-ais} \ %{!?with_esmtp: --without-esmtp} \ %{!?with_snmp: --without-snmp} \ %{?with_cman: --with-cman} \ %{?with_profiling: --with-profiling} \ %{?with_gcov: --with-gcov} \ %{?with_tracedata: --with-tracedata} \ --with-initdir=%{_initrddir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} \ --enable-fatal-warnings=no make %{_smp_mflags} docdir=%{pcmk_docdir} %install rm -rf %{buildroot} make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} install mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker # Scripts that need should be executable chmod a+x %{buildroot}/%{_libdir}/heartbeat/hb2openais-helper.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/extracttests.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x # Dont package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either rm -f %{buildroot}/%{_libdir}/heartbeat/crm_primitive.* rm -f %{buildroot}/%{_libdir}/heartbeat/atest rm -f %{buildroot}/%{_libdir}/service_crm.so %if %{with gcov} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . 
-name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %clean rm -rf %{buildroot} %post /sbin/chkconfig --add pacemaker || : %preun if [ $1 -eq 0 ]; then /sbin/service pacemaker stop &>/dev/null || : /sbin/chkconfig --del pacemaker || : fi %post -n %{name}-libs -p /sbin/ldconfig %postun -n %{name}-libs -p /sbin/ldconfig %files ########################################################### %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %{_initrddir}/pacemaker %if %{defined _unitdir} %{_unitdir}/pacemaker.service %endif %{_datadir}/pacemaker %{_datadir}/snmp/mibs/PCMK-MIB.txt %{_libdir}/heartbeat/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/attrd_updater %{_sbindir}/fence_legacy %{_sbindir}/stonith_admin %doc COPYING %doc AUTHORS %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine %dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %if %{with ais} %{_libexecdir}/lcrso/pacemaker.lcrso %endif %files cli %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm %{_sbindir}/crm_resource %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/ptest %{_sbindir}/crm_shadow %{_sbindir}/cibpipe %{_sbindir}/crm_node %{_sbindir}/crm_simulate %{_sbindir}/crm_report +%{_sbindir}/crm_ticket %{py_sitedir}/crm %doc %{_mandir} %if %{with heartbeat} %{_sbindir}/crm_uuid %else %exclude %{_sbindir}/crm_uuid %endif %doc COPYING %doc AUTHORS %doc ChangeLog %files -n %{name}-libs %defattr(-,root,root) %{_libdir}/libcib.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libcrmcluster.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpengine.so.* %{_libdir}/libtransitioner.so.* %{_libdir}/libstonithd.so.* %doc COPYING.LIB %doc AUTHORS %files doc %defattr(-,root,root) %doc %{pcmk_docdir} %files cts %defattr(-,root,root) %{py_sitedir}/cts %{_datadir}/pacemaker/tests/cts %doc COPYING.LIB %doc AUTHORS %files -n %{name}-libs-devel %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests/cts %{_datadir}/pacemaker/tests %{_includedir}/pacemaker %{_libdir}/*.so %if %{with gcov} %dir %{_var}/lib/pacemaker %{_var}/lib/pacemaker %endif %doc COPYING.LIB %doc AUTHORS %changelog * Fri Feb 11 2011 Andrew Beekhof 1.1.5-1 - Update source tarball to revision: baad6636a053 - Statistics: Changesets: 184 Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-) - See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details * Wed Oct 20 2010 Andrew Beekhof 1.1.4-1 - Moved all the interesting parts of the changelog into a separate file as per the Fedora policy :-/ - Update source tarball to revision: 75406c3eb2c1 tip - Significant performance enhancements to the Policy Engine and CIB - Statistics: Changesets: 169 Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-) - See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details * Tue Sep 21 2010 Andrew Beekhof 1.1.3-1 - Update source tarball to revision: e3bb31c56244 tip - Statistics: Changesets: 352 Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-) * Wed May 12 2010 Andrew Beekhof 1.1.2-1 - Update source tarball to revision: c25c972a25cc 
tip - Statistics: Changesets: 339 Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-) * Tue Feb 16 2010 Andrew Beekhof - 1.1.1-1 - First public release of Pacemaker 1.1 - Package reference documentation in a doc subpackage - Move cts into a subpackage so that it can be easily consumed by others - Update source tarball to revision: 17d9cd4ee29f + New stonith daemon that supports global notifications + Service placement influenced by the physical resources + A new tool for simulating failures and the cluster’s reaction to them + Ability to serialize an otherwise unrelated a set of resource actions (eg. Xen migrations) * Wed Feb 10 2010 Andrew Beekhof - 1.0.7-4 - Rebuild for heartbeat 3.0.2-2 * Wed Feb 10 2010 Andrew Beekhof - 1.0.7-3 - Rebuild for cluster-glue 1.0.3 * Tue Jan 19 2010 Andrew Beekhof - 1.0.7-2 - Rebuild for corosync 1.2.0 * Mon Jan 18 2010 Andrew Beekhof - 1.0.7-1 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) * Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + High: cib: Correctly clean up when both plaintext and tls remote ports are requested + High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + High: PE: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + High: PE: Make sure promote/demote pseudo actions are created correctly + High: PE: Prevent target-role from promoting more than master-max instances + High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + High: ais: Prevent deadlock - dont try to release IPC message if the connection failed + High: cib: For validation errors, send back the full CIB so the client can display the errors + High: cib: Prevent use-after-free for remote plaintext connections + High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + High: PE: Bug lf#2170 - stop-all-resources option had no effect + High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not + High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined + High: PE: do not include master score if it would prevent allocation + High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync + High: ais: Gracefully handle changes to the AIS nodeid + High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE + High: crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: PE: Bug lf#2178 - Indicate 
unmanaged clones + Medium: PE: Bug lf#2180 - Include node information for all failed ops + Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: PE: Correctly log resources that would like to start but can not + Medium: PE: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2 - Add versioned perl dependency as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1 - Add dependency on resource-agents - Use the version of the configure macro that supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + High: Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. * Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependancies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependancies * Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node + High: PE: Bug 2160 - Dont shuffle clones due to colocation + High: PE: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: PE: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in description - Change Source0 to reference the Mercurial repo - Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof - 1.0.4-1 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) * Wed Apr 08 2009 Andrew Beekhof - 1.0.3-1 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip - Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 
deletions(-) * Mon Feb 16 2009 Andrew Beekhof - 1.0.2-1 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) * Tue Nov 18 2008 Andrew Beekhof - 1.0.1-1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) * Thu Oct 16 2008 Andrew Beekhof - 1.0.0-1 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) * Mon Sep 22 2008 Andrew Beekhof - 0.7.3-1 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) * Wed Aug 20 2008 Andrew Beekhof - 0.7.1-1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) * Fri Jul 18 2008 Andrew Beekhof - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) * Wed Jun 25 2008 Andrew Beekhof - 0.7.0-1 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) * Thu Jun 19 2008 Andrew Beekhof - 0.6.5-1 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) * Thu May 22 2008 Andrew Beekhof - 0.6.4-1 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) * Wed Apr 23 2008 Andrew Beekhof - 0.6.3-1 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) * Thu Feb 14 2008 Andrew Beekhof - 0.6.2-1 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) * Tue Feb 12 2008 Andrew Beekhof - 0.6.1-1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) * Mon Jan 14 2008 Andrew Beekhof - 0.6.0-2 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependancies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. - Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256Mb/vmware stonith) on a single host (opensuse10.3/2Gb/2.66Ghz Quad Core2) + 7-node EMC Centera cluster (sles10/512Mb/2Ghz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. 
OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering + The OpenAIS stack is not currently recommended for use in clusters that have shared data as STONITH support is not yet implimented + pingd is not yet available for use with the OpenAIS stack + 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are activly working together with the OpenAIS project to get these resolved. - Pending bugs encountered during testing: + OpenAIS #1736 - Openais membership took 20s to stabilize + Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match + OpenAIS #1793 - Assertion failure in memb_state_gather_enter() + OpenAIS #1796 - Cluster message corruption * Mon Dec 10 2007 Andrew Beekhof - 0.6.0-1 - Initial opensuse package check-in diff --git a/pengine/allocate.c b/pengine/allocate.c index 2b2d2ba2ad..d6b68da9eb 100644 --- a/pengine/allocate.c +++ b/pengine/allocate.c @@ -1,2181 +1,2186 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_allocate); void set_alloc_actions(pe_working_set_t *data_set); void migrate_reload_madness(pe_working_set_t *data_set); resource_alloc_functions_t resource_class_alloc_functions[] = { { native_merge_weights, native_color, native_create_actions, native_create_probe, native_internal_constraints, native_rsc_colocation_lh, native_rsc_colocation_rh, native_rsc_location, native_action_flags, native_update_actions, native_expand, native_append_meta, }, { group_merge_weights, group_color, group_create_actions, native_create_probe, group_internal_constraints, group_rsc_colocation_lh, group_rsc_colocation_rh, group_rsc_location, group_action_flags, group_update_actions, group_expand, group_append_meta, }, { native_merge_weights, clone_color, clone_create_actions, clone_create_probe, clone_internal_constraints, clone_rsc_colocation_lh, clone_rsc_colocation_rh, clone_rsc_location, clone_action_flags, clone_update_actions, clone_expand, clone_append_meta, }, { native_merge_weights, master_color, master_create_actions, clone_create_probe, master_internal_constraints, clone_rsc_colocation_lh, master_rsc_colocation_rh, clone_rsc_location, clone_action_flags, clone_update_actions, clone_expand, master_append_meta, } }; static gboolean check_rsc_parameters(resource_t *rsc, node_t *node, xmlNode *rsc_entry, pe_working_set_t *data_set) { int attr_lpc = 0; gboolean force_restart = FALSE; gboolean delete_resource = FALSE; const char *value = NULL; const char *old_value = NULL; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for(; attr_lpc < DIMOF(attr_list); attr_lpc++) { value = 
crm_element_value(rsc->xml, attr_list[attr_lpc]); old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]); if(value == old_value /* ie. NULL */ || crm_str_eq(value, old_value, TRUE)) { continue; } force_restart = TRUE; crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s", rsc->id, node->details->uname, attr_list[attr_lpc], crm_str(old_value), crm_str(value)); } if(force_restart) { /* make sure the restart happens */ stop_action(rsc, node, FALSE); set_bit(rsc->flags, pe_rsc_start_pending); delete_resource = TRUE; } return delete_resource; } static void CancelXmlOp(resource_t *rsc, xmlNode *xml_op, node_t *active_node, const char *reason, pe_working_set_t *data_set) { int interval = 0; action_t *cancel = NULL; char *key = NULL; const char *task = NULL; const char *call_id = NULL; const char *interval_s = NULL; CRM_CHECK(xml_op != NULL, return); CRM_CHECK(active_node != NULL, return); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval); crm_info("Action %s on %s will be stopped: %s", key, active_node->details->uname, reason?reason:"unknown"); cancel = custom_action(rsc, crm_strdup(key), RSC_CANCEL, active_node, FALSE, TRUE, data_set); crm_free(cancel->task); cancel->task = crm_strdup(RSC_CANCEL); add_hash_param(cancel->meta, XML_LRM_ATTR_TASK, task); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); add_hash_param(cancel->meta, XML_LRM_ATTR_INTERVAL, interval_s); custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set); crm_free(key); key = NULL; } static gboolean check_action_definition(resource_t *rsc, node_t *active_node, xmlNode *xml_op, pe_working_set_t *data_set) { char *key = NULL; int interval = 0; const char *interval_s = NULL; gboolean did_change = FALSE; xmlNode *params_all = NULL; xmlNode *params_restart = NULL; GHashTable *local_rsc_params = NULL; char *digest_all_calc = NULL; const char *digest_all = NULL; const char *restart_list = NULL; const char *digest_restart = NULL; char *digest_restart_calc = NULL; action_t *action = NULL; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); CRM_CHECK(active_node != NULL, return FALSE); if(safe_str_eq(task, RSC_STOP)) { return FALSE; } interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval); if(interval > 0) { xmlNode *op_match = NULL; crm_debug_2("Checking parameters for %s", key); op_match = find_rsc_op_entry(rsc, key); if(op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) { CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set); crm_free(key); key = NULL; return TRUE; } else if(op_match == NULL) { crm_debug("Orphan action detected: %s on %s", key, active_node->details->uname); crm_free(key); key = NULL; return TRUE; } } action = custom_action(rsc, key, task, active_node, TRUE, FALSE, data_set); local_rsc_params = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); get_rsc_attributes(local_rsc_params, rsc, active_node, data_set); params_all = 
create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(local_rsc_params, hash2field, params_all); g_hash_table_foreach(action->extra, hash2field, params_all); g_hash_table_foreach(rsc->parameters, hash2field, params_all); g_hash_table_foreach(action->meta, hash2metafield, params_all); filter_action_parameters(params_all, op_version); digest_all_calc = calculate_operation_digest(params_all, op_version); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); if(interval == 0 && safe_str_eq(task, RSC_STATUS)) { /* Reload based on the start action not a probe */ task = RSC_START; } if(digest_restart) { /* Changes that force a restart */ params_restart = copy_xml(params_all); if(restart_list) { filter_reload_parameters(params_restart, restart_list); } digest_restart_calc = calculate_operation_digest(params_restart, op_version); if(safe_str_neq(digest_restart_calc, digest_restart)) { did_change = TRUE; crm_log_xml_info(params_restart, "params:restart"); crm_info("Parameters to %s on %s changed: recorded %s vs. %s (restart:%s) %s", key, active_node->details->uname, crm_str(digest_restart), digest_restart_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); key = generate_op_key(rsc->id, task, interval); custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); goto cleanup; } } if(safe_str_neq(digest_all_calc, digest_all)) { /* Changes that can potentially be handled by a reload */ action_t *op = NULL; did_change = TRUE; crm_log_xml_info(params_all, "params:reload"); crm_crit("Parameters to %s on %s changed: recorded %s vs. %s (reload:%s) %s", key, active_node->details->uname, crm_str(digest_all), digest_all_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); if(interval > 0) { #if 0 /* Always reload/restart the entire resource */ op = custom_action(rsc, start_key(rsc), RSC_START, NULL, FALSE, TRUE, data_set); update_action_flags(op, pe_action_allow_reload_conversion); #else /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */ key = generate_op_key(rsc->id, task, interval); op = custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); custom_action_order(rsc, start_key(rsc), NULL, NULL, NULL, op, pe_order_runnable_left, data_set); #endif } else if(digest_restart) { crm_trace("Reloading '%s' action for resource %s", task, rsc->id); /* Allow this resource to reload */ /* TODO: Set for the resource itself * - thus avoiding causing depedant resources to restart */ key = generate_op_key(rsc->id, task, interval); op = custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); update_action_flags(op, pe_action_allow_reload_conversion); } else { crm_trace("Resource %s doesn't know how to reload", rsc->id); /* Re-send the start/demote/promote op * Recurring ops will be detected independantly */ key = generate_op_key(rsc->id, task, interval); custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set); } } cleanup: free_xml(params_all); free_xml(params_restart); crm_free(digest_all_calc); crm_free(digest_restart_calc); g_hash_table_destroy(local_rsc_params); pe_free_action(action); return did_change; } extern gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set); static void check_actions_for(xmlNode *rsc_entry, resource_t *rsc, node_t *node, pe_working_set_t *data_set) { GListPtr gIter = NULL; int offset = -1; int interval = 
0; int stop_index = 0; int start_index = 0; const char *task = NULL; const char *interval_s = NULL; xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; gboolean is_probe = FALSE; CRM_CHECK(node != NULL, return); if(is_set(rsc->flags, pe_rsc_orphan)) { crm_debug_2("Skipping param check for %s: orphan", rsc->id); return; } else if(pe_find_node_id(rsc->running_on, node->details->id) == NULL) { crm_debug_2("Skipping param check for %s: no longer active on %s", rsc->id, node->details->uname); return; } crm_debug_3("Processing %s on %s", rsc->id, node->details->uname); if(check_rsc_parameters(rsc, node, rsc_entry, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } for(rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { if(crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); calculate_active_ops(sorted_op_list, &start_index, &stop_index); for(gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode*)gIter->data; offset++; if(start_index < stop_index) { /* stopped */ continue; } else if(offset < start_index) { /* action occurred prior to a start */ continue; } is_probe = FALSE; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if(interval == 0 && safe_str_eq(task, RSC_STATUS)) { is_probe = TRUE; } if(interval > 0 && is_set(data_set->flags, pe_flag_maintenance_mode)) { CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if(is_probe || safe_str_eq(task, RSC_START) || interval > 0) { check_action_definition(rsc, node, rsc_op, data_set); } } g_list_free(sorted_op_list); } static GListPtr find_rsc_list( GListPtr result, resource_t *rsc, const char *id, gboolean renamed_clones, gboolean partial, pe_working_set_t *data_set) { GListPtr gIter = NULL; gboolean match = FALSE; if(id == NULL) { return NULL; } else if(rsc == NULL && data_set) { for(gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } return result; } else if(rsc == NULL) { return NULL; } if(partial) { if(strstr(rsc->id, id)) { match = TRUE; } else if(rsc->long_name && strstr(rsc->long_name, id)) { match = TRUE; } else if(renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) { match = TRUE; } } else { if(strcmp(rsc->id, id) == 0){ match = TRUE; } else if(rsc->long_name && strcmp(rsc->long_name, id) == 0) { match = TRUE; } else if(renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } } if(match) { result = g_list_prepend(result, rsc); } if(rsc->children) { gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } } return result; } static void check_actions(pe_working_set_t *data_set) { const char *id = NULL; node_t *node = NULL; xmlNode *lrm_rscs = NULL; xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); xmlNode *node_state = NULL; for(node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next(node_state)) { if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { id = crm_element_value(node_state, XML_ATTR_ID); lrm_rscs = 
find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE); node = pe_find_node_id(data_set->nodes, id); if(node == NULL) { continue; } else if(can_run_resources(node) == FALSE) { crm_debug_2("Skipping param check for %s: cant run resources", node->details->uname); continue; } crm_debug_2("Processing node %s", node->details->uname); if(node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { xmlNode *rsc_entry = NULL; for(rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL; rsc_entry = __xml_next(rsc_entry)) { if(crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if(xml_has_children(rsc_entry)) { GListPtr gIter = NULL; GListPtr result = NULL; const char *rsc_id = ID(rsc_entry); CRM_CHECK(rsc_id != NULL, return); result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set); for(gIter = result; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; check_actions_for(rsc_entry, rsc, node, data_set); } g_list_free(result); } } } } } } } static gboolean apply_placement_constraints(pe_working_set_t *data_set) { GListPtr gIter = NULL; crm_debug_3("Applying constraints..."); for(gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { rsc_to_node_t *cons = (rsc_to_node_t*)gIter->data; cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); } return TRUE; } static void common_apply_stickiness(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count = 0; resource_t *failed = rsc; if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; common_apply_stickiness(child_rsc, node, data_set); } return; } if(is_set(rsc->flags, pe_rsc_managed) && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) { node_t *current = pe_find_node_id(rsc->running_on, node->details->id); node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if(current == NULL) { } else if(match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) { resource_t *sticky_rsc = rsc; resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set); crm_debug("Resource %s: preferring current location" " (node=%s, weight=%d)", sticky_rsc->id, node->details->uname, rsc->stickiness); } else { GHashTableIter iter; node_t *nIter = NULL; crm_debug("Ignoring stickiness for %s: the cluster is asymmetric" " and node %s is not explicitly allowed", rsc->id, node->details->uname); g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&nIter)) { crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight); } } } if(is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(rsc); } fail_count = get_failcount(node, rsc, NULL, data_set); if(fail_count > 0 && rsc->migration_threshold != 0) { if(rsc->migration_threshold <= fail_count) { resource_location(failed, node, -INFINITY, "__fail_limit__", data_set); crm_warn("Forcing %s away from %s after %d failures (max=%d)", failed->id, node->details->uname, fail_count, rsc->migration_threshold); } else { crm_notice("%s can fail %d more times on %s before being forced off", failed->id, rsc->migration_threshold - fail_count, node->details->uname); } } } static void complex_set_cmds(resource_t *rsc) { GListPtr gIter = rsc->children; rsc->cmds = &resource_class_alloc_functions[rsc->variant]; for(; gIter != NULL; gIter = gIter->next) { resource_t 
*child_rsc = (resource_t*)gIter->data; complex_set_cmds(child_rsc); } } void set_alloc_actions(pe_working_set_t *data_set) { GListPtr gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; complex_set_cmds(rsc); } } static void calculate_system_health (gpointer gKey, gpointer gValue, gpointer user_data) { const char *key = (const char *)gKey; const char *value = (const char *)gValue; int *system_health = (int *)user_data; if (!gKey || !gValue || !user_data) { return; } /* Does it start with #health? */ if (0 == strncmp (key, "#health", 7)) { int score; /* Convert the value into an integer */ score = char2score (value); /* Add it to the running total */ *system_health = merge_weights (score, *system_health); } } static gboolean apply_system_health(pe_working_set_t *data_set) { GListPtr gIter = NULL; const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy"); if (health_strategy == NULL || safe_str_eq (health_strategy, "none")) { /* Prevent any accidental health -> score translation */ node_score_red = 0; node_score_yellow = 0; node_score_green = 0; return TRUE; } else if (safe_str_eq (health_strategy, "migrate-on-red")) { /* Resources on nodes which have health values of red are * weighted away from that node. */ node_score_red = -INFINITY; node_score_yellow = 0; node_score_green = 0; } else if (safe_str_eq (health_strategy, "only-green")) { /* Resources on nodes which have health values of red or yellow * are forced away from that node. */ node_score_red = -INFINITY; node_score_yellow = -INFINITY; node_score_green = 0; } else if (safe_str_eq (health_strategy, "progressive")) { /* Same as the above, but use the r/y/g scores provided by the user * Defaults are provided by the pe_prefs table */ } else if (safe_str_eq (health_strategy, "custom")) { /* Requires the admin to configure the rsc_location constaints for * processing the stored health scores */ /* TODO: Check for the existance of appropriate node health constraints */ return TRUE; } else { crm_err ("Unknown node health strategy: %s", health_strategy); return FALSE; } crm_info ("Applying automated node health strategy: %s", health_strategy); for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int system_health = 0; node_t *node = (node_t*)gIter->data; /* Search through the node hash table for system health entries. */ g_hash_table_foreach ( node->details->attrs, calculate_system_health, &system_health); crm_info (" Node %s has an combined system health of %d", node->details->uname, system_health); /* If the health is non-zero, then create a new rsc2node so that the * weight will be added later on. 
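 *
 * Worked example (attribute names and values are illustrative only): with
 * node-health-strategy=progressive, a node carrying #health-disk=40 and
 * #health-net=-20 gets a combined system health of 40 + (-20) = 20 from
 * calculate_system_health(), and the loop below then calls rsc2node_new()
 * once per resource so that every resource picks up a +20 preference for
 * that node.  Under migrate-on-red or only-green a single "red" attribute
 * maps to -INFINITY and merge_weights() keeps the combined total pinned
 * there, pushing resources away from the node entirely.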
*/ if (system_health != 0) { GListPtr gIter2 = data_set->resources; for(; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t*)gIter2->data; rsc2node_new (health_strategy, rsc, system_health, node, data_set); } } } return TRUE; } gboolean stage0(pe_working_set_t *data_set) { xmlNode * cib_constraints = get_object_root( XML_CIB_TAG_CONSTRAINTS, data_set->input); if(data_set->input == NULL) { return FALSE; } if(is_set(data_set->flags, pe_flag_have_status) == FALSE) { crm_trace("Calculating status"); cluster_status(data_set); } set_alloc_actions(data_set); apply_system_health(data_set); unpack_constraints(cib_constraints, data_set); return TRUE; } static void wait_for_probe( resource_t *rsc, const char *action, action_t *probe_complete, pe_working_set_t *data_set) { if(probe_complete == NULL) { return; } if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; wait_for_probe(child, action, probe_complete, data_set); } } else { char *key = generate_op_key(rsc->id, action, 0); custom_action_order( NULL, NULL, probe_complete, rsc, key, NULL, pe_order_optional, data_set); } } /* * Check nodes for resources started outside of the LRM */ gboolean probe_resources(pe_working_set_t *data_set) { action_t *probe_complete = NULL; action_t *probe_node_complete = NULL; GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = data_set->nodes; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; const char *probed = g_hash_table_lookup( node->details->attrs, CRM_OP_PROBED); if(node->details->online == FALSE) { continue; } else if(node->details->unclean) { continue; } else if(probe_complete == NULL) { probe_complete = get_pseudo_op(CRM_OP_PROBED, data_set); } if(probed != NULL && crm_is_true(probed) == FALSE) { action_t *probe_op = custom_action( NULL, crm_strdup(CRM_OP_REPROBE), CRM_OP_REPROBE, node, FALSE, TRUE, data_set); add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); continue; } probe_node_complete = custom_action( NULL, crm_strdup(CRM_OP_PROBED), CRM_OP_PROBED, node, FALSE, TRUE, data_set); if(crm_is_true(probed)) { crm_trace("unset"); update_action_flags(probe_node_complete, pe_action_optional); } else { crm_trace("set"); update_action_flags(probe_node_complete, pe_action_optional|pe_action_clear); } crm_trace("%s - %d", node->details->uname, probe_node_complete->flags & pe_action_optional); probe_node_complete->priority = INFINITY; add_hash_param(probe_node_complete->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); if(node->details->pending) { update_action_flags(probe_node_complete, pe_action_runnable|pe_action_clear); crm_info("Action %s on %s is unrunnable (pending)", probe_node_complete->uuid, probe_node_complete->node->details->uname); } order_actions(probe_node_complete, probe_complete, pe_order_runnable_left/*|pe_order_implies_then*/); gIter2 = data_set->resources; for(; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t*)gIter2->data; if(rsc->cmds->create_probe( rsc, node, probe_node_complete, FALSE, data_set)) { update_action_flags(probe_complete, pe_action_optional|pe_action_clear); update_action_flags(probe_node_complete, pe_action_optional|pe_action_clear); wait_for_probe(rsc, RSC_START, probe_complete, data_set); } } } gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; wait_for_probe(rsc, RSC_STOP, probe_complete, data_set); } return TRUE; } /* * Count how 
many valid nodes we have (so we know the maximum number of * colors we can resolve). * * Apply node constraints (ie. filter the "allowed_nodes" part of resources */ gboolean stage2(pe_working_set_t *data_set) { GListPtr gIter = NULL; crm_debug_3("Applying placement constraints"); gIter = data_set->nodes; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; if(node == NULL) { /* error */ } else if(node->weight >= 0.0 /* global weight */ && node->details->online && node->details->type == node_member) { data_set->max_valid_nodes++; } } apply_placement_constraints(data_set); gIter = data_set->nodes; for(; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; node_t *node = (node_t*)gIter->data; gIter2 = data_set->resources; for(; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t*)gIter2->data; common_apply_stickiness(rsc, node, data_set); } } return TRUE; } /* * Create internal resource constraints before allocation */ gboolean stage3(pe_working_set_t *data_set) { GListPtr gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; rsc->cmds->internal_constraints(rsc, data_set); } return TRUE; } /* * Check for orphaned or redefined actions */ gboolean stage4(pe_working_set_t *data_set) { check_actions(data_set); return TRUE; } static gint sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data) { int rc = 0; int level = LOG_TRACE; int r1_weight = -INFINITY; int r2_weight = -INFINITY; const char *reason = "existance"; const GListPtr nodes = (GListPtr)data; resource_t *resource1 = (resource_t*)convert_const_pointer(a); resource_t *resource2 = (resource_t*)convert_const_pointer(b); node_t *node = NULL; GListPtr gIter = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; if(a == NULL && b == NULL) { goto done; } if(a == NULL) { return 1; } if(b == NULL) { return -1; } reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if(r1_weight > r2_weight) { rc = -1; goto done; } if(r1_weight < r2_weight) { rc = 1; goto done; } reason = "no node list"; if (nodes == NULL) { goto done; } r1_nodes = rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1, pe_weights_forward|pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes); r2_nodes = rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1, pe_weights_forward|pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes); /* Current location score */ reason = "current location"; r1_weight = -INFINITY; r2_weight = -INFINITY; if(resource1->running_on) { node = g_list_nth_data(resource1->running_on, 0); node = g_hash_table_lookup(r1_nodes, node->details->id); r1_weight = node->weight; } if(resource2->running_on) { node = g_list_nth_data(resource2->running_on, 0); node = g_hash_table_lookup(r2_nodes, node->details->id); r2_weight = node->weight; } if(r1_weight > r2_weight) { rc = -1; goto done; } if(r1_weight < r2_weight) { rc = 1; goto done; } reason = "score"; for(gIter = nodes; gIter != NULL; gIter = gIter->next) { node_t *r1_node = NULL; node_t *r2_node = NULL; node = (node_t*)gIter->data; r1_weight = -INFINITY; if(r1_nodes) { r1_node = g_hash_table_lookup(r1_nodes, node->details->id); } if (r1_node) { r1_weight = r1_node->weight; } r2_weight = -INFINITY; if(r2_nodes) { r2_node = g_hash_table_lookup(r2_nodes, node->details->id); } if (r2_node) { r2_weight = r2_node->weight; } if(r1_weight > r2_weight) { rc = -1; goto done; } if(r1_weight < 
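/* [editor's illustration -- not part of this patch]
 * sort_rsc_process_order() ranks resources for allocation when placement-strategy
 * is not "default": higher priority wins, then the score of the node each resource
 * is currently on (after rsc_merge_weights() has folded in colocation preferences),
 * then the per-node scores walked below.  A hypothetical comparison of two
 * resources A and B (names and numbers made up):
 *
 *   A: priority=10, current-node score=500    B: priority=10, current-node score=200
 *     -> equal priority, so A sorts first and is allocated before B
 *   A: priority=0,  current-node score=500    B: priority=5,  current-node score=-100
 *     -> B sorts first; priority is compared before any score
 */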
r2_weight) { rc = 1; goto done; } } done: if(r1_nodes) { g_hash_table_destroy(r1_nodes); } if(r2_nodes) { g_hash_table_destroy(r2_nodes); } do_crm_log_unlikely(level, "%s (%d) %c %s (%d) on %s: %s", resource1->id, r1_weight, rc<0?'>':rc>0?'<':'=', resource2->id, r2_weight, node?node->details->id:"n/a", reason); return rc; } gboolean stage5(pe_working_set_t *data_set) { GListPtr gIter = NULL; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr nodes = g_list_copy(data_set->nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); data_set->resources = g_list_sort_with_data( data_set->resources, sort_rsc_process_order, nodes); g_list_free(nodes); } gIter = data_set->nodes; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; dump_node_capacity(show_utilization?0:utilization_log_level, "Original", node); } crm_trace("Allocating services"); /* Take (next) highest resource, assign it and create its actions */ gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; crm_trace("Allocating: %s", rsc->id); rsc->cmds->allocate(rsc, NULL, data_set); } gIter = data_set->nodes; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; dump_node_capacity(show_utilization?0:utilization_log_level, "Remaining", node); } if(is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Calculating needed probes"); /* This code probably needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=100: With probes: ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints 36s ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph Without probes: ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: 
stage5: allocate.c:905 Creating done ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph */ probe_resources(data_set); } crm_trace("Creating actions"); gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; rsc->cmds->create_actions(rsc, data_set); } crm_trace("Creating done"); return TRUE; } static gboolean is_managed(const resource_t *rsc) { GListPtr gIter = rsc->children; if(is_set(rsc->flags, pe_rsc_managed)) { return TRUE; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; if(is_managed(child_rsc)) { return TRUE; } } return FALSE; } static gboolean any_managed_resouces(pe_working_set_t *data_set) { GListPtr gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; if(is_managed(rsc)) { return TRUE; } } return FALSE; } /* * Create dependancies for stonith and shutdown operations */ gboolean stage6(pe_working_set_t *data_set) { action_t *dc_down = NULL; action_t *dc_fence = NULL; action_t *stonith_op = NULL; action_t *last_stonith = NULL; gboolean integrity_lost = FALSE; action_t *ready = get_pseudo_op(STONITH_UP, data_set); action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *done = get_pseudo_op(STONITH_DONE, data_set); gboolean need_stonith = FALSE; GListPtr gIter = data_set->nodes; crm_debug_3("Processing fencing and shutdown cases"); if(is_set(data_set->flags, pe_flag_stonith_enabled) && (is_set(data_set->flags, pe_flag_have_quorum) || data_set->no_quorum_policy == no_quorum_ignore || data_set->no_quorum_policy == no_quorum_suicide)) { need_stonith = TRUE; } if(need_stonith && any_managed_resouces(data_set) == FALSE) { crm_notice("Delaying fencing operations until there are resources to manage"); need_stonith = FALSE; } for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; stonith_op = NULL; if(node->details->unclean && need_stonith) { pe_warn("Scheduling Node %s for STONITH", node->details->uname); stonith_op = custom_action( NULL, crm_strdup(CRM_OP_FENCE), CRM_OP_FENCE, node, FALSE, TRUE, data_set); add_hash_param( stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param( stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param( stonith_op->meta, "stonith_action", data_set->stonith_action); stonith_constraints(node, stonith_op, data_set); order_actions(ready, stonith_op, pe_order_runnable_left); order_actions(stonith_op, all_stopped, pe_order_implies_then); clear_bit_inplace(ready->flags, pe_action_optional); if(node->details->is_dc) { dc_down = stonith_op; dc_fence = stonith_op; } else { if(last_stonith) { order_actions(last_stonith, stonith_op, pe_order_optional); } last_stonith = stonith_op; } } else if(node->details->online && node->details->shutdown) { action_t *down_op = NULL; crm_notice("Scheduling Node %s for shutdown", node->details->uname); down_op = custom_action( NULL, crm_strdup(CRM_OP_SHUTDOWN), CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set); shutdown_constraints(node, down_op, data_set); add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); if(node->details->is_dc) { dc_down = down_op; } } if(node->details->unclean && 
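/* [editor's illustration -- not part of this patch]
 * The fencing decision stage6() is making here reduces to two tests: fencing must
 * be enabled and permitted by the no-quorum policy, and there must be at least one
 * managed resource worth protecting.  A condensed sketch using only the flags and
 * helpers visible in this function:
 *
 *   static gboolean fencing_wanted(pe_working_set_t *data_set)
 *   {
 *       gboolean allowed = is_set(data_set->flags, pe_flag_stonith_enabled)
 *           && (is_set(data_set->flags, pe_flag_have_quorum)
 *               || data_set->no_quorum_policy == no_quorum_ignore
 *               || data_set->no_quorum_policy == no_quorum_suicide);
 *
 *       // fencing is deferred while the cluster manages no resources at all
 *       return allowed && any_managed_resouces(data_set);
 *   }
 */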
stonith_op == NULL) { integrity_lost = TRUE; pe_warn("Node %s is unclean!", node->details->uname); } } if(integrity_lost) { if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED"); pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE"); } else if(is_set(data_set->flags, pe_flag_have_quorum) == FALSE) { crm_notice("Cannot fence unclean nodes until quorum is" " attained (or no-quorum-policy is set to ignore)"); } } if(dc_down != NULL) { GListPtr shutdown_matches = find_actions( data_set->actions, CRM_OP_SHUTDOWN, NULL); crm_debug_2("Ordering shutdowns before %s on %s (DC)", dc_down->task, dc_down->node->details->uname); add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); gIter = shutdown_matches; for(; gIter != NULL; gIter = gIter->next) { action_t *node_stop = (action_t*)gIter->data; if(node_stop->node->details->is_dc) { continue; } crm_debug("Ordering shutdown on %s before %s on %s", node_stop->node->details->uname, dc_down->task, dc_down->node->details->uname); order_actions(node_stop, dc_down, pe_order_optional); } if(last_stonith && dc_down != last_stonith) { order_actions(last_stonith, dc_down, pe_order_optional); } g_list_free(shutdown_matches); } if(last_stonith) { order_actions(last_stonith, done, pe_order_implies_then); } else if(dc_fence) { order_actions(dc_down, done, pe_order_implies_then); } order_actions(ready, done, pe_order_optional); return TRUE; } /* * Determine the sets of independent actions and the correct order for the * actions in each set. * * Mark dependencies of un-runnable actions un-runnable * */ static GListPtr find_actions_by_task(GListPtr actions, resource_t *rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if(list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *tmp = NULL; char *task = NULL; int interval = 0; if(parse_op_key(original_key, &tmp, &task, &interval)) { key = generate_op_key(rsc->id, task, interval); /* crm_err("looking up %s instead of %s", key, original_key); */ /* slist_iter(action, action_t, actions, lpc, */ /* crm_err(" - %s", action->uuid)); */ list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } crm_free(key); crm_free(tmp); crm_free(task); } return list; } static void rsc_order_then( action_t *lh_action, resource_t *rsc, order_constraint_t *order) { GListPtr gIter = NULL; GListPtr rh_actions = NULL; action_t *rh_action = NULL; enum pe_ordering type = order->type; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); rh_action = order->rh_action; crm_debug_3("Processing RH of ordering constraint %d", order->id); if(rh_action != NULL) { rh_actions = g_list_prepend(NULL, rh_action); } else if(rsc != NULL) { rh_actions = find_actions_by_task( rsc->actions, rsc, order->rh_action_task); } if(rh_actions == NULL) { crm_debug_4("No RH-Side (%s/%s) found for constraint..."
" ignoring", rsc->id,order->rh_action_task); if(lh_action) { crm_debug_4("LH-Side was: %s", lh_action->uuid); } return; } if(lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) { crm_trace("Detected dangling operation %s -> %s", lh_action->uuid, order->rh_action_task); clear_bit_inplace(type, pe_order_implies_then); } gIter = rh_actions; for(; gIter != NULL; gIter = gIter->next) { action_t *rh_action_iter = (action_t*)gIter->data; if(lh_action) { order_actions(lh_action, rh_action_iter, type); } else if(type & pe_order_implies_then) { update_action_flags(rh_action_iter, pe_action_runnable|pe_action_clear); crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type); } } g_list_free(rh_actions); } static void rsc_order_first(resource_t *lh_rsc, order_constraint_t *order, pe_working_set_t *data_set) { GListPtr gIter = NULL; GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_debug_3("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if(lh_action != NULL) { lh_actions = g_list_prepend(NULL, lh_action); } else if(lh_action == NULL) { lh_actions = find_actions_by_task( lh_rsc->actions, lh_rsc, order->lh_action_task); } if(lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; parse_op_key(order->lh_action_task, &rsc_id, &op_type, &interval); key = generate_op_key(lh_rsc->id, op_type, interval); if(lh_rsc->fns->state(lh_rsc, TRUE) != RSC_ROLE_STOPPED || safe_str_neq(op_type, RSC_STOP)) { crm_debug_4("No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); lh_actions = g_list_prepend(NULL, lh_action); } else { crm_free(key); crm_debug_4("No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } crm_free(op_type); crm_free(rsc_id); } gIter = lh_actions; for(; gIter != NULL; gIter = gIter->next) { action_t *lh_action_iter = (action_t*)gIter->data; if(rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if(rh_rsc) { rsc_order_then(lh_action_iter, rh_rsc, order); } else if(order->rh_action) { order_actions(lh_action_iter, order->rh_action, order->type); } } g_list_free(lh_actions); } extern gboolean update_action(action_t *action); gboolean stage7(pe_working_set_t *data_set) { GListPtr gIter = NULL; crm_debug_4("Applying ordering constraints"); /* Don't ask me why, but apparently they need to be processed in * the order they were created in... 
go figure * * Also g_list_prepend() has horrendous performance characteristics * So we need to use g_list_prepend() and then reverse the list here */ data_set->ordering_constraints = g_list_reverse( data_set->ordering_constraints); gIter = data_set->ordering_constraints; for(; gIter != NULL; gIter = gIter->next) { order_constraint_t *order = (order_constraint_t*)gIter->data; resource_t *rsc = order->lh_rsc; crm_debug_3("Applying ordering constraint: %d", order->id); if(rsc != NULL) { crm_debug_4("rsc_action-to-*"); rsc_order_first(rsc, order, data_set); continue; } rsc = order->rh_rsc; if(rsc != NULL) { crm_debug_4("action-to-rsc_action"); rsc_order_then(order->lh_action, rsc, order); } else { crm_debug_4("action-to-action"); order_actions( order->lh_action, order->rh_action, order->type); } } crm_debug_2("Updating %d actions", g_list_length(data_set->actions)); gIter = data_set->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; update_action(action); } crm_debug_2("Processing migrations"); gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; rsc_migrate_reload(rsc, data_set); LogActions(rsc, data_set); } return TRUE; } static gint sort_notify_entries(gconstpointer a, gconstpointer b) { int tmp; const notify_entry_t *entry_a = a; const notify_entry_t *entry_b = b; if(entry_a == NULL && entry_b == NULL) { return 0; } if(entry_a == NULL) { return 1; } if(entry_b == NULL) { return -1; } if(entry_a->rsc == NULL && entry_b->rsc == NULL) { return 0; } if(entry_a->rsc == NULL) { return 1; } if(entry_b->rsc == NULL) { return -1; } tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id); if(tmp != 0) { return tmp; } if(entry_a->node == NULL && entry_b->node == NULL) { return 0; } if(entry_a->node == NULL) { return 1; } if(entry_b->node == NULL) { return -1; } return strcmp(entry_a->node->details->id, entry_b->node->details->id); } static void expand_list(GListPtr list, char **rsc_list, char **node_list) { GListPtr gIter = list; const char *uname = NULL; const char *rsc_id = NULL; const char *last_rsc_id = NULL; if(list == NULL) { *rsc_list = crm_strdup(" "); if(node_list) { *node_list = crm_strdup(" "); } return; } *rsc_list = NULL; if(node_list) { *node_list = NULL; } for(; gIter != NULL; gIter = gIter->next) { notify_entry_t *entry = (notify_entry_t*)gIter->data; CRM_CHECK(entry != NULL, continue); CRM_CHECK(entry->rsc != NULL, continue); CRM_CHECK(node_list == NULL || entry->node != NULL, continue); uname = NULL; rsc_id = entry->rsc->id; CRM_ASSERT(rsc_id != NULL); /* filter dups */ if(safe_str_eq(rsc_id, last_rsc_id)) { continue; } last_rsc_id = rsc_id; if(rsc_list != NULL) { int existing_len = 0; int len = 2 + strlen(rsc_id); /* +1 space, +1 EOS */ if(rsc_list && *rsc_list) { existing_len = strlen(*rsc_list); } crm_debug_5("Adding %s (%dc) at offset %d", rsc_id, len-2, existing_len); crm_realloc(*rsc_list, len + existing_len); sprintf(*rsc_list + existing_len, "%s ", rsc_id); } if(entry->node != NULL) { uname = entry->node->details->uname; } if(node_list != NULL && uname) { int existing_len = 0; int len = 2 + strlen(uname); if(node_list && *node_list) { existing_len = strlen(*node_list); } crm_debug_5("Adding %s (%dc) at offset %d", uname, len-2, existing_len); crm_realloc(*node_list, len + existing_len); sprintf(*node_list + existing_len, "%s ", uname); } } } static void dup_attr(gpointer key, gpointer value, gpointer user_data) { add_hash_param(user_data, key, value); } static action_t * 
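/* [editor's note -- illustration, not part of this patch]
 * The comment at the top of stage7() above presumably means g_list_append():
 * appending to a GList is O(n) per call, so the ordering constraints are built
 * with O(1) g_list_prepend() and flipped once with g_list_reverse() to recover
 * creation order.  The generic shape of that idiom:
 *
 *   static GListPtr build_in_order(int how_many)
 *   {
 *       GListPtr result = NULL;
 *       int lpc = 0;
 *
 *       for (lpc = 0; lpc < how_many; lpc++) {
 *           // O(1) each time; g_list_append() here would make the loop O(n^2)
 *           result = g_list_prepend(result, GINT_TO_POINTER(lpc));
 *       }
 *       // one O(n) pass restores first-created-first order
 *       return g_list_reverse(result);
 *   }
 */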
pe_notify(resource_t *rsc, node_t *node, action_t *op, action_t *confirm, notify_data_t *n_data, pe_working_set_t *data_set) { char *key = NULL; action_t *trigger = NULL; const char *value = NULL; const char *task = NULL; if(op == NULL || confirm == NULL) { crm_debug_2("Op=%p confirm=%p", op, confirm); return NULL; } CRM_CHECK(node != NULL, return NULL); if(node->details->online == FALSE) { crm_debug_2("Skipping notification for %s: node offline", rsc->id); return NULL; } else if(is_set(op->flags, pe_action_runnable) == FALSE) { crm_debug_2("Skipping notification for %s: not runnable", op->uuid); return NULL; } value = g_hash_table_lookup(op->meta, "notify_type"); task = g_hash_table_lookup(op->meta, "notify_operation"); crm_debug_2("Creating notify actions for %s: %s (%s-%s)", op->uuid, rsc->id, value, task); key = generate_notify_key(rsc->id, value, task); trigger = custom_action(rsc, key, op->task, node, is_set(op->flags, pe_action_optional), TRUE, data_set); g_hash_table_foreach(op->meta, dup_attr, trigger->meta); g_hash_table_foreach(n_data->keys, dup_attr, trigger->meta); /* pseudo_notify before notify */ crm_debug_3("Ordering %s before %s (%d->%d)", op->uuid, trigger->uuid, trigger->id, op->id); order_actions(op, trigger, pe_order_optional); order_actions(trigger, confirm, pe_order_optional); return trigger; } static void pe_post_notify(resource_t *rsc, node_t *node, notify_data_t *n_data, pe_working_set_t *data_set) { action_t *notify = NULL; CRM_CHECK(rsc != NULL, return); if(n_data->post == NULL) { return; /* Nothing to do */ } notify = pe_notify(rsc, node, n_data->post, n_data->post_done, n_data, data_set); if(notify != NULL) { notify->priority = INFINITY; } if(n_data->post_done) { GListPtr gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *mon = (action_t*)gIter->data; const char *interval = g_hash_table_lookup(mon->meta, "interval"); if(interval == NULL || safe_str_eq(interval, "0")) { crm_debug_3("Skipping %s: interval", mon->uuid); continue; } else if(safe_str_eq(mon->task, "cancel")) { crm_debug_3("Skipping %s: cancel", mon->uuid); continue; } order_actions(n_data->post_done, mon, pe_order_optional); } } } notify_data_t * create_notification_boundaries( resource_t *rsc, const char *action, action_t *start, action_t *end, pe_working_set_t *data_set) { /* Create the pseudo ops that preceed and follow the actual notifications */ /* * Creates two sequences (conditional on start and end being supplied): * pre_notify -> pre_notify_complete -> start, and * end -> post_notify -> post_notify_complete * * 'start' and 'end' may be the same event or ${X} and ${X}ed as per clones */ char *key = NULL; notify_data_t *n_data = NULL; if(is_not_set(rsc->flags, pe_rsc_notify)) { return NULL; } crm_malloc0(n_data, sizeof(notify_data_t)); n_data->action = action; n_data->keys = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); if(start) { /* create pre-event notification wrappers */ key = generate_notify_key(rsc->id, "pre", start->task); n_data->pre = custom_action( rsc, key, RSC_NOTIFY, NULL, is_set(start->flags, pe_action_optional), TRUE, data_set); update_action_flags(n_data->pre, pe_action_pseudo); update_action_flags(n_data->pre, pe_action_runnable); add_hash_param(n_data->pre->meta, "notify_type", "pre"); add_hash_param(n_data->pre->meta, "notify_operation", n_data->action); /* create pre_notify_complete */ key = generate_notify_key(rsc->id, "confirmed-pre", start->task); n_data->pre_done = custom_action( rsc, key, 
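/* [editor's illustration -- not part of this patch]
 * create_notification_boundaries() brackets a clone operation with pseudo actions
 * keyed by generate_notify_key().  For a hypothetical clone "myclone" being
 * started, and assuming generate_notify_key() joins its arguments in the usual
 * <rsc>_<type>_notify_<task>_0 spelling, the wrappers and their ordering would
 * look roughly like:
 *
 *   myclone_pre_notify_start_0              (n_data->pre)
 *   myclone_confirmed-pre_notify_start_0    (n_data->pre_done, ordered before 'start')
 *   ... the start actions themselves ...
 *   myclone_post_notify_start_0             (n_data->post, ordered after 'end')
 *   myclone_confirmed-post_notify_start_0   (n_data->post_done)
 */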
RSC_NOTIFIED, NULL, is_set(start->flags, pe_action_optional), TRUE, data_set); update_action_flags(n_data->pre_done, pe_action_pseudo); update_action_flags(n_data->pre_done, pe_action_runnable); add_hash_param(n_data->pre_done->meta, "notify_type", "pre"); add_hash_param(n_data->pre_done->meta, "notify_operation", n_data->action); order_actions(n_data->pre_done, start, pe_order_optional); order_actions(n_data->pre, n_data->pre_done, pe_order_optional); } if(end) { /* create post-event notification wrappers */ key = generate_notify_key(rsc->id, "post", end->task); n_data->post = custom_action( rsc, key, RSC_NOTIFY, NULL, is_set(end->flags, pe_action_optional), TRUE, data_set); n_data->post->priority = INFINITY; update_action_flags(n_data->post, pe_action_pseudo); if(is_set(end->flags, pe_action_runnable)) { update_action_flags(n_data->post, pe_action_runnable); } else { update_action_flags(n_data->post, pe_action_runnable|pe_action_clear); } add_hash_param(n_data->post->meta, "notify_type", "post"); add_hash_param(n_data->post->meta, "notify_operation", n_data->action); /* create post_notify_complete */ key = generate_notify_key(rsc->id, "confirmed-post", end->task); n_data->post_done = custom_action( rsc, key, RSC_NOTIFIED, NULL, is_set(end->flags, pe_action_optional), TRUE, data_set); n_data->post_done->priority = INFINITY; update_action_flags(n_data->post_done, pe_action_pseudo); if(is_set(end->flags, pe_action_runnable)) { update_action_flags(n_data->post_done, pe_action_runnable); } else { update_action_flags(n_data->post_done, pe_action_runnable|pe_action_clear); } add_hash_param(n_data->post_done->meta, "notify_type", "pre"); add_hash_param(n_data->post_done->meta, "notify_operation", n_data->action); order_actions(end, n_data->post, pe_order_implies_then); order_actions(n_data->post, n_data->post_done, pe_order_implies_then); } if(start && end) { order_actions(n_data->pre_done, n_data->post, pe_order_optional); } if(safe_str_eq(action, RSC_STOP)) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); order_actions(n_data->post_done, all_stopped, pe_order_optional); } return n_data; } void collect_notification_data(resource_t *rsc, gboolean state, gboolean activity, notify_data_t *n_data) { if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; collect_notification_data(child, state, activity, n_data); } return; } if(state) { notify_entry_t *entry = NULL; crm_malloc0(entry, sizeof(notify_entry_t)); entry->rsc = rsc; if(rsc->running_on) { /* we only take the first one */ entry->node = rsc->running_on->data; } crm_debug_2("%s state: %s", rsc->id, role2text(rsc->role)); switch(rsc->role) { case RSC_ROLE_STOPPED: n_data->inactive = g_list_prepend(n_data->inactive, entry); break; case RSC_ROLE_STARTED: n_data->active = g_list_prepend(n_data->active, entry); break; case RSC_ROLE_SLAVE: n_data->slave = g_list_prepend(n_data->slave, entry); break; case RSC_ROLE_MASTER: n_data->master = g_list_prepend(n_data->master, entry); break; default: crm_err("Unsupported notify role"); crm_free(entry); break; } } if(activity) { notify_entry_t *entry = NULL; enum action_tasks task; GListPtr gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t*)gIter->data; if(is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) { crm_malloc0(entry, sizeof(notify_entry_t)); entry->node = op->node; entry->rsc = rsc; task = text2task(op->task); switch(task) { case start_rsc: 
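/* [editor's illustration -- not part of this patch]
 * Each non-optional action that has a node gets wrapped in a notify_entry_t
 * (resource + node) and prepended to the per-task list chosen by this switch,
 * just as the role switch above filled the per-state lists.  For a hypothetical
 * three-node clone "myclone" that is stopping instance :0 and starting :2, the
 * lists end up roughly as:
 *
 *   n_data->start    = { (myclone:2, node3) }
 *   n_data->stop     = { (myclone:0, node1) }
 *   n_data->active   = { (myclone:0, node1), (myclone:1, node2) }
 *   n_data->inactive = { (myclone:2, NULL) }
 */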
n_data->start = g_list_prepend(n_data->start, entry); break; case stop_rsc: n_data->stop = g_list_prepend(n_data->stop, entry); break; case action_promote: n_data->promote = g_list_prepend(n_data->promote, entry); break; case action_demote: n_data->demote = g_list_prepend(n_data->demote, entry); break; default: crm_free(entry); break; } } } } } gboolean expand_notification_data(notify_data_t *n_data) { /* Expand the notification entries into a key=value hashtable * This hashtable is later used in action2xml() */ gboolean required = FALSE; char *rsc_list = NULL; char *node_list = NULL; if(n_data->stop) { n_data->stop = g_list_sort(n_data->stop, sort_notify_entries); } expand_list(n_data->stop, &rsc_list, &node_list); if(rsc_list != NULL && safe_str_neq(" ", rsc_list)) { if(safe_str_eq(n_data->action, RSC_STOP)) { required = TRUE; } } g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_uname"), node_list); if(n_data->start) { n_data->start = g_list_sort(n_data->start, sort_notify_entries); if(rsc_list && safe_str_eq(n_data->action, RSC_START)) { required = TRUE; } } expand_list(n_data->start, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_start_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_start_uname"), node_list); if(n_data->demote) { n_data->demote = g_list_sort(n_data->demote, sort_notify_entries); if(safe_str_eq(n_data->action, RSC_DEMOTE)) { required = TRUE; } } expand_list(n_data->demote, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_demote_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_demote_uname"), node_list); if(n_data->promote) { n_data->promote = g_list_sort(n_data->promote, sort_notify_entries); if(safe_str_eq(n_data->action, RSC_PROMOTE)) { required = TRUE; } } expand_list(n_data->promote, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_promote_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_promote_uname"), node_list); if(n_data->active) { n_data->active = g_list_sort(n_data->active, sort_notify_entries); } expand_list(n_data->active, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_active_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_active_uname"), node_list); if(n_data->slave) { n_data->slave = g_list_sort(n_data->slave, sort_notify_entries); } expand_list(n_data->slave, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_slave_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_slave_uname"), node_list); if(n_data->master) { n_data->master = g_list_sort(n_data->master, sort_notify_entries); } expand_list(n_data->master, &rsc_list, &node_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_master_resource"), rsc_list); g_hash_table_insert(n_data->keys, crm_strdup("notify_master_uname"), node_list); if(n_data->inactive) { n_data->inactive = g_list_sort(n_data->inactive, sort_notify_entries); } expand_list(n_data->inactive, &rsc_list, NULL); g_hash_table_insert(n_data->keys, crm_strdup("notify_inactive_resource"), rsc_list); if(required && n_data->pre) { update_action_flags(n_data->pre, pe_action_optional|pe_action_clear); update_action_flags(n_data->pre_done, pe_action_optional|pe_action_clear); } if(required && n_data->post) { update_action_flags(n_data->post, pe_action_optional|pe_action_clear); 
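/* [editor's illustration -- not part of this patch]
 * After expand_notification_data() has run, n_data->keys holds flat,
 * space-separated strings (note the trailing blank appended by expand_list()),
 * which are later copied onto each operation's meta attributes by the dup_attr()
 * loops in pe_notify() and create_notifications().  Continuing the hypothetical
 * clone from the note above:
 *
 *   "notify_start_resource"    -> "myclone:2 "       "notify_start_uname" -> "node3 "
 *   "notify_stop_resource"     -> "myclone:0 "       "notify_stop_uname"  -> "node1 "
 *   "notify_active_resource"   -> "myclone:0 myclone:1 "
 *   "notify_inactive_resource" -> "myclone:2 "
 */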
update_action_flags(n_data->post_done, pe_action_optional|pe_action_clear); } return required; } void create_notifications(resource_t *rsc, notify_data_t *n_data, pe_working_set_t *data_set) { GListPtr gIter = NULL; action_t *stop = NULL; action_t *start = NULL; enum action_tasks task = text2task(n_data->action); if(rsc->children) { gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; create_notifications(child, n_data, data_set); } return; } /* Copy notification details into standard ops */ gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t*)gIter->data; if(is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) { enum action_tasks t = text2task(op->task); switch(t) { case start_rsc: case stop_rsc: case action_promote: case action_demote: g_hash_table_foreach(n_data->keys, dup_attr, op->meta); break; default: break; } } } crm_debug_2("Creating notificaitons for: %s.%s (%s->%s)", n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role)); stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL); start = find_first_action(rsc->actions, NULL, RSC_START, NULL); /* stop / demote */ if(rsc->role != RSC_ROLE_STOPPED) { if(task == stop_rsc || task == action_demote) { gIter = rsc->running_on; for(; gIter != NULL; gIter = gIter->next) { node_t *current_node = (node_t*)gIter->data; pe_notify(rsc, current_node, n_data->pre, n_data->pre_done, n_data, data_set); if(task == action_demote || stop == NULL || is_set(stop->flags, pe_action_optional)) { pe_post_notify(rsc, current_node, n_data, data_set); } } } } /* start / promote */ if(rsc->next_role != RSC_ROLE_STOPPED) { if(rsc->allocated_to == NULL) { pe_proc_err("Next role '%s' but %s is not allocated", role2text(rsc->next_role), rsc->id); } else if(task == start_rsc || task == action_promote) { if(task != start_rsc || start == NULL || is_set(start->flags, pe_action_optional)) { pe_notify(rsc, rsc->allocated_to, n_data->pre, n_data->pre_done, n_data, data_set); } pe_post_notify(rsc, rsc->allocated_to, n_data, data_set); } } } void free_notification_data(notify_data_t *n_data) { if(n_data == NULL) { return; } slist_basic_destroy(n_data->stop); slist_basic_destroy(n_data->start); slist_basic_destroy(n_data->demote); slist_basic_destroy(n_data->promote); slist_basic_destroy(n_data->master); slist_basic_destroy(n_data->slave); slist_basic_destroy(n_data->active); slist_basic_destroy(n_data->inactive); g_hash_table_destroy(n_data->keys); crm_free(n_data); } int transition_id = -1; /* * Create a dependency graph to send to the transitioner (via the CRMd) */ gboolean stage8(pe_working_set_t *data_set) { GListPtr gIter = NULL; const char *value = NULL; transition_id++; crm_debug_2("Creating transition graph %d.", transition_id); data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); value = pe_pref(data_set->config_hash, "cluster-delay"); crm_xml_add(data_set->graph, "cluster-delay", value); value = pe_pref(data_set->config_hash, "stonith-timeout"); crm_xml_add(data_set->graph, "stonith-timeout", value); crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY"); if(is_set(data_set->flags, pe_flag_start_failure_fatal)) { crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(data_set->graph, "failed-start-offset", "1"); } value = pe_pref(data_set->config_hash, "batch-limit"); crm_xml_add(data_set->graph, "batch-limit", value); crm_xml_add_int(data_set->graph, "transition_id", transition_id); /* 
errors... slist_iter(action, action_t, action_list, lpc, if(action->optional == FALSE && action->runnable == FALSE) { print_action("Ignoring", action, TRUE); } ); */ gIter = data_set->resources; for(; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; crm_debug_4("processing actions for rsc=%s", rsc->id); rsc->cmds->expand(rsc, data_set); } crm_log_xml_debug_3( data_set->graph, "created resource-driven action list"); /* catch any non-resource specific actions */ crm_debug_4("processing non-resource actions"); gIter = data_set->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; graph_element_from_action(action, data_set); } crm_log_xml_debug_3(data_set->graph, "created generic action list"); crm_debug_2("Created transition graph %d.", transition_id); return TRUE; } void cleanup_alloc_calculations(pe_working_set_t *data_set) { if(data_set == NULL) { return; } crm_debug_3("deleting %d order cons: %p", g_list_length(data_set->ordering_constraints), data_set->ordering_constraints); pe_free_ordering(data_set->ordering_constraints); data_set->ordering_constraints = NULL; crm_debug_3("deleting %d node cons: %p", g_list_length(data_set->placement_constraints), data_set->placement_constraints); pe_free_rsc_to_node(data_set->placement_constraints); data_set->placement_constraints = NULL; crm_debug_3("deleting %d inter-resource cons: %p", g_list_length(data_set->colocation_constraints), data_set->colocation_constraints); slist_basic_destroy(data_set->colocation_constraints); data_set->colocation_constraints = NULL; + + crm_debug_3("deleting %d ticket deps: %p", + g_list_length(data_set->ticket_constraints), data_set->ticket_constraints); + slist_basic_destroy(data_set->ticket_constraints); + data_set->ticket_constraints = NULL; cleanup_calculations(data_set); } diff --git a/pengine/allocate.h b/pengine/allocate.h index 338c4ab424..e0afbc7152 100644 --- a/pengine/allocate.h +++ b/pengine/allocate.h @@ -1,222 +1,226 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_PENGINE_COMPLEX_ALLOC__H #define CRM_PENGINE_COMPLEX_ALLOC__H #include #include #include #include #include typedef struct notify_entry_s { resource_t *rsc; node_t *node; } notify_entry_t; struct resource_alloc_functions_s { GHashTable *(*merge_weights)(resource_t*, const char*, GHashTable*, const char*, int, gboolean, gboolean); node_t *(*allocate)(resource_t *, node_t *, pe_working_set_t *); void (*create_actions)(resource_t *, pe_working_set_t *); gboolean (*create_probe)( resource_t *, node_t *, action_t *, gboolean, pe_working_set_t *); void (*internal_constraints)(resource_t *, pe_working_set_t *); void (*rsc_colocation_lh)(resource_t *, resource_t *, rsc_colocation_t *); void (*rsc_colocation_rh)(resource_t *, resource_t *, rsc_colocation_t *); void (*rsc_location)(resource_t *, rsc_to_node_t *); enum pe_action_flags (*action_flags)(action_t *, node_t*); enum pe_graph_flags (*update_actions)(action_t *, action_t *, node_t*, enum pe_action_flags, enum pe_action_flags, enum pe_ordering); void (*expand)(resource_t *, pe_working_set_t *); void (*append_meta)(resource_t *rsc, xmlNode *xml); }; extern GHashTable *rsc_merge_weights( resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, enum pe_weights flags); extern GHashTable *native_merge_weights( resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, gboolean allow_rollback, gboolean only_positive); extern GHashTable *group_merge_weights( resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, gboolean allow_rollback, gboolean only_positive); extern node_t * native_color(resource_t *rsc, node_t *preferred, pe_working_set_t *data_set); extern void native_create_actions( resource_t *rsc, pe_working_set_t *data_set); extern void native_internal_constraints( resource_t *rsc, pe_working_set_t *data_set); extern void native_rsc_colocation_lh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); extern void native_rsc_colocation_rh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); +extern void rsc_ticket_constraint( + resource_t *lh_rsc, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set); extern enum pe_action_flags native_action_flags(action_t *action, node_t *node); extern void native_rsc_location(resource_t *rsc, rsc_to_node_t *constraint); extern void native_expand(resource_t *rsc, pe_working_set_t *data_set); extern void native_dump(resource_t *rsc, const char *pre_text, gboolean details); extern void create_notify_element( resource_t *rsc, action_t *op, notify_data_t *n_data, pe_working_set_t *data_set); extern gboolean native_create_probe( resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set); extern void native_append_meta(resource_t *rsc, xmlNode *xml); extern int group_num_allowed_nodes(resource_t *rsc); extern node_t *group_color(resource_t *rsc, node_t *preferred, pe_working_set_t *data_set); extern void group_create_actions( resource_t *rsc, pe_working_set_t *data_set); extern void group_internal_constraints( resource_t *rsc, pe_working_set_t *data_set); extern void group_rsc_colocation_lh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); extern void group_rsc_colocation_rh( resource_t *lh_rsc, resource_t *rh_rsc, 
rsc_colocation_t *constraint); extern enum pe_action_flags group_action_flags(action_t *action, node_t *node); extern void group_rsc_location(resource_t *rsc, rsc_to_node_t *constraint); extern void group_expand(resource_t *rsc, pe_working_set_t *data_set); extern void group_append_meta(resource_t *rsc, xmlNode *xml); extern int clone_num_allowed_nodes(resource_t *rsc); extern node_t *clone_color(resource_t *rsc, node_t *preferred, pe_working_set_t *data_set); extern void clone_create_actions(resource_t *rsc, pe_working_set_t *data_set); extern void clone_internal_constraints( resource_t *rsc, pe_working_set_t *data_set); extern void clone_rsc_colocation_lh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); extern void clone_rsc_colocation_rh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); extern void clone_rsc_location(resource_t *rsc, rsc_to_node_t *constraint); extern enum pe_action_flags clone_action_flags(action_t *action, node_t *node); extern void clone_expand(resource_t *rsc, pe_working_set_t *data_set); extern gboolean clone_create_probe( resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set); extern void clone_append_meta(resource_t *rsc, xmlNode *xml); extern gboolean master_unpack(resource_t *rsc, pe_working_set_t *data_set); extern node_t *master_color(resource_t *rsc, node_t *preferred, pe_working_set_t *data_set); extern void master_create_actions(resource_t *rsc, pe_working_set_t *data_set); extern void master_internal_constraints( resource_t *rsc, pe_working_set_t *data_set); extern void master_rsc_colocation_rh( resource_t *lh_rsc, resource_t *rh_rsc, rsc_colocation_t *constraint); extern void master_append_meta(resource_t *rsc, xmlNode *xml); /* extern resource_object_functions_t resource_variants[]; */ extern resource_alloc_functions_t resource_class_alloc_functions[]; extern gboolean is_active(rsc_to_node_t *cons); extern gboolean native_constraint_violated( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint); extern gboolean unpack_rsc_to_attr(xmlNode *xml_obj, pe_working_set_t *data_set); extern gboolean unpack_rsc_to_node(xmlNode *xml_obj, pe_working_set_t *data_set); extern gboolean unpack_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set); extern gboolean unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); extern gboolean unpack_rsc_location(xmlNode *xml_obj, pe_working_set_t *data_set); +extern gboolean unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set); + extern void LogActions(resource_t *rsc, pe_working_set_t *data_set); extern void cleanup_alloc_calculations(pe_working_set_t *data_set); extern notify_data_t *create_notification_boundaries( resource_t *rsc, const char *action, action_t *start, action_t *end, pe_working_set_t *data_set); extern void collect_notification_data(resource_t *rsc, gboolean state, gboolean activity, notify_data_t *n_data); extern gboolean expand_notification_data(notify_data_t *n_data); extern void create_notifications(resource_t *rsc, notify_data_t *n_data, pe_working_set_t *data_set); extern void free_notification_data(notify_data_t *n_data); extern void rsc_migrate_reload(resource_t *rsc, pe_working_set_t *data_set); extern void rsc_stonith_ordering(resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set); extern enum pe_graph_flags native_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type); 
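/* [editor's illustration -- not part of this patch]
 * resource_alloc_functions_s (above) is the per-variant method table:
 * complex_set_cmds() in allocate.c points rsc->cmds at
 * resource_class_alloc_functions[rsc->variant], and the allocation stages then
 * call through it, presumably reaching native_color()/group_color()/clone_color()
 * or master_color() and the matching expand() without caring which variant the
 * resource is.  A condensed view of that dispatch, using only calls that appear
 * in this patch:
 *
 *   static void allocate_and_expand(resource_t *rsc, pe_working_set_t *data_set)
 *   {
 *       // stage5: pick a node for the resource (variant-specific color helper)
 *       rsc->cmds->allocate(rsc, NULL, data_set);
 *
 *       // stage8: emit the resource's actions into the transition graph
 *       rsc->cmds->expand(rsc, data_set);
 *   }
 */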
extern enum pe_graph_flags group_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type); extern enum pe_graph_flags clone_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type); static inline enum pe_action_flags get_action_flags(action_t *action, node_t *node) { enum pe_action_flags flags = action->flags; if(action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if(action->rsc->variant >= pe_clone && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, its only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but its logical and appears to work well. */ if(is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { crm_trace("Fixing up runnable flag for %s", action->uuid); set_bit_inplace(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static inline gboolean update_action_flags(action_t *action, enum pe_action_flags flags) { gboolean changed = FALSE; gboolean clear = is_set(flags, pe_action_clear); enum pe_action_flags last = action->flags; if(clear) { clear_bit_inplace(action->flags, flags); } else { set_bit_inplace(action->flags, flags); } if(last != action->flags) { changed = TRUE; clear_bit_inplace(flags, pe_action_clear); crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x)", action->uuid, action->node?action->node->details->uname:"[none]", clear?"un-":"", flags, last, action->flags); } return changed; } #endif diff --git a/pengine/clone.c b/pengine/clone.c index 76c7cc9376..8d754319e5 100644 --- a/pengine/clone.c +++ b/pengine/clone.c @@ -1,1492 +1,1501 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #define VARIANT_CLONE 1 #include gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); static void append_parent_colocation(resource_t *rsc, resource_t *child, gboolean all); static node_t * parent_node_instance(const resource_t *rsc, node_t *node) { node_t *ret = NULL; if(node != NULL) { ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id); } return ret; } static gboolean did_fail(const resource_t *rsc) { GListPtr gIter = rsc->children; if(is_set(rsc->flags, pe_rsc_failed)) { return TRUE; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; if(did_fail(child_rsc)) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc = 0; int level = LOG_DEBUG_3; node_t *node1 = NULL; node_t *node2 = NULL; gboolean can1 = TRUE; gboolean can2 = TRUE; const resource_t *resource1 = (const resource_t*)a; const resource_t *resource2 = (const resource_t*)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* allocation order: * - active instances * - instances running on nodes with the least copies * - active instances on nodes that cant support them or are to be fenced * - failed instances * - inactive instances */ if(resource1->running_on && resource2->running_on) { if(g_list_length(resource1->running_on) < g_list_length(resource2->running_on)) { do_crm_log_unlikely(level, "%s < %s: running_on", resource1->id, resource2->id); return -1; } else if(g_list_length(resource1->running_on) > g_list_length(resource2->running_on)) { do_crm_log_unlikely(level, "%s > %s: running_on", resource1->id, resource2->id); return 1; } } if(resource1->running_on) { node1 = resource1->running_on->data; } if(resource2->running_on) { node2 = resource2->running_on->data; } if(node1) { node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id); if(match == NULL || match->weight < 0) { do_crm_log_unlikely(level, "%s: current location is unavailable", resource1->id); node1 = NULL; can1 = FALSE; } } if(node2) { node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id); if(match == NULL || match->weight < 0) { do_crm_log_unlikely(level, "%s: current location is unavailable", resource2->id); node2 = NULL; can2 = FALSE; } } if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s < %s: availability of current location", resource1->id, resource2->id); return -1; } do_crm_log_unlikely(level, "%s > %s: availability of current location", resource1->id, resource2->id); return 1; } if(resource1->priority < resource2->priority) { do_crm_log_unlikely(level, "%s < %s: priority", resource1->id, resource2->id); return 1; } else if(resource1->priority > resource2->priority) { do_crm_log_unlikely(level, "%s > %s: priority", resource1->id, resource2->id); return -1; } if(node1 == NULL && node2 == NULL) { do_crm_log_unlikely(level, "%s == %s: not active", resource1->id, resource2->id); return 0; } if(node1 != node2) { if(node1 == NULL) { do_crm_log_unlikely(level, "%s > %s: active", resource1->id, resource2->id); return 1; } else if(node2 == NULL) { do_crm_log_unlikely(level, "%s < %s: active", resource1->id, resource2->id); return -1; } } can1 = can_run_resources(node1); can2 = 
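/* [editor's illustration -- not part of this patch]
 * sort_clone_instance() decides which clone instances are allocated first, in the
 * order listed in the comment above: active instances, then instances on nodes
 * with the fewest copies, then instances whose current node is unusable, then
 * failed ones, then inactive ones.  Two hypothetical instances, to make the first
 * rule concrete:
 *
 *   myclone:0  active on a healthy, allowed node   -> sorts earlier
 *   myclone:1  not running anywhere                -> sorts later
 *
 * so myclone:0 is placed first, which is what lets a running instance keep its
 * node before the spare copies are handed out.
 */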
can_run_resources(node2); if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s < %s: can", resource1->id, resource2->id); return -1; } do_crm_log_unlikely(level, "%s > %s: can", resource1->id, resource2->id); return 1; } node1 = parent_node_instance(resource1, node1); node2 = parent_node_instance(resource2, node2); if(node1 != NULL && node2 == NULL) { do_crm_log_unlikely(level, "%s < %s: not allowed", resource1->id, resource2->id); return -1; } else if(node1 == NULL && node2 != NULL) { do_crm_log_unlikely(level, "%s > %s: not allowed", resource1->id, resource2->id); return 1; } if(node1 == NULL) { do_crm_log_unlikely(level, "%s == %s: not allowed", resource1->id, resource2->id); return 0; } if(node1->count < node2->count) { do_crm_log_unlikely(level, "%s < %s: count", resource1->id, resource2->id); return -1; } else if(node1->count > node2->count) { do_crm_log_unlikely(level, "%s > %s: count", resource1->id, resource2->id); return 1; } can1 = did_fail(resource1); can2 = did_fail(resource2); if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s > %s: failed", resource1->id, resource2->id); return 1; } do_crm_log_unlikely(level, "%s < %s: failed", resource1->id, resource2->id); return -1; } if(node1 && node2) { int lpc = 0; int max = 0; node_t *n = NULL; GListPtr gIter = NULL; GListPtr list1 = NULL; GListPtr list2 = NULL; GHashTable *hash1 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); GHashTable *hash2 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); n = node_copy(resource1->running_on->data); g_hash_table_insert(hash1, (gpointer)n->details->id, n); n = node_copy(resource2->running_on->data); g_hash_table_insert(hash2, (gpointer)n->details->id, n); for(gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights( constraint->rsc_rh, resource1->id, hash1, constraint->node_attribute, constraint->score/INFINITY, FALSE, FALSE); } for(gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights( constraint->rsc_lh, resource1->id, hash1, constraint->node_attribute, constraint->score/INFINITY, FALSE, TRUE); } for(gIter = resource2->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights( constraint->rsc_rh, resource2->id, hash2, constraint->node_attribute, constraint->score/INFINITY, FALSE, FALSE); } for(gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights( constraint->rsc_lh, resource2->id, hash2, constraint->node_attribute, constraint->score/INFINITY, FALSE, TRUE); } /* Current location score */ node1 = g_list_nth_data(resource1->running_on, 0); node1 = g_hash_table_lookup(hash1, node1->details->id); node2 = g_list_nth_data(resource2->running_on, 0); node2 = g_hash_table_lookup(hash2, node2->details->id); if(node1->weight < node2->weight) { if(node1->weight < 0) { do_crm_log_unlikely(level, "%s > %s: current 
score", resource1->id, resource2->id); return -1; } else { do_crm_log_unlikely(level, "%s < %s: current score", resource1->id, resource2->id); return 1; } } else if(node1->weight > node2->weight) { do_crm_log_unlikely(level, "%s > %s: current score", resource1->id, resource2->id); return -1; } /* All location scores */ list1 = g_hash_table_get_values(hash1); list2 = g_hash_table_get_values(hash2); list1 = g_list_sort_with_data(list1, sort_node_weight, g_list_nth_data(resource1->running_on, 0)); list2 = g_list_sort_with_data(list2, sort_node_weight, g_list_nth_data(resource2->running_on, 0)); max = g_list_length(list1); if(max < g_list_length(list2)) { max = g_list_length(list2); } for(;lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if(node1 == NULL) { do_crm_log_unlikely(level, "%s < %s: colocated score NULL", resource1->id, resource2->id); rc = 1; break; } else if(node2 == NULL) { do_crm_log_unlikely(level, "%s > %s: colocated score NULL", resource1->id, resource2->id); rc = -1; break; } if(node1->weight < node2->weight) { do_crm_log_unlikely(level, "%s < %s: colocated score", resource1->id, resource2->id); rc = 1; break; } else if(node1->weight > node2->weight) { do_crm_log_unlikely(level, "%s > %s: colocated score", resource1->id, resource2->id); rc = -1; break; } } /* Order by reverse uname - same as sort_node_weight() does? */ g_hash_table_destroy(hash1); /* Free mem */ g_hash_table_destroy(hash2); /* Free mem */ g_list_free(list1); g_list_free(list2); if(rc != 0) { return rc; } } rc = strcmp(resource1->id, resource2->id); do_crm_log_unlikely(level, "%s %c %s: default", resource1->id, rc<0?'<':'>', resource2->id); return rc; } static node_t * can_run_instance(resource_t *rsc, node_t *node) { node_t *local_node = NULL; clone_variant_data_t *clone_data = NULL; if(can_run_resources(node) == FALSE) { goto bail; } else if(is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = parent_node_instance(rsc, node); get_clone_variant_data(clone_data, rsc->parent); if(local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if(local_node->count < clone_data->clone_node_max) { crm_trace("%s can run on %s: %d", rsc->id, node->details->uname, local_node->count); return local_node; } else { crm_debug_2("%s cannot run on %s: node full (%d >= %d)", rsc->id, node->details->uname, local_node->count, clone_data->clone_node_max); } bail: if(node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static node_t * color_instance(resource_t *rsc, node_t *prefer, gboolean all_coloc, pe_working_set_t *data_set) { node_t *chosen = NULL; node_t *local_node = NULL; crm_debug_2("Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependant resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if(prefer) { node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if(local_prefer == NULL || local_prefer->weight < 0) { crm_trace("Not pre-allocating %s to %s - unavailable", rsc->id, prefer->details->uname); return NULL; } } if(rsc->allowed_nodes) { GHashTableIter iter; node_t *try_node = NULL; g_hash_table_iter_init (&iter, rsc->allowed_nodes); while 
(g_hash_table_iter_next (&iter, NULL, (void**)&try_node)) { can_run_instance(rsc, try_node); } } chosen = rsc->cmds->allocate(rsc, prefer, data_set); if(chosen) { local_node = pe_hash_table_lookup( rsc->parent->allowed_nodes, chosen->details->id); if(prefer && chosen && chosen->details != prefer->details) { crm_err("Pre-allocation failed: got %s instead of %s", chosen->details->uname, prefer->details->uname); native_deallocate(rsc); chosen = NULL; } else if(local_node) { local_node->count++; } else if(is_set(rsc->flags, pe_rsc_managed)) { /* what to do? we can't enforce per-node limits in this case */ crm_config_err("%s not found in %s (list=%d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } return chosen; } static void append_parent_colocation(resource_t *rsc, resource_t *child, gboolean all) { GListPtr gIter = NULL; gIter = rsc->rsc_cons; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t*)gIter->data; if(all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_prepend(child->rsc_cons, cons); } } gIter = rsc->rsc_cons_lhs; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t*)gIter->data; if(all || cons->score < 0) { child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); } } } node_t * clone_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { int allocated = 0; GHashTableIter iter; GListPtr gIter = NULL; node_t *node = NULL; int available_nodes = 0; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); crm_debug_2("Processing %s", rsc->id); /* this information is used by sort_clone_instance() when deciding in which * order to allocate clone instances */ gIter = rsc->rsc_cons; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; crm_trace("%s: Coloring %s first", rsc->id, constraint->rsc_rh->id); constraint->rsc_rh->cmds->allocate(constraint->rsc_rh, prefer, data_set); } gIter = rsc->rsc_cons_lhs; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE, TRUE); } + + gIter = rsc->rsc_tickets; + for(; gIter != NULL; gIter = gIter->next) { + rsc_ticket_t *rsc_ticket = (rsc_ticket_t*)gIter->data; + + if(rsc_ticket->ticket->granted == FALSE) { + rsc_ticket_constraint(rsc, rsc_ticket, data_set); + } + } dump_node_scores(show_scores?0:scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); /* count now tracks the number of clones currently allocated */ g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { node->count = 0; if(can_run_resources(node)) { available_nodes++; } } rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set); /* Pre-allocate as many instances as we can to their current location */ g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (available_nodes && available_nodes <= clone_data->clone_max && g_hash_table_iter_next (&iter, NULL, (void**)&node)) { int lpc; int loop_max = clone_data->clone_max / 
available_nodes; if(loop_max < 1) { loop_max = 1; } if(can_run_resources(node) == FALSE || node->weight < 0) { crm_trace("Not Pre-allocatiing %s", node->details->uname); continue; } crm_trace("Pre-allocatiing %s", node->details->uname); for(lpc = 0; allocated < clone_data->clone_max && node->count < clone_data->clone_node_max && lpc < clone_data->clone_node_max && lpc < loop_max; lpc++) { for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; if(child->running_on && is_set(child->flags, pe_rsc_provisional) && is_not_set(child->flags, pe_rsc_failed)) { node_t *child_node = child->running_on->data; if(child_node->details == node->details && color_instance(child, node, clone_data->clone_max < available_nodes, data_set)) { crm_trace("Pre-allocated %s to %s", child->id, node->details->uname); allocated++; break; } } } } } crm_trace("Done pre-allocating"); gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; if(g_list_length(child->running_on) > 0) { node_t *child_node = child->running_on->data; node_t *local_node = parent_node_instance(child, child->running_on->data); if(local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } if(is_not_set(child->flags, pe_rsc_provisional)) { } else if(allocated >= clone_data->clone_max) { crm_debug("Child %s not allocated - limit reached", child->id); resource_location(child, NULL, -INFINITY, "clone_color:limit_reached", data_set); } else if(color_instance(child, NULL, clone_data->clone_max < available_nodes, data_set)) { allocated++; } } crm_debug("Allocated %d %s instances of a possible %d", allocated, rsc->id, clone_data->clone_max); clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); return NULL; } static void clone_update_pseudo_status( resource_t *rsc, gboolean *stopping, gboolean *starting, gboolean *active) { GListPtr gIter = NULL; if(rsc->children) { gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if(rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(*starting && *stopping) { return; } else if(is_set(action->flags, pe_action_optional)) { crm_debug_3("Skipping optional: %s", action->uuid); continue; } else if(is_set(action->flags, pe_action_pseudo) == FALSE && is_set(action->flags, pe_action_runnable) == FALSE){ crm_debug_3("Skipping unrunnable: %s", action->uuid); continue; } else if(safe_str_eq(RSC_STOP, action->task)) { crm_debug_2("Stopping due to: %s", action->uuid); *stopping = TRUE; } else if(safe_str_eq(RSC_START, action->task)) { if(is_set(action->flags, pe_action_runnable) == FALSE) { crm_debug_3("Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); } else { crm_debug_2("Starting due to: %s", action->uuid); crm_debug_3("%s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static action_t * find_rsc_action(resource_t *rsc, const char *key, gboolean active_only, GListPtr *list) { action_t *match = NULL; GListPtr possible = NULL; GListPtr 
active = NULL; possible = find_actions(rsc->actions, key, NULL); if(active_only) { GListPtr gIter = possible; for(; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t*)gIter->data; if(is_set(op->flags, pe_action_optional) == FALSE) { active = g_list_prepend(active, op); } } if(active && g_list_length(active) == 1) { match = g_list_nth_data(active, 0); } if(list) { *list = active; active = NULL; } } else if(possible && g_list_length(possible) == 1) { match = g_list_nth_data(possible, 0); } if(list) { *list = possible; possible = NULL; } if(possible) { g_list_free(possible); } if(active) { g_list_free(active); } return match; } static void child_ordering_constraints(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *last_stop = NULL; action_t *last_start = NULL; GListPtr gIter = rsc->children; gboolean active_only = TRUE; /* change to false to get the old behavior */ clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(clone_data->ordered == FALSE) { return; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; key = stop_key(child); stop = find_rsc_action(child, key, active_only, NULL); crm_free(key); key = start_key(child); start = find_rsc_action(child, key, active_only, NULL); crm_free(key); if(stop) { if(last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } if(start) { if(last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(resource_t *rsc, pe_working_set_t *data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; action_t *stop = NULL; action_t *stopped = NULL; action_t *start = NULL; action_t *started = NULL; GListPtr gIter = rsc->children; resource_t *last_start_rsc = NULL; resource_t *last_stop_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug_2("Creating actions for %s", rsc->id); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status( child_rsc, &child_stopping, &child_starting, &child_active); if(is_set(child_rsc->flags, pe_rsc_starting)) { last_start_rsc = child_rsc; } if(is_set(child_rsc->flags, pe_rsc_stopping)) { last_stop_rsc = child_rsc; } } /* start */ start = start_action(rsc, NULL, !child_starting); started = custom_action(rsc, started_key(rsc), RSC_STARTED, NULL, !child_starting, TRUE, data_set); update_action_flags(start, pe_action_pseudo|pe_action_runnable); update_action_flags(started, pe_action_pseudo); started->priority = INFINITY; if(child_active || child_starting) { update_action_flags(started, pe_action_runnable); } child_ordering_constraints(rsc, data_set); if(clone_data->start_notify == NULL) { clone_data->start_notify = create_notification_boundaries(rsc, RSC_START, start, started, data_set); } /* stop */ stop = stop_action(rsc, NULL, !child_stopping); stopped = custom_action(rsc, stopped_key(rsc), RSC_STOPPED, NULL, !child_stopping, TRUE, data_set); stopped->priority = INFINITY; update_action_flags(stop, pe_action_pseudo|pe_action_runnable); update_action_flags(stopped, pe_action_pseudo|pe_action_runnable); if(clone_data->stop_notify == NULL) { clone_data->stop_notify = create_notification_boundaries(rsc, RSC_STOP, stop, 
stopped, data_set); if(clone_data->stop_notify && clone_data->start_notify) { order_actions(clone_data->stop_notify->post_done, clone_data->start_notify->pre, pe_order_optional); } } } void clone_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { resource_t *last_rsc = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_trace("Internal constraints for %s", rsc->id); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); if(rsc->variant == pe_master) { new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->internal_constraints(child_rsc, data_set); order_start_start(rsc, child_rsc, pe_order_runnable_left|pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); if(clone_data->ordered && last_rsc){ order_start_start(last_rsc, child_rsc, pe_order_optional); } order_stop_stop(rsc, child_rsc, pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if(clone_data->ordered && last_rsc){ order_stop_stop(child_rsc, last_rsc, pe_order_optional); } last_rsc = child_rsc; } } static void assign_node(resource_t *rsc, node_t *node, gboolean force) { if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; native_assign_node(child_rsc, NULL, node, force); } return; } native_assign_node(rsc, NULL, node, force); } static resource_t* find_compatible_child_by_node( resource_t *local_child, node_t *local_node, resource_t *rsc, enum rsc_role_e filter, gboolean current) { node_t *node = NULL; GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(local_node == NULL) { crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, local_node->details->uname); gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); node = child_rsc->fns->location(child_rsc, NULL, current); if(filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); continue; } if(node && local_node && node->details == local_node->details) { crm_debug_2("Pairing %s with %s on %s", local_child->id, child_rsc->id, node->details->uname); return child_rsc; } else if(node) { crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname, local_node->details->uname); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } } crm_debug_3("Can't pair %s with %s", local_child->id, rsc->id); return NULL; } resource_t* find_compatible_child( resource_t *local_child, resource_t *rsc, enum rsc_role_e filter, gboolean current) { resource_t *pair = NULL; GListPtr gIter = NULL; GListPtr scratch = NULL; node_t *local_node = NULL; clone_variant_data_t *clone_data = NULL; 
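/* Strategy: if local_child already has a location, pair strictly by that
 * node; otherwise walk local_child's allowed nodes in sort_node_weight()
 * order (best candidates first) and take the first instance of rsc found
 * on one of them. */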
get_clone_variant_data(clone_data, rsc); local_node = local_child->fns->location(local_child, NULL, current); if(local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); gIter = scratch; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; pair = find_compatible_child_by_node( local_child, node, rsc, filter, current); if(pair) { goto done; } } crm_debug("Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } void clone_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ GListPtr gIter = rsc_lh->children; CRM_CHECK(FALSE, crm_err("This functionality is not thought to be used. Please report a bug.")); CRM_CHECK(rsc_lh, return); CRM_CHECK(rsc_rh, return); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_colocation_lh(child_rsc, rsc_rh, constraint); } return; } void clone_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { GListPtr gIter = NULL; gboolean do_interleave = FALSE; clone_variant_data_t *clone_data = NULL; clone_variant_data_t *clone_data_lh = NULL; CRM_CHECK(rsc_lh != NULL, return); CRM_CHECK(rsc_lh->variant == pe_native, return); get_clone_variant_data(clone_data, constraint->rsc_rh); crm_debug_3("Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc_rh->id, constraint->score); if(constraint->rsc_lh->variant >= pe_clone) { get_clone_variant_data(clone_data_lh, constraint->rsc_lh); if(clone_data->clone_node_max != clone_data_lh->clone_node_max) { crm_config_err("Cannot interleave "XML_CIB_TAG_INCARNATION " %s and %s because" " they do not support the same number of" " resources per node", constraint->rsc_lh->id, constraint->rsc_rh->id); /* only the LHS side needs to be labeled as interleave */ } else if(clone_data_lh->interleave) { do_interleave = TRUE; } } if(rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } else if(is_set(rsc_rh->flags, pe_rsc_provisional)) { crm_debug_3("%s is still provisional", rsc_rh->id); return; } else if(do_interleave) { resource_t *rh_child = NULL; rh_child = find_compatible_child(rsc_lh, rsc_rh, RSC_ROLE_UNKNOWN, FALSE); if(rh_child) { crm_debug("Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if(constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); assign_node(rsc_lh, NULL, TRUE); } else { crm_debug("Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); } return; } else if(constraint->score >= INFINITY) { GListPtr rhs = NULL; gIter = rsc_rh->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if(chosen != NULL) { rhs = g_list_prepend(rhs, chosen); } } node_list_exclude(rsc_lh->allowed_nodes, rhs, FALSE); g_list_free(rhs); return; } gIter = rsc_rh->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); } } static enum action_tasks clone_child_action(action_t *action) { 
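/* Map a clone-level action to the task performed by its children. For
 * notify/notified actions the interesting task is the one being notified
 * about: the uuid ends in "_<task>_<interval>", so the token between the
 * last two underscores is extracted below (illustratively, a key such as
 * "rsc_post_notify_start_0" yields "start"). */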
enum action_tasks result = no_action; resource_t *child = (resource_t*)action->rsc->children->data; if(safe_str_eq(action->task, "notify") || safe_str_eq(action->task, "notified")) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for(; lpc > 0; lpc--) { if(key[lpc] == '_' && stop == 0) { stop = lpc; } else if(key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = crm_strdup(key+lpc); task_mutable[stop-lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); crm_free(task_mutable); break; } } } else { result = get_complex_task(child, action->task, TRUE); } return result; } enum pe_action_flags clone_action_flags(action_t *action, node_t *node) { GListPtr gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); gIter = action->rsc->children; for(; gIter != NULL; gIter = gIter->next) { action_t *child_action = NULL; resource_t *child = (resource_t*)gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children?NULL:node); crm_trace("Checking for %s in %s on %s", task_s, child->id, node?node->details->uname:"none"); if(child_action) { enum pe_action_flags child_flags = get_action_flags(child_action, node); if(is_set(flags, pe_action_optional) && is_set(child_flags, pe_action_optional) == FALSE) { crm_trace("%s is manditory because of %s", action->uuid, child_action->uuid); clear_bit_inplace(flags, pe_action_optional); clear_bit_inplace(action->flags, pe_action_optional); } if(is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } else { GListPtr gIter2 = child->actions; for(; gIter2 != NULL; gIter2 = gIter2->next) { action_t *op = (action_t*)gIter2->data; crm_trace("%s on %s (%s)", op->uuid, op->node?op->node->details->uname:"none", op->task); } } } if(check_runnable && any_runnable == FALSE) { crm_trace("%s is not runnable because no children are", action->uuid); clear_bit_inplace(flags, pe_action_runnable); if(node == NULL) { clear_bit_inplace(action->flags, pe_action_runnable); } } return flags; } static enum pe_graph_flags clone_update_actions_interleave( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { gboolean current = FALSE; resource_t *first_child = NULL; GListPtr gIter = then->rsc->children; enum pe_graph_flags changed = pe_graph_none; /*pe_graph_disable*/ enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); /* Fix this - lazy */ if(strstr(first->uuid, "_stopped_0") || strstr(first->uuid, "_demoted_0")) { current = TRUE; } for(; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t*)gIter->data; CRM_ASSERT(then_child != NULL); first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current); if(first_child == NULL && current) { crm_trace("Ignore"); } else if(first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do? 
* * If there is no-one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if(type & (pe_order_runnable_left|pe_order_implies_then) /* Mandatory */) { crm_info("Inhibiting %s from being active", then_child->id); assign_node(then_child, NULL, TRUE); } } else { action_t *first_action = NULL; action_t *then_action = NULL; crm_debug("Pairing %s with %s", first_child->id, then_child->id); first_action = find_first_action(first_child->actions, NULL, first_task, node); then_action = find_first_action(then_child->actions, NULL, then->task, node); CRM_CHECK(first_action != NULL || is_set(first_child->flags, pe_rsc_orphan), crm_err("No action found for %s in %s (first)", first_task, first_child->id)); if(then_action == NULL && is_not_set(then_child->flags, pe_rsc_orphan) && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE && crm_str_eq(then->task, RSC_DEMOTED, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } if(first_action == NULL || then_action == NULL) { continue; } if(order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s -> %s", first_action->uuid, then_action->uuid); changed |= (pe_graph_updated_first|pe_graph_updated_then); } changed |= then_child->cmds->update_actions(first_action, then_action, node, get_action_flags(then_action, node), filter, type); } } return changed; } enum pe_graph_flags clone_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { const char *rsc = "none"; gboolean interleave = FALSE; enum pe_graph_flags changed = pe_graph_none; if(first->rsc != then->rsc && first->rsc && first->rsc->variant >= pe_clone && then->rsc && then->rsc->variant >= pe_clone) { clone_variant_data_t *clone_data = NULL; if(strstr(then->uuid, "_stop_0") || strstr(then->uuid, "_demote_0")) { get_clone_variant_data(clone_data, first->rsc); rsc = first->rsc->id; } else { get_clone_variant_data(clone_data, then->rsc); rsc = then->rsc->id; } interleave = clone_data->interleave; } crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave?"yes":"no", rsc); if(interleave) { changed = clone_update_actions_interleave(first, then, node, flags, filter, type); } else { GListPtr gIter = then->rsc->children; changed |= native_update_actions(first, then, node, flags, filter, type); for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; action_t *child_action = find_first_action(child->actions, NULL, then->task, node); if(child_action) { enum pe_action_flags child_flags = get_action_flags(child_action, node); if(is_set(child_flags, pe_action_runnable)) { changed |= child->cmds->update_actions(first, child_action, node, flags, filter, type); } } } } return changed; } void clone_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr gIter = rsc->children; crm_debug_3("Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); } } void clone_expand(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t*)gIter->data; 
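/* Resolve the flags of every clone-level action up front; the return
 * value is discarded, so this appears to be done for the side effect of
 * settling the optional/runnable bits before the notification data is
 * collected below. */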
get_action_flags(op, NULL); } if(clone_data->start_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->start_notify); expand_notification_data(clone_data->start_notify); create_notifications(rsc, clone_data->start_notify, data_set); } if(clone_data->stop_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->stop_notify); expand_notification_data(clone_data->stop_notify); create_notifications(rsc, clone_data->stop_notify, data_set); } if(clone_data->promote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->promote_notify); expand_notification_data(clone_data->promote_notify); create_notifications(rsc, clone_data->promote_notify, data_set); } if(clone_data->demote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->demote_notify); expand_notification_data(clone_data->demote_notify); create_notifications(rsc, clone_data->demote_notify, data_set); } /* Now that the notifcations have been created we can expand the children */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } static gint sort_rsc_id(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t*)a; const resource_t *resource2 = (const resource_t*)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); return strcmp(resource1->id, resource2->id); } node_t *rsc_known_on(resource_t *rsc, GListPtr *list) { GListPtr gIter = NULL; node_t *one = NULL; GListPtr result = NULL; if(rsc->children) { gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; rsc_known_on(child, &result); } } else if(rsc->known_on) { result = g_hash_table_get_values(rsc->known_on); } if(result && g_list_length(result) == 1) { one = g_list_nth_data(result, 0); } if(list) { GListPtr gIter = NULL; gIter = result; for(; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; if(*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) { *list = g_list_prepend(*list, node); } } } g_list_free(result); return one; } static resource_t *find_instance_on(resource_t *rsc, node_t *node) { GListPtr gIter = NULL; gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; GListPtr known_list = NULL; resource_t *child = (resource_t*)gIter->data; rsc_known_on(child, &known_list); gIter2 = known_list; for(; gIter2 != NULL; gIter2 = gIter2->next) { node_t *known = (node_t*)gIter2->data; if(node->details == known->details) { g_list_free(known_list); return child; } } g_list_free(known_list); } return NULL; } gboolean clone_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { GListPtr gIter = NULL; gboolean any_created = FALSE; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); rsc->children = g_list_sort(rsc->children, sort_rsc_id); if(rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if(is_not_set(rsc->flags, 
pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy */ resource_t *child = NULL; /* Try whoever we probed last time */ child = find_instance_on(rsc, node); if(child) { return child->cmds->create_probe( child, node, complete, force, data_set); } /* Try whoever we plan on starting there */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; node_t *local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if(local_node == NULL) { continue; } if(local_node->details == node->details) { return child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set); } } /* Fall back to the first clone instance */ child = rsc->children->data; return child->cmds->create_probe(child, node, complete, force, data_set); } gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; if(child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set)) { any_created = TRUE; } if(any_created && is_not_set(rsc->flags, pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy (clone :0) */ break; } } return any_created; } void clone_append_meta(resource_t *rsc, xmlNode *xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_unique)?"true":"false"); crm_free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_notify)?"true":"false"); crm_free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); crm_free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); crm_free(name); } diff --git a/pengine/constraints.c b/pengine/constraints.c index eeb4c93764..80647bb73e 100644 --- a/pengine/constraints.c +++ b/pengine/constraints.c @@ -1,1421 +1,1653 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum pe_order_kind { pe_order_kind_optional, pe_order_kind_mandatory, pe_order_kind_serialize, }; #define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \ __rsc = pe_find_resource(data_set->resources, __name); \ if(__rsc == NULL) { \ crm_config_err("%s: No resource found for %s", __set, __name); \ return FALSE; \ } \ } while(0) enum pe_ordering get_flags( const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert); gboolean unpack_constraints(xmlNode * xml_constraints, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; xmlNode *lifetime = NULL; for(xml_obj = __xml_first_child(xml_constraints); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); if(id == NULL) { crm_config_err("Constraint <%s...> must have an id", crm_element_name(xml_obj)); continue; } crm_debug_3("Processing constraint %s %s", crm_element_name(xml_obj),id); lifetime = first_named_child(xml_obj, "lifetime"); if(lifetime) { crm_config_warn("Support for the lifetime tag, used by %s, is deprecated." " The rules it contains should instead be direct decendants of the constraint object", id); } if(test_ruleset(lifetime, NULL, data_set->now) == FALSE) { crm_info("Constraint %s %s is not active", crm_element_name(xml_obj), id); } else if(safe_str_eq(XML_CONS_TAG_RSC_ORDER, crm_element_name(xml_obj))) { unpack_rsc_order(xml_obj, data_set); } else if(safe_str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj))) { unpack_rsc_colocation(xml_obj, data_set); } else if(safe_str_eq(XML_CONS_TAG_RSC_LOCATION, crm_element_name(xml_obj))) { unpack_rsc_location(xml_obj, data_set); + } else if(safe_str_eq(XML_CONS_TAG_RSC_TICKET, + crm_element_name(xml_obj))) { + unpack_rsc_ticket(xml_obj, data_set); + } else { pe_err("Unsupported constraint type: %s", crm_element_name(xml_obj)); } } return TRUE; } static const char * invert_action(const char *action) { if(safe_str_eq(action, RSC_START)) { return RSC_STOP; } else if(safe_str_eq(action, RSC_STOP)) { return RSC_START; } else if(safe_str_eq(action, RSC_PROMOTE)) { return RSC_DEMOTE; } else if(safe_str_eq(action, RSC_DEMOTE)) { return RSC_PROMOTE; } else if(safe_str_eq(action, RSC_PROMOTED)) { return RSC_DEMOTED; } else if(safe_str_eq(action, RSC_DEMOTED)) { return RSC_PROMOTED; } else if(safe_str_eq(action, RSC_STARTED)) { return RSC_STOPPED; } else if(safe_str_eq(action, RSC_STOPPED)) { return RSC_STARTED; } crm_config_warn("Unknown action: %s", action); return NULL; } static enum pe_order_kind get_ordering_type(xmlNode *xml_obj) { enum pe_order_kind kind_e = pe_order_kind_mandatory; const char *kind = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND); if(kind == NULL) { const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); kind_e = pe_order_kind_mandatory; if(score) { int score_i = char2score(score); if(score_i == 0) { kind_e = pe_order_kind_optional; } /* } else if(rsc_then->variant == pe_native && rsc_first->variant > pe_group) { */ /* kind_e = pe_order_kind_optional; */ } } else if(safe_str_eq(kind, "Mandatory")) { kind_e = pe_order_kind_mandatory; } else if(safe_str_eq(kind, "Optional")) { kind_e = pe_order_kind_optional; } else 
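/* "Serialize" is understood here as: the members may run in any order,
 * but never at the same time (see the pe_order_kind_serialize handling
 * further down). */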
if(safe_str_eq(kind, "Serialize")) { kind_e = pe_order_kind_serialize; } else { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); crm_config_err("Constraint %s: Unknown type '%s'", id, kind); } return kind_e; } static gboolean contains_stonith(resource_t *rsc) { GListPtr gIter = rsc->children; if(gIter == FALSE) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(safe_str_eq(class, "stonith")) { return TRUE; } } for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; if(contains_stonith(child)) { return TRUE; } } return FALSE; } static gboolean unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t *data_set) { int order_id = 0; resource_t *rsc_then = NULL; resource_t *rsc_first = NULL; gboolean invert_bool = TRUE; enum pe_order_kind kind = pe_order_kind_mandatory; enum pe_ordering cons_weight = pe_order_optional; const char *id_first = NULL; const char *id_then = NULL; const char *action_then = NULL; const char *action_first = NULL; const char *instance_then = NULL; const char *instance_first = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); crm_str_to_boolean(invert, &invert_bool); if(xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } else if(id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN); id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST); action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION); action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION); instance_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_INSTANCE); instance_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_INSTANCE); if(action_first == NULL) { action_first = RSC_START; } if(action_then == NULL) { action_then = action_first; } if(id_then == NULL || id_first == NULL) { crm_config_err("Constraint %s needs two sides lh: %s rh: %s", id, crm_str(id_then), crm_str(id_first)); return FALSE; } rsc_then = pe_find_resource(data_set->resources, id_then); rsc_first = pe_find_resource(data_set->resources, id_first); if(rsc_then == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_then); return FALSE; } else if(rsc_first == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_first); return FALSE; } else if(instance_then && rsc_then->variant < pe_clone) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_then, instance_then); return FALSE; } else if(instance_first && rsc_first->variant < pe_clone) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_first, instance_first); return FALSE; } if(instance_then) { rsc_then = find_clone_instance(rsc_then, instance_then, data_set); if(rsc_then == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_then, id_then); return FALSE; } } if(instance_first) { rsc_first = find_clone_instance(rsc_first, instance_first, data_set); if(rsc_first == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_first, id_first); return FALSE; } } if(safe_str_eq(action_first, RSC_STOP) && contains_stonith(rsc_then)) { if(contains_stonith(rsc_first) == FALSE) { crm_config_err("Constraint %s: Ordering 
STONITH resource (%s) to stop before %s is illegal", id, rsc_first->id, rsc_then->id); } return FALSE; } cons_weight = pe_order_optional; kind = get_ordering_type(xml_obj); if(kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_debug_2("Upgrade : recovery - implies right"); cons_weight |= pe_order_implies_then; } cons_weight |= get_flags(id, kind, action_first, action_then, FALSE); order_id = new_rsc_order( rsc_first, action_first, rsc_then, action_then, cons_weight, data_set); crm_debug_2("order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_first->id, action_first, rsc_then->id, action_then, cons_weight); if(invert_bool == FALSE) { return TRUE; } else if(invert && kind == pe_order_kind_serialize) { crm_config_warn("Cannot invert serialized constraint set %s", id); return TRUE; } else if(kind == pe_order_kind_serialize) { return TRUE; } action_then = invert_action(action_then); action_first = invert_action(action_first); if(action_then == NULL || action_first == NULL) { crm_config_err("Cannot invert rsc_order constraint %s." " Please specify the inverse manually.", id); return TRUE; } if(safe_str_eq(action_first, RSC_STOP) && contains_stonith(rsc_then)) { if(contains_stonith(rsc_first) == FALSE) { crm_config_err("Constraint %s: Ordering STONITH resource (%s) to stop before %s is illegal", id, rsc_first->id, rsc_then->id); } return FALSE; } cons_weight = pe_order_optional; if(kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_debug_2("Upgrade : recovery - implies left"); cons_weight |= pe_order_implies_first; } cons_weight |= get_flags(id, kind, action_first, action_then, TRUE); order_id = new_rsc_order( rsc_then, action_then, rsc_first, action_first, cons_weight, data_set); crm_debug_2("order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_then->id, action_then, rsc_first->id, action_first, cons_weight); return TRUE; } gboolean unpack_rsc_location(xmlNode * xml_obj, pe_working_set_t *data_set) { gboolean empty = TRUE; rsc_to_node_t *location = NULL; const char *id_lh = crm_element_value(xml_obj, "rsc"); const char *id = crm_element_value(xml_obj, XML_ATTR_ID); resource_t *rsc_lh = pe_find_resource(data_set->resources, id_lh); const char *node = crm_element_value(xml_obj, "node"); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); const char *domain = crm_element_value(xml_obj, XML_CIB_TAG_DOMAIN); const char *role = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE); if(rsc_lh == NULL) { /* only a warn as BSC adds the constraint then the resource */ crm_config_warn("No resource (con=%s, rsc=%s)", id, id_lh); return FALSE; } if(domain) { GListPtr nodes = g_hash_table_lookup(data_set->domains, domain); if(domain == NULL) { crm_config_err("Invalid constraint %s: Domain %s does not exist", id, domain); return FALSE; } location = rsc2node_new(id, rsc_lh, 0, NULL, data_set); location->node_list_rh = node_list_dup(nodes, FALSE, FALSE); } else if(node != NULL && score != NULL) { int score_i = char2score(score); node_t *match = pe_find_node(data_set->nodes, node); if(!match) { return FALSE; } location = rsc2node_new(id, rsc_lh, score_i, match, data_set); } else { xmlNode *rule_xml = NULL; for(rule_xml = __xml_first_child(xml_obj); rule_xml != NULL; rule_xml = __xml_next(rule_xml)) { if(crm_str_eq((const char *)rule_xml->name, XML_TAG_RULE, TRUE)) { empty = FALSE; crm_debug_2("Unpacking %s/%s", id, ID(rule_xml)); generate_location_rule(rsc_lh, rule_xml, data_set); } } if(empty) { 
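/* Neither a node/score pair, a domain, nor any <rule> children were
 * supplied for this constraint. */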
crm_config_err("Invalid location constraint %s:" " rsc_location must contain at least one rule", ID(xml_obj)); } } if(location && role) { if(text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Invalid constraint %s: Bad role %s", id, role); return FALSE; } else { location->role_filter = text2role(role); if(location->role_filter == RSC_ROLE_SLAVE) { /* Fold slave back into Started for simplicity * At the point Slave location constraints are evaluated, * all resources are still either stopped or started */ location->role_filter = RSC_ROLE_STARTED; } } } return TRUE; } static int get_node_score(const char *rule, const char *score, gboolean raw, node_t *node) { int score_f = 0; if(score == NULL) { pe_err("Rule %s: no score specified. Assuming 0.", rule); } else if(raw) { score_f = char2score(score); } else { const char *attr_score = g_hash_table_lookup( node->details->attrs, score); if(attr_score == NULL) { crm_debug("Rule %s: node %s did not have a value for %s", rule, node->details->uname, score); score_f = -INFINITY; } else { crm_debug("Rule %s: node %s had value %s for %s", rule, node->details->uname, attr_score, score); score_f = char2score(attr_score); } } return score_f; } rsc_to_node_t * generate_location_rule( resource_t *rsc, xmlNode *rule_xml, pe_working_set_t *data_set) { const char *rule_id = NULL; const char *score = NULL; const char *boolean = NULL; const char *role = NULL; GListPtr gIter = NULL; GListPtr match_L = NULL; int score_f = 0; gboolean do_and = TRUE; gboolean accept = TRUE; gboolean raw_score = TRUE; rsc_to_node_t *location_rule = NULL; rule_xml = expand_idref(rule_xml, data_set->input); rule_id = crm_element_value(rule_xml, XML_ATTR_ID); boolean = crm_element_value(rule_xml, XML_RULE_ATTR_BOOLEAN_OP); role = crm_element_value(rule_xml, XML_RULE_ATTR_ROLE); crm_debug_2("Processing rule: %s", rule_id); if(role != NULL && text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Bad role specified for %s: %s", rule_id, role); return NULL; } score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE); if(score != NULL) { score_f = char2score(score); } else { score = crm_element_value( rule_xml, XML_RULE_ATTR_SCORE_ATTRIBUTE); if(score == NULL) { score = crm_element_value( rule_xml, XML_RULE_ATTR_SCORE_MANGLED); } if(score != NULL) { raw_score = FALSE; } } if(safe_str_eq(boolean, "or")) { do_and = FALSE; } location_rule = rsc2node_new(rule_id, rsc, 0, NULL, data_set); if(location_rule == NULL) { return NULL; } if(role != NULL) { crm_debug_2("Setting role filter: %s", role); location_rule->role_filter = text2role(role); if(location_rule->role_filter == RSC_ROLE_SLAVE) { /* Fold slave back into Started for simplicity * At the point Slave location constraints are evaluated, * all resources are still either stopped or started */ location_rule->role_filter = RSC_ROLE_STARTED; } } if(do_and) { GListPtr gIter = NULL; match_L = node_list_dup(data_set->nodes, TRUE, FALSE); for(gIter = match_L; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; node->weight = get_node_score(rule_id, score, raw_score, node); } } for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; accept = test_rule( rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN, data_set->now); crm_debug_2("Rule %s %s on %s", ID(rule_xml), accept?"passed":"failed", node->details->uname); score_f = get_node_score(rule_id, score, raw_score, node); /* if(accept && score_f == -INFINITY) { */ /* accept = FALSE; */ /* } */ if(accept) { node_t *local = pe_find_node_id(match_L, 
node->details->id); if(local == NULL && do_and) { continue; } else if(local == NULL) { local = node_copy(node); match_L = g_list_append(match_L, local); } if(do_and == FALSE) { local->weight = merge_weights(local->weight, score_f); } crm_debug_2("node %s now has weight %d", node->details->uname, local->weight); } else if(do_and && !accept) { /* remove it */ node_t *delete = pe_find_node_id(match_L, node->details->id); if(delete != NULL) { match_L = g_list_remove(match_L,delete); crm_debug_5("node %s did not match", node->details->uname); } crm_free(delete); } } location_rule->node_list_rh = match_L; if(location_rule->node_list_rh == NULL) { crm_debug_2("No matching nodes for rule %s", rule_id); return NULL; } crm_debug_3("%s: %d nodes matched", rule_id, g_list_length(location_rule->node_list_rh)); return location_rule; } static gint sort_cons_priority_lh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t*)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t*)b; if(a == NULL) { return 1; } if(b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if(rsc_constraint1->rsc_lh->priority > rsc_constraint2->rsc_lh->priority) { return -1; } if(rsc_constraint1->rsc_lh->priority < rsc_constraint2->rsc_lh->priority) { return 1; } return strcmp(rsc_constraint1->rsc_lh->id, rsc_constraint2->rsc_lh->id); } static gint sort_cons_priority_rh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t*)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t*)b; if(a == NULL) { return 1; } if(b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if(rsc_constraint1->rsc_rh->priority > rsc_constraint2->rsc_rh->priority) { return -1; } if(rsc_constraint1->rsc_rh->priority < rsc_constraint2->rsc_rh->priority) { return 1; } return strcmp(rsc_constraint1->rsc_rh->id, rsc_constraint2->rsc_rh->id); } gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t *rsc_lh, resource_t *rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t *data_set) { rsc_colocation_t *new_con = NULL; if(rsc_lh == NULL){ crm_config_err("No resource found for LHS %s", id); return FALSE; } else if(rsc_rh == NULL){ crm_config_err("No resource found for RHS of %s", id); return FALSE; } crm_malloc0(new_con, sizeof(rsc_colocation_t)); if(new_con == NULL) { return FALSE; } if(state_lh == NULL || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { state_lh = RSC_ROLE_UNKNOWN_S; } if(state_rh == NULL || safe_str_eq(state_rh, RSC_ROLE_STARTED_S)) { state_rh = RSC_ROLE_UNKNOWN_S; } new_con->id = id; new_con->rsc_lh = rsc_lh; new_con->rsc_rh = rsc_rh; new_con->score = score; new_con->role_lh = text2role(state_lh); new_con->role_rh = text2role(state_rh); new_con->node_attribute = node_attr; if(node_attr == NULL) { node_attr = "#"XML_ATTR_UNAME; } crm_debug_3("%s ==> %s (%s %d)", rsc_lh->id, rsc_rh->id, node_attr, score); rsc_lh->rsc_cons = g_list_insert_sorted( rsc_lh->rsc_cons, new_con, sort_cons_priority_rh); rsc_rh->rsc_cons_lhs = g_list_insert_sorted( rsc_rh->rsc_cons_lhs, new_con, sort_cons_priority_lh); data_set->colocation_constraints = g_list_append( data_set->colocation_constraints, new_con); return TRUE; } /* LHS before RHS */ int new_rsc_order(resource_t *lh_rsc, const char *lh_task, resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t 
*data_set) { char *lh_key = NULL; char *rh_key = NULL; CRM_CHECK(lh_rsc != NULL, return -1); CRM_CHECK(lh_task != NULL, return -1); CRM_CHECK(rh_rsc != NULL, return -1); CRM_CHECK(rh_task != NULL, return -1); lh_key = generate_op_key(lh_rsc->id, lh_task, 0); rh_key = generate_op_key(rh_rsc->id, rh_task, 0); return custom_action_order(lh_rsc, lh_key, NULL, rh_rsc, rh_key, NULL, type, data_set); } /* LHS before RHS */ int custom_action_order( resource_t *lh_rsc, char *lh_action_task, action_t *lh_action, resource_t *rh_rsc, char *rh_action_task, action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set) { order_constraint_t *order = NULL; if(lh_rsc == NULL && lh_action) { lh_rsc = lh_action->rsc; } if(rh_rsc == NULL && rh_action) { rh_rsc = rh_action->rsc; } if((lh_action == NULL && lh_rsc == NULL) || (rh_action == NULL && rh_rsc == NULL)){ crm_config_err("Invalid inputs %p.%p %p.%p", lh_rsc, lh_action, rh_rsc, rh_action); crm_free(lh_action_task); crm_free(rh_action_task); return -1; } crm_malloc0(order, sizeof(order_constraint_t)); order->id = data_set->order_id++; order->type = type; order->lh_rsc = lh_rsc; order->rh_rsc = rh_rsc; order->lh_action = lh_action; order->rh_action = rh_action; order->lh_action_task = lh_action_task; order->rh_action_task = rh_action_task; if(order->lh_action_task == NULL && lh_action) { order->lh_action_task = crm_strdup(lh_action->uuid); } if(order->rh_action_task == NULL && rh_action) { order->rh_action_task = crm_strdup(rh_action->uuid); } if(order->lh_rsc == NULL && lh_action) { order->lh_rsc = lh_action->rsc; } if(order->rh_rsc == NULL && rh_action) { order->rh_rsc = rh_action->rsc; } data_set->ordering_constraints = g_list_prepend( data_set->ordering_constraints, order); return order->id; } enum pe_ordering get_flags( const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert) { enum pe_ordering flags = pe_order_optional; if(invert && kind == pe_order_kind_mandatory) { crm_debug_2("Upgrade %s: implies left", id); flags |= pe_order_implies_first; } else if(kind == pe_order_kind_mandatory) { crm_debug_2("Upgrade %s: implies right", id); flags |= pe_order_implies_then; if(safe_str_eq(action_first, RSC_START) || safe_str_eq(action_first, RSC_PROMOTE)) { crm_debug_2("Upgrade %s: runnable", id); flags |= pe_order_runnable_left; } } else if(kind == pe_order_kind_serialize) { flags |= pe_order_serialize_only; } return flags; } static gboolean unpack_order_set(xmlNode *set, enum pe_order_kind kind, resource_t **rsc, action_t **begin, action_t **end, action_t **inv_begin, action_t **inv_end, const char *symmetrical, pe_working_set_t *data_set) { xmlNode *xml_rsc = NULL; GListPtr set_iter = NULL; GListPtr resources = NULL; resource_t *last = NULL; resource_t *resource = NULL; int local_kind = kind; gboolean sequential = FALSE; enum pe_ordering flags = pe_order_optional; char *key = NULL; const char *id = ID(set); const char *action = crm_element_value(set, "action"); const char *sequential_s = crm_element_value(set, "sequential"); const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND); /* char *pseudo_id = NULL; char *end_id = NULL; char *begin_id = NULL; */ if(action == NULL) { action = RSC_START; } if(kind_s) { local_kind = get_ordering_type(set); } if(sequential_s == NULL) { sequential_s = "1"; } sequential = crm_is_true(sequential_s); flags = get_flags(id, local_kind, action, action, FALSE); for(xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { 
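/* Collect every resource_ref in this set, preserving document order.
 * Illustrative CIB snippet only:
 *   <resource_set id="set-1" sequential="true">
 *     <resource_ref id="rscA"/>
 *     <resource_ref id="rscB"/>
 *   </resource_set>
 */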
if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc)); resources = g_list_append(resources, resource); } } if(g_list_length(resources) == 1) { crm_debug_2("Single set: %s", id); *rsc = resource; *end = NULL; *begin = NULL; *inv_end = NULL; *inv_begin = NULL; goto done; } /* pseudo_id = crm_concat(id, action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id = crm_concat(pseudo_id, "begin", '-'); */ *rsc = NULL; /* *end = get_pseudo_op(end_id, data_set); *begin = get_pseudo_op(begin_id, data_set); crm_free(pseudo_id); crm_free(begin_id); crm_free(end_id); */ set_iter = resources; while(set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; key = generate_op_key(resource->id, action, 0); /* custom_action_order(NULL, NULL, *begin, resource, crm_strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, crm_strdup(key), NULL, NULL, NULL, *end, flags|pe_order_implies_then_printed, data_set); */ if(local_kind == pe_order_kind_serialize) { /* Serialize before everything that comes after */ GListPtr gIter = NULL; for(gIter = set_iter; gIter != NULL; gIter = gIter->next) { resource_t *then_rsc = (resource_t*)gIter->data; char *then_key = generate_op_key(then_rsc->id, action, 0); custom_action_order(resource, crm_strdup(key), NULL, then_rsc, then_key, NULL, flags, data_set); } } else if(sequential) { if(last != NULL) { new_rsc_order(last, action, resource, action, flags, data_set); } last = resource; } crm_free(key); } if(crm_is_true(symmetrical) == FALSE) { goto done; } else if(symmetrical && local_kind == pe_order_kind_serialize) { crm_config_warn("Cannot invert serialized constraint set %s", id); goto done; } else if(local_kind == pe_order_kind_serialize) { goto done; } last = NULL; action = invert_action(action); /* pseudo_id = crm_concat(id, action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id = crm_concat(pseudo_id, "begin", '-'); *inv_end = get_pseudo_op(end_id, data_set); *inv_begin = get_pseudo_op(begin_id, data_set); crm_free(pseudo_id); crm_free(begin_id); crm_free(end_id); */ flags = get_flags(id, local_kind, action, action, TRUE); set_iter = resources; while(set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; /* key = generate_op_key(resource->id, action, 0); custom_action_order(NULL, NULL, *inv_begin, resource, crm_strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, key, NULL, NULL, NULL, *inv_end, flags|pe_order_implies_then_printed, data_set); */ if(sequential) { if(last != NULL) { new_rsc_order(resource, action, last, action, flags, data_set); } last = resource; } } done: g_list_free(resources); return TRUE; } static gboolean order_rsc_sets( const char *id, xmlNode *set1, xmlNode *set2, enum pe_order_kind kind, pe_working_set_t *data_set, gboolean invert) { xmlNode *xml_rsc = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *action_1 = crm_element_value(set1, "action"); const char *action_2 = crm_element_value(set2, "action"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); enum pe_ordering flags = pe_order_none; if (action_1 == NULL) { action_1 = RSC_START; }; if (action_2 == NULL) { action_2 = RSC_START; }; if (invert == FALSE) { flags = get_flags(id, kind, action_1, action_2, FALSE); } else { action_1 = invert_action(action_1); 
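/* Inverse direction of a symmetrical constraint: both actions are
 * inverted (e.g. start becomes stop) and their order is swapped when the
 * flags are computed. */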
action_2 = invert_action(action_2); flags = get_flags(id, kind, action_2, action_1, TRUE); } if(crm_is_true(sequential_1)) { if(invert == FALSE) { /* get the last one */ const char *rid = NULL; for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_1, rid); } else { /* get the first one */ for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } } if(crm_is_true(sequential_2)) { if(invert == FALSE) { /* get the first one */ for(xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); break; } } } else { /* get the last one */ const char *rid = NULL; for(xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } } if(rsc_1 != NULL && rsc_2 != NULL) { new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } else if(rsc_1 != NULL) { for(xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else if(rsc_2 != NULL) { xmlNode *xml_rsc = NULL; for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else { for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for(xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next(xml_rsc_2)) { if(crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } } } return TRUE; } gboolean unpack_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set) { gboolean any_sets = FALSE; resource_t *rsc = NULL; /* resource_t *last_rsc = NULL; */ action_t *set_end = NULL; action_t *set_begin = NULL; action_t *set_inv_end = NULL; action_t *set_inv_begin = NULL; xmlNode *set = NULL; xmlNode *last = NULL; /* action_t *last_end = NULL; action_t *last_begin = NULL; action_t *last_inv_end = NULL; action_t *last_inv_begin = NULL; */ const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); enum pe_order_kind kind = get_ordering_type(xml_obj); if(invert == NULL) { invert = "true"; } for(set = __xml_first_child(xml_obj); set != NULL; set = __xml_next(set)) { if(crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if(unpack_order_set(set, kind, &rsc, &set_begin, &set_end, &set_inv_begin, &set_inv_end, invert, 
data_set) == FALSE) { return FALSE; /* Expand orders in order_rsc_sets() instead of via pseudo actions. */ /* } else if(last) { const char *set_action = crm_element_value(set, "action"); const char *last_action = crm_element_value(last, "action"); enum pe_ordering flags = get_flags(id, kind, last_action, set_action, FALSE); if(!set_action) { set_action = RSC_START; } if(!last_action) { last_action = RSC_START; } if(rsc == NULL && last_rsc == NULL) { order_actions(last_end, set_begin, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_end, rsc, null_or_opkey(rsc, set_action), set_begin, flags, data_set); } if(crm_is_true(invert)) { set_action = invert_action(set_action); last_action = invert_action(last_action); flags = get_flags(id, kind, last_action, set_action, TRUE); if(rsc == NULL && last_rsc == NULL) { order_actions(last_inv_begin, set_inv_end, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_inv_begin, rsc, null_or_opkey(rsc, set_action), set_inv_end, flags, data_set); } } */ } else if(/* never called -- Now call it for supporting clones in resource sets */last) { if(order_rsc_sets(id, last, set, kind, data_set, FALSE) == FALSE) { return FALSE; } if(crm_is_true(invert) && order_rsc_sets(id, set, last, kind, data_set, TRUE) == FALSE) { return FALSE; } } last = set; /* last_rsc = rsc; last_end = set_end; last_begin = set_begin; last_inv_end = set_inv_end; last_inv_begin = set_inv_begin; */ } } if(any_sets == FALSE) { return unpack_simple_rsc_order(xml_obj, data_set); } return TRUE; } static gboolean unpack_colocation_set(xmlNode *set, int score, pe_working_set_t *data_set) { xmlNode *xml_rsc = NULL; resource_t *with = NULL; resource_t *resource = NULL; const char *set_id = ID(set); const char *role = crm_element_value(set, "role"); const char *sequential = crm_element_value(set, "sequential"); int local_score = score; const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE); if(score_s) { local_score = char2score(score_s); } if(sequential != NULL && crm_is_true(sequential) == FALSE) { return TRUE; } else if(local_score >= 0) { for(xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); if(with != NULL) { crm_debug_2("Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } with = resource; } } } else { /* Anti-colocating with every prior resource is * the only way to ensure the intuitive result * (ie. 
that no-one in the set can run with anyone * else in the set) */ for(xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_with = NULL; EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); for(xml_rsc_with = __xml_first_child(set); xml_rsc_with != NULL; xml_rsc_with = __xml_next(xml_rsc_with)) { if(crm_str_eq((const char *)xml_rsc_with->name, XML_TAG_RESOURCE_REF, TRUE)) { if(safe_str_eq(resource->id, ID(xml_rsc_with))) { break; } else if(resource == NULL) { crm_config_err("%s: No resource found for %s", set_id, ID(xml_rsc_with)); return FALSE; } EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with)); crm_debug_2("Anti-Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } } } } } return TRUE; } static gboolean colocate_rsc_sets( const char *id, xmlNode *set1, xmlNode *set2, int score, pe_working_set_t *data_set) { xmlNode *xml_rsc = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *role_1 = crm_element_value(set1, "role"); const char *role_2 = crm_element_value(set2, "role"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); if(crm_is_true(sequential_1)) { /* get the first one */ for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } if(crm_is_true(sequential_2)) { /* get the last one */ const char *rid = NULL; for(xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } if(rsc_1 != NULL && rsc_2 != NULL) { rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } else if(rsc_1 != NULL) { for(xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else if(rsc_2 != NULL) { for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else { for(xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for(xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next(xml_rsc_2)) { if(crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } } } return TRUE; } static gboolean unpack_simple_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) { int score_i = 0; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); const char *id_lh = crm_element_value(xml_obj, 
XML_COLOC_ATTR_SOURCE); const char *id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); const char *state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE); const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); const char *instance_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_INSTANCE); const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR); const char *symmetrical = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); resource_t *rsc_lh = pe_find_resource(data_set->resources, id_lh); resource_t *rsc_rh = pe_find_resource(data_set->resources, id_rh); if(rsc_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); return FALSE; } else if(rsc_rh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_rh); return FALSE; } else if(instance_lh && rsc_lh->variant < pe_clone) { crm_config_err("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); return FALSE; } else if(instance_rh && rsc_rh->variant < pe_clone) { crm_config_err("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_rh, instance_rh); return FALSE; } if(instance_lh) { rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); if(rsc_lh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); return FALSE; } } if(instance_rh) { rsc_rh = find_clone_instance(rsc_rh, instance_rh, data_set); if(rsc_rh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_rh, id_rh); return FALSE; } } if(crm_is_true(symmetrical)) { crm_config_warn("The %s colocation constraint attribute has been removed." 
" It didn't do what you think it did anyway.", XML_CONS_ATTR_SYMMETRICAL); } if(score) { score_i = char2score(score); } rsc_colocation_new(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, data_set); return TRUE; } gboolean unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) { int score_i = 0; xmlNode *set = NULL; xmlNode *last = NULL; gboolean any_sets = FALSE; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); if(score) { score_i = char2score(score); } for(set = __xml_first_child(xml_obj); set != NULL; set = __xml_next(set)) { if(crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if(unpack_colocation_set(set, score_i, data_set) == FALSE) { return FALSE; } else if(last && colocate_rsc_sets(id, last, set, score_i, data_set) == FALSE) { return FALSE; } last = set; } } if(any_sets == FALSE) { return unpack_simple_colocation(xml_obj, data_set); } return TRUE; } +gboolean +rsc_ticket_new(const char *id, resource_t *rsc_lh, ticket_t *ticket, + const char *state_lh, const char *loss_policy, pe_working_set_t *data_set) +{ + rsc_ticket_t *new_rsc_ticket = NULL; + + + if(rsc_lh == NULL){ + crm_config_err("No resource found for LHS %s", id); + return FALSE; + } + + crm_malloc0(new_rsc_ticket, sizeof(rsc_ticket_t)); + if(new_rsc_ticket == NULL) { + return FALSE; + } + + if(state_lh == NULL + || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { + state_lh = RSC_ROLE_UNKNOWN_S; + } + + new_rsc_ticket->id = id; + new_rsc_ticket->ticket = ticket; + new_rsc_ticket->rsc_lh = rsc_lh; + new_rsc_ticket->role_lh = text2role(state_lh); + + if(safe_str_eq(loss_policy, "fence")) { + crm_debug("On loss of ticket '%s': Fence the nodes running %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_fence; + + } else if(safe_str_eq(loss_policy, "freeze")) { + crm_debug("On loss of ticket '%s': Freeze %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_freeze; + + } else if(safe_str_eq(loss_policy, "demote")) { + crm_debug("On loss of ticket '%s': Demote %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_demote; + + } else if(safe_str_eq(loss_policy, "stop")) { + crm_debug("On loss of ticket '%s': Stop %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_stop; + + } else { + if(new_rsc_ticket->role_lh == RSC_ROLE_MASTER) { + crm_debug("On loss of ticket '%s': Default to demote %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_demote; + + } else { + crm_debug("On loss of ticket '%s': Default to stop %s (%s)", + new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); + new_rsc_ticket->loss_policy = loss_ticket_stop; + } + } + + crm_debug_3("%s (%s) ==> %s", rsc_lh->id, role2text(new_rsc_ticket->role_lh), ticket->id); + + rsc_lh->rsc_tickets = g_list_append(rsc_lh->rsc_tickets, new_rsc_ticket); + + data_set->ticket_constraints = g_list_append(data_set->ticket_constraints, new_rsc_ticket); + + return TRUE; +} + +static gboolean 
+unpack_rsc_ticket_set(xmlNode *set, ticket_t *ticket, const char *loss_policy, pe_working_set_t *data_set) +{ + xmlNode *xml_rsc = NULL; + resource_t *resource = NULL; + const char *set_id = ID(set); + const char *role = crm_element_value(set, "role"); + + if(set == NULL) { + crm_config_err("No resource_set object to process."); + return FALSE; + } + + if(set_id == NULL) { + crm_config_err("resource_set must have an id"); + return FALSE; + } + + if(ticket == NULL) { + crm_config_err("No dependent ticket specified for '%s'", set_id); + return FALSE; + } + + for(xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next(xml_rsc)) { + if(crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { + EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); + crm_debug_2("Resource '%s' depends on ticket '%s'", resource->id, ticket->id); + rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set); + } + } + + return TRUE; +} + +static gboolean +unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set) +{ + const char *id = crm_element_value(xml_obj, XML_ATTR_ID); + const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); + const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); + + ticket_t *ticket = NULL; + + const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); + const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); + const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); + + resource_t *rsc_lh = NULL; + + if(xml_obj == NULL) { + crm_config_err("No rsc_ticket constraint object to process."); + return FALSE; + } + + if(id == NULL) { + crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); + return FALSE; + } + + if(ticket_str == NULL) { + crm_config_err("Invalid constraint '%s': No ticket specified", id); + return FALSE; + } else { + ticket = g_hash_table_lookup(data_set->tickets, ticket_str); + } + + if(ticket == NULL) { + crm_config_err("Invalid constraint '%s': No ticket named '%s'", id, ticket_str); + return FALSE; + } + + if(id_lh == NULL) { + crm_config_err("Invalid constraint '%s': No resource specified", id); + return FALSE; + } else { + rsc_lh = pe_find_resource(data_set->resources, id_lh); + } + + if(rsc_lh == NULL) { + crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); + return FALSE; + + } else if(instance_lh && rsc_lh->variant < pe_clone) { + crm_config_err("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); + return FALSE; + } + + if(instance_lh) { + rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); + if(rsc_lh == NULL) { + crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); + return FALSE; + } + } + + rsc_ticket_new(id, rsc_lh, ticket, state_lh, loss_policy, data_set); + return TRUE; +} + +gboolean +unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set) +{ + xmlNode *set = NULL; + gboolean any_sets = FALSE; + + const char *id = crm_element_value(xml_obj, XML_ATTR_ID); + const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); + const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); + + ticket_t *ticket = NULL; + + if(xml_obj == NULL) { + crm_config_err("No rsc_ticket constraint object to process."); + return FALSE; + } + + if(id == NULL) { + crm_config_err("%s constraint must have an id",
crm_element_name(xml_obj)); + return FALSE; + } + + if(ticket_str == NULL) { + crm_config_err("Invalid constraint '%s': No ticket specified", id); + return FALSE; + } else { + ticket = g_hash_table_lookup(data_set->tickets, ticket_str); + } + + if(ticket == NULL) { + crm_malloc0(ticket, sizeof(ticket_t)); + if(ticket == NULL) { + crm_config_err("Cannot allocate ticket '%s'", ticket_str); + return FALSE; + } + + ticket->id = crm_strdup(ticket_str); + ticket->granted = FALSE; + ticket->last_granted = -1; + + g_hash_table_insert(data_set->tickets, crm_strdup(ticket->id), ticket); + } + + for(set = __xml_first_child(xml_obj); set != NULL; set = __xml_next(set)) { + if(crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { + any_sets = TRUE; + set = expand_idref(set, data_set->input); + if(unpack_rsc_ticket_set(set, ticket, loss_policy, data_set) == FALSE) { + return FALSE; + } + } + } + + if(any_sets == FALSE) { + return unpack_simple_rsc_ticket(xml_obj, data_set); + } + + return TRUE; +} + gboolean is_active(rsc_to_node_t *cons) { return TRUE; } diff --git a/pengine/group.c b/pengine/group.c index 0f230aff53..8613facb5d 100644 --- a/pengine/group.c +++ b/pengine/group.c @@ -1,478 +1,488 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define VARIANT_GROUP 1 #include node_t * group_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { node_t *node = NULL; node_t *group_node = NULL; - GListPtr gIter = rsc->children; + GListPtr gIter = NULL; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } crm_debug_2("Processing %s", rsc->id); if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } if(group_data->first_child == NULL) { /* nothign to allocate */ clear_bit(rsc->flags, pe_rsc_provisional); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); rsc->role = group_data->first_child->role; group_data->first_child->rsc_cons = g_list_concat( group_data->first_child->rsc_cons, rsc->rsc_cons); rsc->rsc_cons = NULL; group_data->first_child->rsc_cons_lhs = g_list_concat( group_data->first_child->rsc_cons_lhs, rsc->rsc_cons_lhs); rsc->rsc_cons_lhs = NULL; + gIter = rsc->rsc_tickets; + for(; gIter != NULL; gIter = gIter->next) { + rsc_ticket_t *rsc_ticket = (rsc_ticket_t*)gIter->data; + + if(rsc_ticket->ticket->granted == FALSE) { + rsc_ticket_constraint(rsc, rsc_ticket, data_set); + } + } + dump_node_scores(show_scores?0:scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); + gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; node = child_rsc->cmds->allocate(child_rsc, prefer, data_set); 
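rsc_ticket_new() earlier in this patch maps the loss-policy attribute of a rsc_ticket constraint onto the loss_ticket_* values, defaulting to demote for resources tied to the Master role and to stop for everything else; the loop added to group_color() above then applies rsc_ticket_constraint() for every ticket that is not currently granted. A standalone sketch of just that policy selection, using local enums in place of the real loss_ticket_* and role types (not part of the patch):

    #include <stdio.h>
    #include <string.h>

    /* simplified stand-ins for the enums consulted by rsc_ticket_new() */
    enum loss_policy { LOSS_STOP, LOSS_DEMOTE, LOSS_FENCE, LOSS_FREEZE };
    enum role { ROLE_UNKNOWN, ROLE_SLAVE, ROLE_MASTER };

    static enum loss_policy choose_loss_policy(const char *loss_policy, enum role role_lh)
    {
        if (loss_policy && strcmp(loss_policy, "fence") == 0) {
            return LOSS_FENCE;
        } else if (loss_policy && strcmp(loss_policy, "freeze") == 0) {
            return LOSS_FREEZE;
        } else if (loss_policy && strcmp(loss_policy, "demote") == 0) {
            return LOSS_DEMOTE;
        } else if (loss_policy && strcmp(loss_policy, "stop") == 0) {
            return LOSS_STOP;
        }
        /* no (or unrecognized) loss-policy: demote masters, stop everything else */
        return (role_lh == ROLE_MASTER) ? LOSS_DEMOTE : LOSS_STOP;
    }

    int main(void)
    {
        printf("%s\n", choose_loss_policy(NULL, ROLE_MASTER) == LOSS_DEMOTE ? "demote" : "?");
        printf("%s\n", choose_loss_policy(NULL, ROLE_SLAVE) == LOSS_STOP ? "stop" : "?");
        printf("%s\n", choose_loss_policy("fence", ROLE_SLAVE) == LOSS_FENCE ? "fence" : "?");
        return 0;
    }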
if(group_node == NULL) { group_node = node; } } rsc->next_role = group_data->first_child->next_role; clear_bit(rsc->flags, pe_rsc_allocating); clear_bit(rsc->flags, pe_rsc_provisional); if(group_data->colocated) { return group_node; } return NULL; } void group_update_pseudo_status(resource_t *parent, resource_t *child); void group_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *op = NULL; const char *value = NULL; GListPtr gIter = rsc->children; crm_debug_2("Creating actions for %s", rsc->id); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->create_actions(child_rsc, data_set); group_update_pseudo_status(rsc, child_rsc); } op = start_action(rsc, NULL, TRUE/* !group_data->child_starting */); set_bit_inplace(op->flags, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, started_key(rsc), RSC_STARTED, NULL, TRUE/* !group_data->child_starting */, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo|pe_action_runnable); op = stop_action(rsc, NULL, TRUE/* !group_data->child_stopping */); set_bit_inplace(op->flags, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, stopped_key(rsc), RSC_STOPPED, NULL, TRUE/* !group_data->child_stopping */, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo|pe_action_runnable); value = g_hash_table_lookup(rsc->meta, "stateful"); if(crm_is_true(value)) { op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo); set_bit_inplace(op->flags, pe_action_runnable); op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo); set_bit_inplace(op->flags, pe_action_runnable); op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo); set_bit_inplace(op->flags, pe_action_runnable); op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE, TRUE, data_set); set_bit_inplace(op->flags, pe_action_pseudo); set_bit_inplace(op->flags, pe_action_runnable); } } void group_update_pseudo_status(resource_t *parent, resource_t *child) { GListPtr gIter = child->actions; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, parent); if(group_data->ordered == FALSE) { /* If this group is not ordered, then leave the meta-actions as optional */ return; } if(group_data->child_stopping && group_data->child_starting) { return; } for(; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(is_set(action->flags, pe_action_optional)) { continue; } if(safe_str_eq(RSC_STOP, action->task) && is_set(action->flags, pe_action_runnable)) { group_data->child_stopping = TRUE; crm_debug_3("Based on %s the group is stopping", action->uuid); } else if(safe_str_eq(RSC_START, action->task) && is_set(action->flags, pe_action_runnable)) { group_data->child_starting = TRUE; crm_debug_3("Based on %s the group is starting", action->uuid); } } } void group_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = rsc->children; resource_t *last_rsc = NULL; resource_t *top = uber_parent(rsc); group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, 
data_set); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; int stop = pe_order_none; int stopped = pe_order_implies_then_printed; int start = pe_order_implies_then|pe_order_runnable_left; int started = pe_order_runnable_left|pe_order_implies_then|pe_order_implies_then_printed; child_rsc->cmds->internal_constraints(child_rsc, data_set); if(last_rsc == NULL) { if(group_data->ordered) { stop |= pe_order_optional; stopped = pe_order_implies_then; } } else if(group_data->colocated) { rsc_colocation_new( "group:internal_colocation", NULL, INFINITY, child_rsc, last_rsc, NULL, NULL, data_set); } if(top->variant == pe_master) { new_rsc_order(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE, stop|pe_order_implies_first_printed, data_set); new_rsc_order(child_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stopped, data_set); new_rsc_order(child_rsc, RSC_PROMOTE, rsc, RSC_PROMOTED, started, data_set); new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } order_start_start(rsc, child_rsc, pe_order_implies_first_printed); order_stop_stop(rsc, child_rsc, stop|pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, stopped, data_set); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, started, data_set); if(group_data->ordered == FALSE) { order_start_start(rsc, child_rsc, start|pe_order_implies_first_printed); if(top->variant == pe_master) { new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, start|pe_order_implies_first_printed, data_set); } } else if(last_rsc != NULL) { child_rsc->restart_type = pe_restart_restart; order_start_start(last_rsc, child_rsc, start); order_stop_stop(child_rsc, last_rsc, pe_order_optional); if(top->variant == pe_master) { new_rsc_order(last_rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, start, data_set); new_rsc_order(child_rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, pe_order_optional, data_set); } } else { /* If anyone in the group is starting, then * pe_order_implies_then will cause _everyone_ in the group * to be sent a start action * But this is safe since starting something that is already * started is required to be "safe" */ int flags = pe_order_none; order_start_start(rsc, child_rsc, flags); if(top->variant == pe_master) { new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, flags, data_set); } } last_rsc = child_rsc; } if(group_data->ordered && last_rsc != NULL) { int stop_stop_flags = pe_order_implies_then; int stop_stopped_flags = pe_order_optional; order_stop_stop(rsc, last_rsc, stop_stop_flags); new_rsc_order(last_rsc, RSC_STOP, rsc, RSC_STOPPED, stop_stopped_flags, data_set); if(top->variant == pe_master) { new_rsc_order(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, stop_stop_flags, data_set); new_rsc_order(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stop_stopped_flags, data_set); } } } void group_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { GListPtr gIter = NULL; group_variant_data_t *group_data = NULL; if(rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } gIter = rsc_lh->children; crm_debug_4("Processing constraints from %s", rsc_lh->id); get_group_variant_data(group_data, rsc_lh); if(group_data->colocated) { group_data->first_child->cmds->rsc_colocation_lh( group_data->first_child, rsc_rh, constraint); return; } else if(constraint->score >= INFINITY) { crm_config_err("%s: Cannot perform manditory colocation" " 
between non-colocated group and %s", rsc_lh->id, rsc_rh->id); return; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_colocation_lh(child_rsc, rsc_rh, constraint); } } void group_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { GListPtr gIter = rsc_rh->children; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc_rh); CRM_CHECK(rsc_lh->variant == pe_native, return); crm_debug_3("Processing RH of constraint %s", constraint->id); print_resource(LOG_DEBUG_3, "LHS", rsc_lh, TRUE); if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(group_data->colocated && group_data->first_child) { if(constraint->score >= INFINITY) { /* Ensure RHS is _fully_ up before can start LHS */ group_data->last_child->cmds->rsc_colocation_rh( rsc_lh, group_data->last_child, constraint); } else { /* A partially active RHS is fine */ group_data->first_child->cmds->rsc_colocation_rh( rsc_lh, group_data->first_child, constraint); } return; } else if(constraint->score >= INFINITY) { crm_config_err("%s: Cannot perform manditory colocation with" " non-colocated group: %s", rsc_lh->id, rsc_rh->id); return; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); } } enum pe_action_flags group_action_flags(action_t *action, node_t *node) { GListPtr gIter = NULL; enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); for(gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; enum action_tasks task = get_complex_task(child, action->task, TRUE); const char *task_s = task2text(task); action_t *child_action = find_first_action(child->actions, NULL, task_s, node); if(child_action) { enum pe_action_flags child_flags = get_action_flags(child_action, node); if(is_set(flags, pe_action_optional) && is_set(child_flags, pe_action_optional) == FALSE) { crm_trace("%s is manditory because of %s", action->uuid, child_action->uuid); clear_bit_inplace(flags, pe_action_optional); clear_bit_inplace(action->flags, pe_action_optional); } if(safe_str_neq(task_s, action->task) && is_set(flags, pe_action_runnable) && is_set(child_flags, pe_action_runnable) == FALSE) { crm_trace("%s is not runnable because of %s", action->uuid, child_action->uuid); clear_bit_inplace(flags, pe_action_runnable); clear_bit_inplace(action->flags, pe_action_runnable); } } else if(task != stop_rsc) { crm_trace("%s is not runnable because of %s (not found in %s)", action->uuid, task_s, child->id); clear_bit_inplace(flags, pe_action_runnable); } } return flags; } enum pe_graph_flags group_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { GListPtr gIter = then->rsc->children; enum pe_graph_flags changed = pe_graph_none; CRM_ASSERT(then->rsc != NULL); changed |= native_update_actions(first, then, node, flags, filter, type); for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; action_t *child_action = find_first_action(child->actions, NULL, then->task, node); if(child_action) { changed |= child->cmds->update_actions(first, child_action, node, flags, filter, type); } } return changed; } void group_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr gIter = rsc->children; GListPtr saved = 
constraint->node_list_rh; GListPtr zero = node_list_dup(constraint->node_list_rh, TRUE, FALSE); gboolean reset_scores = TRUE; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); crm_debug("Processing rsc_location %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); if(group_data->colocated && reset_scores) { reset_scores = FALSE; constraint->node_list_rh = zero; } } constraint->node_list_rh = saved; slist_basic_destroy(zero); } void group_expand(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = rsc->children; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); crm_debug_3("Processing actions from %s", rsc->id); CRM_CHECK(rsc != NULL, return); native_expand(rsc, data_set); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } GHashTable * group_merge_weights( resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, gboolean allow_rollback, gboolean only_positive) { GListPtr gIter = rsc->rsc_cons_lhs; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); if(is_set(rsc->flags, pe_rsc_merging)) { crm_info("Breaking dependency loop with %s at %s", rsc->id, rhs); return nodes; } set_bit(rsc->flags, pe_rsc_merging); nodes = group_data->first_child->cmds->merge_weights( group_data->first_child, rhs, nodes, attr, factor, allow_rollback, only_positive); for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; nodes = native_merge_weights( constraint->rsc_lh, rsc->id, nodes, constraint->node_attribute, constraint->score/INFINITY, allow_rollback, only_positive); } clear_bit(rsc->flags, pe_rsc_merging); return nodes; } void group_append_meta(resource_t *rsc, xmlNode *xml) { } diff --git a/pengine/master.c b/pengine/master.c index 34b38f1d32..5c2dd3ae32 100644 --- a/pengine/master.c +++ b/pengine/master.c @@ -1,955 +1,964 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #define VARIANT_CLONE 1 #include extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); extern int master_score(resource_t *rsc, node_t *node, int not_set_value); static void child_promoting_constraints( clone_variant_data_t *clone_data, enum pe_ordering type, resource_t *rsc, resource_t *child, resource_t *last, pe_working_set_t *data_set) { if(child == NULL) { if(clone_data->ordered && last != NULL) { crm_debug_4("Ordered version (last node)"); /* last child promote before promoted started */ new_rsc_order(last, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set); } return; } /* child promote before global promoted */ new_rsc_order(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set); /* global promote before child promote */ new_rsc_order(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set); if(clone_data->ordered) { crm_debug_4("Ordered version"); if(last == NULL) { /* global promote before first child promote */ last = rsc; } /* else: child/child relative promote */ order_start_start(last, child, type); new_rsc_order(last, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set); } else { crm_debug_4("Un-ordered version"); } } static void child_demoting_constraints( clone_variant_data_t *clone_data, enum pe_ordering type, resource_t *rsc, resource_t *child, resource_t *last, pe_working_set_t *data_set) { if(child == NULL) { if(clone_data->ordered && last != NULL) { crm_debug_4("Ordered version (last node)"); /* global demote before first child demote */ new_rsc_order(rsc, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, data_set); } return; } /* child demote before global demoted */ new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ new_rsc_order(rsc, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); if(clone_data->ordered && last != NULL) { crm_debug_4("Ordered version"); /* child/child relative demote */ new_rsc_order(child, RSC_DEMOTE, last, RSC_DEMOTE, type, data_set); } else if(clone_data->ordered) { crm_debug_4("Ordered version (1st node)"); /* first child stop before global stopped */ new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type, data_set); } else { crm_debug_4("Un-ordered version"); } } static void master_update_pseudo_status( resource_t *rsc, gboolean *demoting, gboolean *promoting) { GListPtr gIter = NULL; if(rsc->children) { gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; master_update_pseudo_status(child, demoting, promoting); } return; } CRM_ASSERT(demoting != NULL); CRM_ASSERT(promoting != NULL); gIter = rsc->actions; for(; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(*promoting && *demoting) { return; } else if(is_set(action->flags, pe_action_optional)) { continue; } else if(safe_str_eq(RSC_DEMOTE, action->task)) { *demoting = TRUE; } else if(safe_str_eq(RSC_PROMOTE, action->task)) { *promoting = TRUE; } } } #define apply_master_location(list) do { \ gIter2 = list; \ for(; gIter2 != NULL; gIter2 = gIter2->next) { \ rsc_to_node_t *cons = (rsc_to_node_t*)gIter2->data; \ \ cons_node = NULL; \ if(cons->role_filter == RSC_ROLE_MASTER) { \ crm_debug_2("Applying %s to %s", \ cons->id, 
child_rsc->id); \ cons_node = pe_find_node_id( \ cons->node_list_rh, chosen->details->id); \ } \ if(cons_node != NULL) { \ int new_priority = merge_weights( \ child_rsc->priority, cons_node->weight); \ crm_debug_2("\t%s: %d->%d (%d)", child_rsc->id, \ child_rsc->priority, new_priority, cons_node->weight); \ child_rsc->priority = new_priority; \ } \ } \ } while(0) static node_t * can_be_master(resource_t *rsc) { node_t *node = NULL; node_t *local_node = NULL; resource_t *parent = uber_parent(rsc); clone_variant_data_t *clone_data = NULL; int level = LOG_DEBUG_2; #if 0 enum rsc_role_e role = RSC_ROLE_UNKNOWN; role = rsc->fns->state(rsc, FALSE); crm_info("%s role: %s", rsc->id, role2text(role)); #endif if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; if(can_be_master(child) == NULL) { do_crm_log_unlikely(level, "Child %s of %s can't be promoted", child->id, rsc->id); return NULL; } } } node = rsc->fns->location(rsc, NULL, FALSE); if(node == NULL) { do_crm_log_unlikely(level, "%s cannot be master: not allocated", rsc->id); return NULL; } else if(is_not_set(rsc->flags, pe_rsc_managed)) { if(rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { crm_notice("Forcing unmanaged master %s to remain promoted on %s", rsc->id, node->details->uname); } else { return NULL; } } else if(rsc->priority < 0) { do_crm_log_unlikely(level, "%s cannot be master: preference: %d", rsc->id, rsc->priority); return NULL; } else if(can_run_resources(node) == FALSE) { do_crm_log_unlikely(level, "Node cant run any resources: %s", node->details->uname); return NULL; } get_clone_variant_data(clone_data, parent); local_node = pe_hash_table_lookup( parent->allowed_nodes, node->details->id); if(local_node == NULL) { crm_err("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); return NULL; } else if(local_node->count < clone_data->master_node_max || is_not_set(rsc->flags, pe_rsc_managed)) { return local_node; } else { do_crm_log_unlikely(level, "%s cannot be master on %s: node full", rsc->id, node->details->uname); } return NULL; } static gint sort_master_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc; enum rsc_role_e role1 = RSC_ROLE_UNKNOWN; enum rsc_role_e role2 = RSC_ROLE_UNKNOWN; const resource_t *resource1 = (const resource_t*)a; const resource_t *resource2 = (const resource_t*)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); role1 = resource1->fns->state(resource1, TRUE); role2 = resource2->fns->state(resource2, TRUE); rc = sort_rsc_index(a, b); if( rc != 0 ) { crm_trace("%s %c %s (index)", resource1->id, rc<0?'<':'>', resource2->id); return rc; } if(role1 > role2) { crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id); return -1; } else if(role1 < role2) { crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id); return 1; } return sort_clone_instance(a, b, data_set); } static void master_promotion_order(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = NULL; node_t *node = NULL; node_t *chosen = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(clone_data->merged_master_weights) { return; } clone_data->merged_master_weights = TRUE; crm_debug_2("Merging weights for %s", rsc->id); set_bit(rsc->flags, pe_rsc_merging); gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; crm_debug_2("Sort index: %s = %d", child->id, child->sort_index); } 
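can_be_master() above filters promotion candidates: the instance must be allocated to a node, have a non-negative priority, sit on a node that can run resources, and fit under clone_data->master_node_max, while unmanaged instances are only kept promoted if they already are masters. A simplified standalone sketch of those checks, with the child recursion and hash-table lookups flattened into a single struct of hypothetical fields (not part of the patch):

    #include <stdio.h>

    /* simplified stand-in for the data can_be_master() inspects */
    struct candidate {
        int allocated;        /* instance has been assigned a node */
        int managed;          /* pe_rsc_managed is set */
        int currently_master; /* fns->state(rsc, TRUE) == RSC_ROLE_MASTER */
        int priority;         /* master-score / colocation derived priority */
        int node_can_run;     /* can_run_resources(node) */
        int node_count;       /* masters already placed on this node */
        int master_node_max;
    };

    static int can_be_master(const struct candidate *c)
    {
        if (!c->allocated) {
            return 0;                   /* not allocated: cannot be promoted */
        }
        if (!c->managed) {
            return c->currently_master; /* unmanaged: only keep an existing master */
        }
        if (c->priority < 0 || !c->node_can_run) {
            return 0;                   /* negative preference or unusable node */
        }
        return c->node_count < c->master_node_max;  /* respect master-node-max */
    }

    int main(void)
    {
        struct candidate ok   = { 1, 1, 0, 5, 1, 0, 1 };
        struct candidate full = { 1, 1, 0, 5, 1, 1, 1 };
        printf("%d %d\n", can_be_master(&ok), can_be_master(&full));  /* prints: 1 0 */
        return 0;
    }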
dump_node_scores(LOG_DEBUG_3, rsc, "Before", rsc->allowed_nodes); gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; chosen = child->fns->location(child, NULL, FALSE); if(chosen == NULL || child->sort_index < 0) { crm_debug_3("Skipping %s", child->id); continue; } node = (node_t*)pe_hash_table_lookup( rsc->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); /* adds in master preferences and rsc_location.role=Master */ crm_trace("Adding %s to %s from %s", score2char(child->sort_index), node->details->uname, child->id); node->weight = merge_weights(child->sort_index, node->weight); } dump_node_scores(LOG_DEBUG_3, rsc, "Middle", rsc->allowed_nodes); gIter = rsc->rsc_cons; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; /* (re-)adds location preferences of resources that the * master instance should/must be colocated with */ if(constraint->role_lh == RSC_ROLE_MASTER) { crm_debug_2("RHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, constraint->score); rsc->allowed_nodes = constraint->rsc_rh->cmds->merge_weights( constraint->rsc_rh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, constraint->score==INFINITY?FALSE:TRUE, FALSE); } } gIter = rsc->rsc_cons_lhs; for(; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; /* (re-)adds location preferences of resource that wish to be * colocated with the master instance */ if(constraint->role_rh == RSC_ROLE_MASTER) { crm_debug_2("LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, constraint->score); rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE, TRUE); } } + gIter = rsc->rsc_tickets; + for(; gIter != NULL; gIter = gIter->next) { + rsc_ticket_t *rsc_ticket = (rsc_ticket_t*)gIter->data; + + if(rsc_ticket->role_lh == RSC_ROLE_MASTER && rsc_ticket->ticket->granted == FALSE) { + resource_location(rsc, NULL, -INFINITY, "__stateful_without_ticket__", data_set); + } + } + dump_node_scores(LOG_DEBUG_3, rsc, "After", rsc->allowed_nodes); /* write them back and sort */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; chosen = child->fns->location(child, NULL, FALSE); if(is_not_set(child->flags, pe_rsc_managed) && child->next_role == RSC_ROLE_MASTER) { child->sort_index = INFINITY; } else if(chosen == NULL || child->sort_index < 0) { crm_debug_2("%s: %d", child->id, child->sort_index); } else { node = (node_t*)pe_hash_table_lookup( rsc->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); child->sort_index = node->weight; } crm_debug_2("Set sort index: %s = %d", child->id, child->sort_index); } rsc->children = g_list_sort_with_data(rsc->children, sort_master_instance, data_set); clear_bit(rsc->flags, pe_rsc_merging); } int master_score(resource_t *rsc, node_t *node, int not_set_value) { char *attr_name; char *name = rsc->id; const char *attr_value; int score = not_set_value, len = 0; if(rsc->children) { GListPtr gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; int c_score = master_score(child, node, not_set_value); if(score == not_set_value) { score = c_score; } else { score += c_score; } } return score; } if(rsc->fns->state(rsc, TRUE) < RSC_ROLE_STARTED) 
{ return score; } if(node != NULL) { node_t *match = pe_find_node_id(rsc->running_on, node->details->id); if(match == NULL) { crm_debug_2("%s is not active on %s - ignoring", rsc->id, node->details->uname); return score; } match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if(match == NULL || match->weight < 0) { crm_debug_2("%s on %s has score: %d - ignoring", rsc->id, match->details->uname, match->weight); return score; } } if(rsc->clone_name) { /* Use the name the lrm knows this resource as, * since that's what crm_master would have used too */ name = rsc->clone_name; } len = 8 + strlen(name); crm_malloc0(attr_name, len); sprintf(attr_name, "master-%s", name); attr_value = g_hash_table_lookup(node->details->attrs, attr_name); crm_trace("%s: %s[%s] = %s", rsc->id, attr_name, node->details->uname, crm_str(attr_value)); if(attr_value != NULL) { score = char2score(attr_value); } crm_free(attr_name); return score; } #define max(a, b) a<b?b:a static void apply_master_prefs(resource_t *rsc) { int score, new_score; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(clone_data->applied_master_prefs) { /* Make sure we only do this once */ return; } clone_data->applied_master_prefs = TRUE; for(; gIter != NULL; gIter = gIter->next) { GHashTableIter iter; node_t *node = NULL; resource_t *child_rsc = (resource_t*)gIter->data; g_hash_table_iter_init (&iter, child_rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { if(can_run_resources(node) == FALSE) { /* This node will never be promoted to master, * so don't apply the master score as that may * lead to clone shuffling */ continue; } score = master_score(child_rsc, node, 0); if(score > 0) { new_score = merge_weights(node->weight, score); if(new_score != node->weight) { crm_debug_2("\t%s: Updating preference for %s (%d->%d)", child_rsc->id, node->details->uname, node->weight, new_score); node->weight = new_score; } } new_score = max(child_rsc->priority, score); if(new_score != child_rsc->priority) { crm_debug_2("\t%s: Updating priority (%d->%d)", child_rsc->id, child_rsc->priority, new_score); child_rsc->priority = new_score; } } } } static void set_role_slave(resource_t *rsc, gboolean current) { GListPtr gIter = rsc->children; if(current) { if(rsc->role == RSC_ROLE_STARTED) { rsc->role = RSC_ROLE_SLAVE; } } else { GListPtr allocated = NULL; rsc->fns->location(rsc, &allocated, FALSE); if(allocated) { rsc->next_role = RSC_ROLE_SLAVE; } else { rsc->next_role = RSC_ROLE_STOPPED; } g_list_free(allocated); } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; set_role_slave(child_rsc, current); } } static void set_role_master(resource_t *rsc) { GListPtr gIter = rsc->children; if(rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_MASTER; } for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; set_role_master(child_rsc); } } node_t * master_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { int promoted = 0; GListPtr gIter = NULL; GListPtr gIter2 = NULL; GHashTableIter iter; node_t *node = NULL; node_t *chosen = NULL; node_t *cons_node = NULL; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } apply_master_prefs(rsc); clone_color(rsc, prefer, data_set); set_bit(rsc->flags,
pe_rsc_allocating); /* count now tracks the number of masters allocated */ g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { node->count = 0; } /* * assign priority */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { GListPtr list = NULL; resource_t *child_rsc = (resource_t*)gIter->data; crm_debug_2("Assigning priority for %s: %s", child_rsc->id, role2text(child_rsc->next_role)); if(child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) { set_role_slave(child_rsc, TRUE); } chosen = child_rsc->fns->location(child_rsc, &list, FALSE); if(g_list_length(list) > 1) { crm_config_err("Cannot promote non-colocated child %s", child_rsc->id); } g_list_free(list); if(chosen == NULL) { continue; } next_role = child_rsc->fns->state(child_rsc, FALSE); switch(next_role) { case RSC_ROLE_STARTED: case RSC_ROLE_UNKNOWN: CRM_CHECK(chosen != NULL, break); /* * Default to -1 if no value is set * * This allows master locations to be specified * based solely on rsc_location constraints, * but prevents anyone from being promoted if * neither a constraint nor a master-score is present */ child_rsc->priority = master_score(child_rsc, chosen, -1); break; case RSC_ROLE_SLAVE: case RSC_ROLE_STOPPED: child_rsc->priority = -INFINITY; break; case RSC_ROLE_MASTER: /* We will arrive here if we're re-creating actions after a stonith */ break; default: CRM_CHECK(FALSE/* unhandled */, crm_err("Unknown resource role: %d for %s", next_role, child_rsc->id)); } apply_master_location(child_rsc->rsc_location); apply_master_location(rsc->rsc_location); gIter2 = child_rsc->rsc_cons; for(; gIter2 != NULL; gIter2 = gIter2->next) { rsc_colocation_t *cons = (rsc_colocation_t*)gIter2->data; child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons); } child_rsc->sort_index = child_rsc->priority; crm_debug_2("Assigning priority for %s: %d", child_rsc->id, child_rsc->priority); if(next_role == RSC_ROLE_MASTER) { child_rsc->sort_index = INFINITY; } } dump_node_scores(LOG_DEBUG_3, rsc, "Pre merge", rsc->allowed_nodes); master_promotion_order(rsc, data_set); /* mark the first N as masters */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; char *score = score2char(child_rsc->sort_index); chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if(show_scores) { fprintf(stdout, "%s promotion score on %s: %s\n", child_rsc->id, chosen?chosen->details->uname:"none", score); } else { do_crm_log_unlikely(scores_log_level, "%s promotion score on %s: %s", child_rsc->id, chosen?chosen->details->uname:"none", score); } crm_free(score); chosen = NULL; /* nuke 'chosen' so that we don't promote more than the * required number of instances */ if(child_rsc->sort_index < 0) { crm_debug_2("Not supposed to promote child: %s", child_rsc->id); } else if(promoted < clone_data->master_max || is_not_set(rsc->flags, pe_rsc_managed)) { chosen = can_be_master(child_rsc); } crm_debug("%s master score: %d", child_rsc->id, child_rsc->priority); if(chosen == NULL) { set_role_slave(child_rsc, FALSE); continue; } chosen->count++; crm_info("Promoting %s (%s %s)", child_rsc->id, role2text(child_rsc->role), chosen->details->uname); set_role_master(child_rsc); promoted++; } clone_data->masters_allocated = promoted; crm_info("%s: Promoted %d instances of a possible %d to master", rsc->id, promoted, clone_data->master_max); clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); return NULL; 
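master_color() above sorts the children through master_promotion_order() and then promotes at most clone_data->master_max of them, skipping any child with a negative sort_index and re-checking can_be_master() for the rest (unmanaged resources are exempt from the cap). A minimal standalone sketch of that counting pass over an already-sorted score list, ignoring the unmanaged exception and the per-node checks (not part of the patch):

    #include <stdio.h>

    /* count promotions over promotion-sorted candidates, capped by master-max */
    static int allocate_masters(const int *sort_index, int n_children, int master_max)
    {
        int promoted = 0;
        int i;

        for (i = 0; i < n_children; i++) {
            if (sort_index[i] < 0) {
                continue;              /* "not supposed to promote" this child */
            }
            if (promoted >= master_max) {
                continue;              /* the allowed number is already promoted */
            }
            /* the real loop also requires can_be_master() to return a node here */
            promoted++;
        }
        return promoted;
    }

    int main(void)
    {
        int scores[] = { 20, 10, 5, -1 };   /* as ordered by master_promotion_order() */
        printf("Promoted %d instances of a possible %d to master\n",
               allocate_masters(scores, 4, 1), 1);
        return 0;
    }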
} void master_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *action = NULL; GListPtr gIter = rsc->children; action_t *action_complete = NULL; gboolean any_promoting = FALSE; gboolean any_demoting = FALSE; resource_t *last_promote_rsc = NULL; resource_t *last_demote_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug("Creating actions for %s", rsc->id); /* create actions as normal */ clone_create_actions(rsc, data_set); for(; gIter != NULL; gIter = gIter->next) { gboolean child_promoting = FALSE; gboolean child_demoting = FALSE; resource_t *child_rsc = (resource_t*)gIter->data; crm_debug_2("Creating actions for %s", child_rsc->id); child_rsc->cmds->create_actions(child_rsc, data_set); master_update_pseudo_status( child_rsc, &child_demoting, &child_promoting); any_demoting = any_demoting || child_demoting; any_promoting = any_promoting || child_promoting; crm_debug_2("Created actions for %s: %d %d", child_rsc->id, child_promoting, child_demoting); } /* promote */ action = promote_action(rsc, NULL, !any_promoting); action_complete = custom_action( rsc, promoted_key(rsc), RSC_PROMOTED, NULL, !any_promoting, TRUE, data_set); action_complete->priority = INFINITY; update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); update_action_flags(action_complete, pe_action_pseudo); update_action_flags(action_complete, pe_action_runnable); if(clone_data->masters_allocated > 0) { update_action_flags(action, pe_action_runnable); update_action_flags(action_complete, pe_action_runnable); } child_promoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_promote_rsc, data_set); if(clone_data->promote_notify == NULL) { clone_data->promote_notify = create_notification_boundaries( rsc, RSC_PROMOTE, action, action_complete, data_set); } /* demote */ action = demote_action(rsc, NULL, !any_demoting); action_complete = custom_action( rsc, demoted_key(rsc), RSC_DEMOTED, NULL, !any_demoting, TRUE, data_set); action_complete->priority = INFINITY; update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); update_action_flags(action_complete, pe_action_pseudo); update_action_flags(action_complete, pe_action_runnable); child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_demote_rsc, data_set); if(clone_data->demote_notify == NULL) { clone_data->demote_notify = create_notification_boundaries( rsc, RSC_DEMOTE, action, action_complete, data_set); if(clone_data->promote_notify) { /* If we ever wanted groups to have notifications we'd need to move this to native_internal_constraints() one day * Requires exposing *_notify */ order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre, pe_order_optional); } } /* restore the correct priority */ gIter = rsc->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->priority = rsc->priority; } } void master_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = rsc->children; resource_t 
*last_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); clone_internal_constraints(rsc, data_set); /* global stopped before start */ new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); /* global stopped before promote */ new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before start */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set); /* global started before promote */ new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before stop */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); /* global demote before demoted */ new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set); /* global demoted before promote */ new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; /* child demote before promote */ new_rsc_order(child_rsc, RSC_DEMOTE, child_rsc, RSC_PROMOTE, pe_order_optional, data_set); child_promoting_constraints(clone_data, pe_order_optional, rsc, child_rsc, last_rsc, data_set); child_demoting_constraints(clone_data, pe_order_optional, rsc, child_rsc, last_rsc, data_set); last_rsc = child_rsc; } } static void node_hash_update_one(GHashTable *hash, node_t *other, const char *attr, int score) { GHashTableIter iter; node_t *node = NULL; const char *value = NULL; if(other == NULL) { return; } else if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } value = g_hash_table_lookup(other->details->attrs, attr); g_hash_table_iter_init (&iter, hash); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if(safe_str_eq(value, tmp)) { crm_debug_2("%s: %d + %d", node->details->uname, node->weight, other->weight); node->weight = merge_weights(node->weight, score); } } } void master_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc_rh); CRM_CHECK(rsc_rh != NULL, return); if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(constraint->role_rh == RSC_ROLE_UNKNOWN) { crm_debug_3("Handling %s as a clone colocation", constraint->id); clone_rsc_colocation_rh(rsc_lh, rsc_rh, constraint); return; } CRM_CHECK(rsc_lh != NULL, return); CRM_CHECK(rsc_lh->variant == pe_native, return); crm_debug_2("Processing constraint %s: %d", constraint->id, constraint->score); if(constraint->role_rh == RSC_ROLE_UNKNOWN) { gIter = rsc_rh->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); } } else if(is_set(rsc_lh->flags, pe_rsc_provisional)) { GListPtr rhs = NULL; gIter = rsc_rh->children; for(; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE); crm_debug_3("Processing: %s", child_rsc->id); if(chosen != NULL && next_role == constraint->role_rh) { crm_debug_3("Applying: %s %s %s %d", child_rsc->id, role2text(next_role), chosen->details->uname, constraint->score); if(constraint->score < INFINITY) { node_hash_update_one(rsc_lh->allowed_nodes, chosen, 
constraint->node_attribute, constraint->score); } rhs = g_list_prepend(rhs, chosen); } } /* Only do this if its not a master-master colocation * Doing this unconditionally would prevent the slaves from being started */ if(constraint->role_lh != RSC_ROLE_MASTER || constraint->role_rh != RSC_ROLE_MASTER) { if(constraint->score > 0) { node_list_exclude(rsc_lh->allowed_nodes, rhs, TRUE); } } g_list_free(rhs); } else if(constraint->role_lh == RSC_ROLE_MASTER) { resource_t *rh_child = find_compatible_child(rsc_lh, rsc_rh, constraint->role_rh, FALSE); if(rh_child == NULL && constraint->score >= INFINITY) { crm_debug_2("%s can't be promoted %s", rsc_lh->id, constraint->id); rsc_lh->priority = -INFINITY; } else if(rh_child != NULL) { int new_priority = merge_weights(rsc_lh->priority, constraint->score); crm_debug("Applying %s to %s", constraint->id, rsc_lh->id); crm_debug("\t%s: %d->%d", rsc_lh->id, rsc_lh->priority, new_priority); rsc_lh->priority = new_priority; } } return; } void master_append_meta(resource_t *rsc, xmlNode *xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); clone_append_meta(rsc, xml); name = crm_meta_name(XML_RSC_ATTR_MASTER_MAX); crm_xml_add_int(xml, name, clone_data->master_max); crm_free(name); name = crm_meta_name(XML_RSC_ATTR_MASTER_NODEMAX); crm_xml_add_int(xml, name, clone_data->master_node_max); crm_free(name); } diff --git a/pengine/native.c b/pengine/native.c index 4c720ac56f..d0b9af9761 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,2833 +1,2937 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void pe_post_notify( resource_t *rsc, node_t *node, action_t *op, notify_data_t *n_data, pe_working_set_t *data_set); void NoRoleChange (resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set); gboolean DeleteRsc (resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set); gboolean StopRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean StartRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean DemoteRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean RoleError (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean NullOp (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, RoleError, RoleError, DemoteRsc, NullOp, }, }; struct capacity_data { node_t *node; resource_t *rsc; gboolean is_enough; }; static void check_capacity(gpointer key, gpointer value, gpointer user_data) { int required = 0; int remaining = 0; struct capacity_data *data = user_data; required = crm_parse_int(value, "0"); remaining = crm_parse_int(g_hash_table_lookup(data->node->details->utilization, key), "0"); if 
(required > remaining) { crm_debug("Node %s does not have enough %s for resource %s: required=%d remaining=%d", data->node->details->uname, (char *)key, data->rsc->id, required, remaining); data->is_enough = FALSE; } } static gboolean have_enough_capacity(node_t *node, resource_t *rsc) { struct capacity_data data; data.node = node; data.rsc = rsc; data.is_enough = TRUE; g_hash_table_foreach(rsc->utilization, check_capacity, &data); return data.is_enough; } static gboolean native_choose_node(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest weight) with the fewest resources 3. remove color.chosen_node from all other colors */ int alloc_details = scores_log_level+1; GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; gboolean result = FALSE; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr gIter = NULL; for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; if (have_enough_capacity(node, rsc) == FALSE) { crm_debug("Resource %s cannot be allocated to node %s: not enough capacity", rsc->id, node->details->uname); resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set); } } dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes); } length = g_hash_table_size(rsc->allowed_nodes); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to?TRUE:FALSE; } if(prefer) { chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if(chosen && chosen->weight >= 0 && can_run_resources(chosen)) { crm_trace("Using preferred node %s for %s instead of choosing from %d candidates", chosen->details->uname, rsc->id, length); } else if(chosen && chosen->weight < 0) { crm_trace("Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else if(chosen && can_run_resources(chosen)) { crm_trace("Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else { crm_trace("Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); } } if(chosen == NULL && rsc->allowed_nodes) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0)); chosen = g_list_nth_data(nodes, 0); crm_trace("Chose node %s for %s from %d candidates", chosen?chosen->details->uname:"", rsc->id, length); if(chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if(running && can_run_resources(running) == FALSE) { crm_trace("Current node for %s (%s) can't run resources", rsc->id, running->details->uname); running = NULL; } for(lpc = 1; lpc < length && running; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if(tmp->weight == chosen->weight) { multiple++; if(tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if(multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if(chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources.
Chose %s.", multiple, score, rsc->id, chosen->details->uname); crm_free(score); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable *list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } g_hash_table_iter_init (&iter, list); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { int weight = node->weight; if(can_run_resources(node) == FALSE) { weight = -INFINITY; } if(weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if(safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if(safe_str_neq(attr, "#"XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node?best_node:"", best_score); } return best_score; } static void node_hash_update(GHashTable *list1, GHashTable *list2, const char *attr, int factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } g_hash_table_iter_init (&iter, list1); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); new_score = merge_weights(factor*score, node->weight); if(factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO: Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %d*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if(only_positive && new_score < 0 && node->weight > 0) { node->weight = 1; crm_trace("%s: Filtering %d + %d*%d (score > 0)", node->details->uname, node->weight, factor, score); } else if(only_positive && new_score < 0 && node->weight == 0) { crm_trace("%s: Filtering %d + %d*%d (score == 0)", node->details->uname, node->weight, factor, score); } else { crm_trace("%s: %d + %d*%d", node->details->uname, node->weight, factor, score); node->weight = new_score; } } } static GHashTable * node_hash_dup(GHashTable *hash) { /* Hack! 
*/ GListPtr list = g_hash_table_get_values(hash); GHashTable *result = node_hash_from_list(list); g_list_free(list); return result; } GHashTable * native_merge_weights(resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, gboolean allow_rollback, gboolean only_positive) { enum pe_weights flags = pe_weights_none; if(only_positive) { set_bit_inplace(flags, pe_weights_positive); } if(allow_rollback) { set_bit_inplace(flags, pe_weights_rollback); } return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } GHashTable * rsc_merge_weights( resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, int factor, enum pe_weights flags) { GHashTable *work = NULL; int multiplier = 1; if(factor < 0) { multiplier = -1; } if(is_set(rsc->flags, pe_rsc_merging)) { crm_info("%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); if(is_set(flags, pe_weights_init)) { if(rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while(last->next != NULL) { last = last->next; } crm_trace("Merging %s as a group %p %p", rsc->id, rsc->children, last); work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags); } else { work = node_hash_dup(rsc->allowed_nodes); } clear_bit_inplace(flags, pe_weights_init); } else { crm_trace("%s: Combining scores from %s", rhs, rsc->id); work = node_hash_dup(nodes); node_hash_update(work, rsc->allowed_nodes, attr, factor, is_set(flags, pe_weights_positive)); } if(is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) { crm_info("%s: Rolling back scores from %s", rhs, rsc->id); g_hash_table_destroy(work); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if(can_run_any(work)) { GListPtr gIter = NULL; if(is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; } else { gIter = rsc->rsc_cons_lhs; } for(; gIter != NULL; gIter = gIter->next) { resource_t *other = NULL; rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; if(is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else { other = constraint->rsc_lh; } crm_trace("Applying %s (%s)", constraint->id, other->id); work = rsc_merge_weights(other, rhs, work, constraint->node_attribute, multiplier*constraint->score/INFINITY, flags); dump_node_scores(LOG_TRACE, NULL, rhs, work); } } if(nodes) { g_hash_table_destroy(nodes); } clear_bit(rsc->flags, pe_rsc_merging); return work; } node_t * native_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { GListPtr gIter = NULL; int alloc_details = scores_log_level+1; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ crm_debug("Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-allloc", rsc->allowed_nodes); for(gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; GHashTable *archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; crm_debug_2("%s: Pre-Processing %s (%s, %d, %s)", rsc->id, 
constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if(constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_hash_dup(rsc->allowed_nodes); } rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); if(archive && can_run_any(rsc->allowed_nodes) == FALSE) { crm_info("%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if(archive) { g_hash_table_destroy(archive); } } dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); for(gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE, FALSE); } + + for(gIter = rsc->rsc_tickets; gIter != NULL; gIter = gIter->next) { + rsc_ticket_t *rsc_ticket = (rsc_ticket_t*)gIter->data; + + if(rsc_ticket->ticket->granted == FALSE) { + rsc_ticket_constraint(rsc, rsc_ticket, data_set); + } + } print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if(rsc->next_role == RSC_ROLE_STOPPED) { crm_debug_2("Making sure %s doesn't get allocated", rsc->id); /* make sure it doesnt come up again */ resource_location( rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } dump_node_scores(show_scores?0:scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); if(is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if(is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; rsc->next_role = rsc->role; if(rsc->running_on == NULL) { reason = "inactive"; } else if(rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if(is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } crm_info("Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to?assign_to->details->uname:"'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if(is_set(data_set->flags, pe_flag_stop_everything) && safe_str_neq(class, "stonith")) { crm_debug("Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if(is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set) ) { crm_debug_3("Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if(rsc->allocated_to == NULL) { if(is_not_set(rsc->flags, pe_rsc_orphan)) { crm_info("Resource %s cannot run anywhere", rsc->id); } else if(rsc->running_on != NULL) { crm_info("Stopping orphan resource %s", rsc->id); } } else { crm_debug("Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); return rsc->allocated_to; } static gboolean is_op_dup( resource_t *rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; for(operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if(crm_str_eq((const char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); 
if(safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if(value == NULL) { value = "0"; } if(safe_str_neq(value, interval)) { continue; } if(id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; /* Only process for the operations without role="Stopped" */ value = crm_element_value(operation, "role"); if (value && text2role(value) == RSC_ROLE_STOPPED) { return; } crm_debug_2("Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node?node->details->uname:"n/a"); if(node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if(interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if(is_op_dup(rsc, name, interval)) { return; } if(safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s with name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if(find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ crm_free(key); return; } if(start != NULL) { crm_debug_3("Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional)?"optional":"mandatory", start->uuid); is_optional = (get_action_flags(start, NULL) & pe_action_optional); } else { crm_debug_2("Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if(possible_matches == NULL) { is_optional = FALSE; crm_debug_3("Marking %s mandatory: not active", key); } else { g_list_free(possible_matches); } if((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if(is_optional) { char *local_key = crm_strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* it's running: cancel it */ mon = custom_action( rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); crm_free(mon->task); mon->task = crm_strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch(rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if(rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if(rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if(local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs.
%s)", result , key, value?value:role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); crm_free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if(is_optional) { crm_debug_2("%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if(start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable|pe_action_clear); } else if(node == NULL || node->details->online == FALSE || node->details->unclean) { crm_debug("%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable|pe_action_clear); } else if(is_set(mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", mon->task, interval_ms/1000, rsc->id, crm_str(node_uname)); } if(rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); crm_free(running_master); } if(node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, crm_strdup(key), mon, pe_order_implies_then|pe_order_runnable_left, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { custom_action_order( rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } else if(rsc->role == RSC_ROLE_MASTER) { custom_action_order( rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } } } void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set) { if(is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for(operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if(crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp(rsc, start, node, operation, data_set); } } } } void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* TODO: Support of non-unique clone */ if(is_set(rsc->flags, pe_rsc_unique) == FALSE) { return; } /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } crm_debug_2("Creating recurring actions %s for %s in role %s on nodes where it'll not be running", ID(operation), rsc->id, role2text(rsc->next_role)); if(node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if(interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if(is_op_dup(rsc, name, interval)) { return; } if(safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if(find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ crm_free(key); return; } /* if the monitor exists on the node where the resource will be running, cancel it */ if(node != 
NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if(possible_matches) { action_t *cancel_op = NULL; char *local_key = crm_strdup(key); g_list_free(possible_matches); cancel_op = custom_action( rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); crm_free(cancel_op->task); cancel_op->task = crm_strdup(RSC_CANCEL); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; if(rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } do_crm_log(LOG_INFO, "Cancel action %s (%s vs. %s) on %s", key, role, role2text(rsc->next_role), crm_str(node_uname)); } } for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t*)gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; char *stop_op_key = NULL; if(node_uname && safe_str_eq(stop_node_uname, node_uname)) { continue; } crm_debug_2("Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if(possible_matches == NULL) { crm_debug_3("Marking %s mandatory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { crm_debug_3("Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, crm_strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(EXECRA_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); crm_free(rc_inactive); probe_complete_ops = find_actions(data_set->actions, CRM_OP_PROBED, NULL); for(local_gIter = probe_complete_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *probe_complete = (action_t*)local_gIter->data; if (probe_complete->node == NULL) { if(is_set(probe_complete->flags, pe_action_optional) == FALSE) { probe_is_optional = FALSE; } if(is_set(probe_complete->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : probe un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable|pe_action_clear); } if(is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(NULL, NULL, probe_complete, NULL, crm_strdup(key), stopped_mon, pe_order_optional, data_set); } break; } } if(probe_complete_ops) { g_list_free(probe_complete_ops); } stop_op_key = stop_key(rsc); stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node); for(local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t*)local_gIter->data; if(is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if(is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon,
pe_action_runnable|pe_action_clear); } if(is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, crm_strdup(stop_op_key), stop, NULL, crm_strdup(key), stopped_mon, pe_order_implies_then|pe_order_runnable_left, data_set); } } if(stop_ops) { g_list_free(stop_ops); } crm_free(stop_op_key); if(is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { crm_debug_3("Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional); } if(is_set(stopped_mon->flags, pe_action_optional)) { crm_debug_2("%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if(stop_node->details->online == FALSE || stop_node->details->unclean) { crm_debug("%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable|pe_action_clear); } if(is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task, interval_ms/1000, rsc->id, crm_str(stop_node_uname)); } } crm_free(key); } void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set) { if(is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for(operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if(crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } void native_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *start = NULL; node_t *chosen = NULL; GListPtr gIter = NULL; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; crm_debug_2("Createing actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); chosen = rsc->allocated_to; if(chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; } else if(rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; } get_rsc_attributes(rsc->parameters, rsc, chosen, data_set); for(gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t*)gIter->data; action_t *stop = stop_action(rsc, current, FALSE); set_bit_inplace(stop->flags, pe_action_dangle); crm_trace("Forcing a cleanup of %s on %s", rsc->id, current->details->uname); if(is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, FALSE, data_set); } } if(g_list_length(rsc->running_on) > 1) { const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s", rsc->id, class, type, g_list_length(rsc->running_on), recovery2text(rsc->recovery_type)); cl_log(LOG_WARNING, "See %s for more information.", "http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active"); if(rsc->recovery_type == recovery_stop_start) { if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, NULL, FALSE, data_set); } StopRsc(rsc, NULL, FALSE, data_set); rsc->role = RSC_ROLE_STOPPED; } } else if(rsc->running_on != NULL) { node_t *current = rsc->running_on->data; NoRoleChange(rsc, current, chosen, data_set); } else if(rsc->role == RSC_ROLE_STOPPED && rsc->next_role == RSC_ROLE_STOPPED) { char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, NULL); 
GListPtr gIter = NULL; for(gIter = possible_matches; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; update_action_flags(action, pe_action_optional); /* action->pseudo = TRUE; */ } g_list_free(possible_matches); crm_debug_2("Stopping a stopped resource"); crm_free(key); goto do_recurring; } else if(rsc->role != RSC_ROLE_STOPPED) { /* A cheap trick to account for the fact that Master/Slave groups may not be * completely running when we set their role to Slave */ crm_debug_2("Resetting %s.role = %s (was %s)", rsc->id, role2text(RSC_ROLE_STOPPED), role2text(rsc->role)); rsc->role = RSC_ROLE_STOPPED; } role = rsc->role; while(role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; crm_debug_2("Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if(rsc_action_matrix[role][next_role]( rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } do_recurring: if(rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { Recurring_Stopped(rsc, NULL, NULL, data_set); } } void native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { /* This function is on the critical path and worth optimizing as much as possible */ resource_t *top = uber_parent(rsc); int type = pe_order_optional|pe_order_implies_then|pe_order_restart; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set); if(top->variant == pe_master) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(safe_str_neq(class, "stonith")) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order( rsc, stop_key(rsc), NULL, NULL, crm_strdup(all_stopped->task), all_stopped, pe_order_implies_then|pe_order_runnable_left, data_set); } if (g_hash_table_size(rsc->utilization) > 0 && safe_str_neq(data_set->placement_strategy, "default")) { GHashTableIter iter; node_t *next = NULL; GListPtr gIter = NULL; crm_trace("Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for(gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t*)gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if(load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional|pe_action_clear); } custom_action_order( rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_optional, data_set); } g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&next)) { char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if(load_stopped->node == NULL) { 
load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional|pe_action_clear); } custom_action_order( NULL, load_stopped_task, load_stopped, rsc, start_key(rsc), NULL, pe_order_optional, data_set); custom_action_order( NULL, crm_strdup(load_stopped_task), load_stopped, rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set); } } } void native_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { if(rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } crm_debug_2("Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } static gboolean filter_colocation_constraint( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { int level = LOG_DEBUG_4; if(constraint->score == 0){ return FALSE; } if(constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } return TRUE; } static void colocation_match( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if(constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if(rsc_rh->allocated_to) { value = g_hash_table_lookup( rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if(constraint->score < 0) { /* nothing to do: * anti-colocation with something thats not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init (&iter, work); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { tmp = g_hash_table_lookup(node->details->attrs, attribute); if(do_check && safe_str_eq(tmp, value)) { if(constraint->score < INFINITY) { crm_debug_2("%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights( constraint->score, node->weight); } } else if(do_check == FALSE || constraint->score >= INFINITY) { crm_debug_2("%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check?"failed":"unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if(can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { char *score = score2char(constraint->score); 
crm_info("%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); crm_free(score); } if(work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { crm_debug_2("%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0?"":"Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); if(filter_colocation_constraint(rsc_lh, rsc_rh, constraint) == FALSE) { return; } if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return; } details_rh = rsc_rh->allocated_to?rsc_rh->allocated_to->details:NULL; details_lh = rsc_lh->allocated_to?rsc_lh->allocated_to->details:NULL; if(constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. %s", rsc_lh->id, rsc_rh->id, details_lh?details_lh->uname:"n/a", details_rh?details_rh->uname:"n/a"); } else if(constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh?details_rh->uname:"n/a"); } return; } else { colocation_match(rsc_lh, rsc_rh, constraint); } } +static gboolean +filter_rsc_ticket( + resource_t *rsc_lh, rsc_ticket_t *rsc_ticket) +{ + int level = LOG_DEBUG_4; + + if(rsc_ticket->role_lh != RSC_ROLE_UNKNOWN + && rsc_ticket->role_lh != rsc_lh->role) { + do_crm_log_unlikely(level, "LH: Skipping constraint: \"%s\" state filter", + role2text(rsc_ticket->role_lh)); + return FALSE; + } + + return TRUE; +} + +void rsc_ticket_constraint( + resource_t *rsc_lh, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set) +{ + if(rsc_ticket == NULL) { + pe_err("rsc_ticket was NULL"); + return; + } + + if(rsc_lh == NULL) { + pe_err("rsc_lh was NULL for %s", rsc_ticket->id); + return; + } + + if(rsc_ticket->ticket->granted == TRUE) { + return; + } + + if(rsc_lh->children) { + GListPtr gIter = rsc_lh->children; + + crm_debug_4("Processing ticket dependencies from %s", rsc_lh->id); + + for(; gIter != NULL; gIter = gIter->next) { + resource_t *child_rsc = (resource_t*)gIter->data; + rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); + } + return; + } + + crm_debug_2("%s: Processing ticket dependency on %s (%s, %s)", + rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); + + if(g_list_length(rsc_lh->running_on) > 0) { + GListPtr gIter = NULL; + + switch(rsc_ticket->loss_policy) { + case loss_ticket_stop: + resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); + break; + + case loss_ticket_demote: + /*Promotion score will be set to -INFINITY in master_promotion_order() */ + if(rsc_ticket->role_lh != RSC_ROLE_MASTER) { + resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); + } + break; + + case loss_ticket_fence: + if(filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { + return; + } + + resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); + + for(gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { + node_t *node = (node_t*)gIter->data; + + crm_warn("Node %s will be fenced for deadman", node->details->uname); + node->details->unclean = TRUE; + } + break; + + case loss_ticket_freeze: + if(filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { + return; + } 
+ if (g_list_length(rsc_lh->running_on) > 0) { + clear_bit(rsc_lh->flags, pe_rsc_managed); + } + break; + } + + } else { + + if(rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { + resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); + } + } +} + const char *convert_non_atomic_task(char *raw_task, resource_t *rsc, gboolean allow_notify); const char * convert_non_atomic_task(char *raw_task, resource_t *rsc, gboolean allow_notify) { int task = no_action; const char *atomic_task = raw_task; crm_trace("Processing %s for %s", crm_str(raw_task), rsc->id); if(raw_task == NULL) { return NULL; } else if(strstr(raw_task, "notify") != NULL) { goto done; /* no conversion */ } else if(rsc->variant < pe_group) { goto done; /* no conversion */ } task = text2task(raw_task); switch(task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: goto done; break; default: crm_trace("Unknown action: %s", raw_task); goto done; break; } if(task != no_action) { if(is_set(rsc->flags, pe_rsc_notify) && allow_notify) { /* atomic_task = generate_notify_key(rid, "confirmed-post", task2text(task+1)); */ crm_err("Not handled"); } else { atomic_task = task2text(task+1); } crm_trace("Converted %s -> %s", raw_task, atomic_task); } done: return atomic_task; } enum pe_action_flags native_action_flags(action_t *action, node_t *node) { return action->flags; } enum pe_graph_flags native_update_actions( action_t *first, action_t *then, node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; if(type & pe_order_implies_first) { if((filter & pe_action_optional) && (flags & pe_action_optional) == 0) { clear_bit_inplace(first->flags, pe_action_optional); } } if(is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { clear_bit_inplace(then->flags, pe_action_runnable); } if(is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { clear_bit_inplace(then->flags, pe_action_optional); } if(is_set(type, pe_order_restart)) { const char *reason = NULL; CRM_ASSERT(then->rsc == first->rsc); CRM_ASSERT(then->rsc->variant == pe_native); if((filter & pe_action_runnable) && (then->flags & pe_action_runnable) == 0) { reason = "shutdown"; } if((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) { reason = "recover"; } if(reason && is_set(first->flags, pe_action_optional) && is_set(first->flags, pe_action_runnable)) { crm_trace("Handling %s: %s -> %s", reason, first->uuid, then->uuid); clear_bit_inplace(first->flags, pe_action_optional); } } if(then_flags != then->flags) { changed |= pe_graph_updated_then; crm_trace("Then: Flags for %s on %s are now 0x%.6x (were 0x%.6x) because of %s", then->uuid, then->node?then->node->details->uname:"[none]", then->flags, then_flags, first->uuid); } if(first_flags != first->flags) { changed |= pe_graph_updated_first; crm_trace("First: Flags for %s on %s are now 0x%.6x (were 0x%.6x) because of %s", first->uuid, first->node?first->node->details->uname:"[none]", first->flags, first_flags, then->uuid); } return changed; } 
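For context on the ticket handling added above: when a ticket is revoked while a dependent resource is still active, rsc_ticket_constraint() reduces to a small decision table keyed on loss_policy. The following standalone sketch mirrors that table for illustration only; the types and helper here (mock_rsc_ticket, on_ticket_loss) are simplified stand-ins invented for this example rather than the pengine API, and only the loss_ticket_* policy names are taken from the patch itself.

/* Illustrative sketch only -- not part of this patch or of the pengine API. */
#include <stdio.h>
#include <stdbool.h>

enum loss_policy { loss_ticket_stop, loss_ticket_demote, loss_ticket_fence, loss_ticket_freeze };

struct mock_rsc_ticket {
    const char *id;
    bool granted;            /* stands in for rsc_ticket->ticket->granted */
    bool rsc_is_master;      /* stands in for role_lh == RSC_ROLE_MASTER */
    enum loss_policy loss_policy;
};

/* What happens to a resource that is still active when its ticket is lost */
static const char *on_ticket_loss(const struct mock_rsc_ticket *t)
{
    if (t->granted) {
        return "no-op: ticket still granted";
    }
    switch (t->loss_policy) {
    case loss_ticket_stop:
        return "stop: ban the resource everywhere (-INFINITY location)";
    case loss_ticket_demote:
        /* master-role dependencies only lose their promotion score;
         * anything else is banned outright */
        return t->rsc_is_master ? "demote: drop the promotion score"
                                : "stop: ban the resource everywhere";
    case loss_ticket_fence:
        return "fence: ban it and mark every node running it unclean (deadman)";
    case loss_ticket_freeze:
        return "freeze: clear the managed flag and leave it where it is";
    }
    return "unknown policy";
}

int main(void)
{
    struct mock_rsc_ticket t = { "ticket-A", false, true, loss_ticket_demote };
    printf("%s: %s\n", t.id, on_ticket_loss(&t));
    return 0;
}

The demote case matches the comment in the patch: the actual promotion score is dropped later, in master_promotion_order(), rather than here.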
void native_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; crm_debug_2("Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if(constraint == NULL) { pe_err("Constraint is NULL"); return; } else if(rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } else if(constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { crm_debug("Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if(is_active(constraint) == FALSE) { crm_debug_2("Constraint (%s) is not active", constraint->id); return; } if(constraint->node_list_rh == NULL) { crm_debug_2("RHS of constraint %s is NULL", constraint->id); return; } for(gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; node_t *other_node = NULL; other_node = (node_t*)pe_hash_table_lookup( rsc->allowed_nodes, node->details->id); if(other_node != NULL) { crm_debug_4("%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights( other_node->weight, node->weight); } else { node_t *new_node = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer)new_node->details->id, new_node); } } g_hash_table_iter_init (&iter, rsc->allowed_nodes); while (g_hash_table_iter_next (&iter, NULL, (void**)&node)) { crm_debug_3("%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t *rsc, pe_working_set_t *data_set) { GListPtr gIter = NULL; crm_debug_3("Processing actions from %s", rsc->id); for(gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; crm_debug_4("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } void LogActions(resource_t *rsc, pe_working_set_t *data_set) { node_t *next = NULL; node_t *current = NULL; action_t *stop = NULL; action_t *start = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if(rsc->children) { GListPtr gIter = NULL; for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; LogActions(child_rsc, data_set); } return; } next = rsc->allocated_to; if(rsc->running_on) { current = rsc->running_on->data; if(rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if(current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if(is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { crm_notice("Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed)?" 
unmanaged":""); return; } if(current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } key = stop_key(rsc); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); if(possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } key = start_key(rsc); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); if(possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if(rsc->role == rsc->next_role) { key = generate_op_key(rsc->id, RSC_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); CRM_CHECK(next != NULL,); if(next == NULL) { } else if(possible_matches && current) { crm_notice("Migrate %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(start == NULL || is_set(start->flags, pe_action_optional)) { crm_notice("Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(moving && current) { crm_notice("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(start && is_set(start->flags, pe_action_runnable) == FALSE) { crm_notice("Stop %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if(rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if(current != NULL) { crm_notice("Demote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname); if(stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->next_role > RSC_ROLE_STOPPED) { if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } } } else if(rsc->next_role == RSC_ROLE_STOPPED) { GListPtr gIter = NULL; CRM_CHECK(current != NULL,); for(gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; crm_notice("Stop %s\t(%s)", rsc->id, node->details->uname); } } if(moving) { crm_notice("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->next_role), current->details->uname, next->details->uname); } if(rsc->role == RSC_ROLE_STOPPED) { CRM_CHECK(next != NULL,); if(next != NULL) { crm_notice("Start %s\t(%s)", rsc->id, next->details->uname); } } if(rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { CRM_CHECK(next != NULL,); if(stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->role > RSC_ROLE_STOPPED) { if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } crm_notice("Promote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname); } } void NoRoleChange(resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set) { GListPtr gIter = NULL; action_t *stop = NULL; action_t *start = NULL; GListPtr possible_matches = NULL; crm_debug_2("Executing: %s (role=%s)", rsc->id, role2text(rsc->next_role)); if(current 
== NULL || next == NULL) { return; } if(is_set(rsc->flags, pe_rsc_failed) || safe_str_neq(current->details->id, next->details->id)) { if(rsc->next_role > RSC_ROLE_STARTED) { gboolean optional = TRUE; if(rsc->role == RSC_ROLE_MASTER) { optional = FALSE; } DemoteRsc(rsc, current, optional, data_set); } if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, current, FALSE, data_set); } StopRsc(rsc, current, FALSE, data_set); StartRsc(rsc, next, FALSE, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, FALSE, data_set); } possible_matches = find_recurring_actions(rsc->actions, next); for(gIter = possible_matches; gIter != NULL; gIter = gIter->next) { action_t *match = (action_t*)gIter->data; if(is_set(match->flags, pe_action_optional) == FALSE) { crm_debug("Fixing recurring action: %s", match->uuid); update_action_flags(match, pe_action_optional); } } g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, next, TRUE); if(is_set(start->flags, pe_action_runnable)) { /* wait for StartRsc() to be called */ rsc->role = RSC_ROLE_STOPPED; } else { /* wait for StopRsc() to be called */ rsc->next_role = RSC_ROLE_STOPPED; } } else { stop = stop_action(rsc, current, TRUE); start = start_action(rsc, next, TRUE); if(is_set(start->flags, pe_action_optional)) { update_action_flags(stop, pe_action_optional); } else { update_action_flags(stop, pe_action_optional|pe_action_clear); } if(rsc->next_role > RSC_ROLE_STARTED) { DemoteRsc(rsc, current, is_set(start->flags, pe_action_optional), data_set); } StopRsc(rsc, current, is_set(start->flags, pe_action_optional), data_set); StartRsc(rsc, current, is_set(start->flags, pe_action_optional), data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, is_set(start->flags, pe_action_optional), data_set); } if(is_set(start->flags, pe_action_runnable) == FALSE) { rsc->next_role = RSC_ROLE_STOPPED; } } } gboolean StopRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { GListPtr gIter = NULL; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_debug_2("Executing: %s", rsc->id); if(rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && safe_str_eq(class, "stonith")) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order( NULL, crm_strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_optional|pe_order_stonith_stop, data_set); } for(gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t*)gIter->data; stop_action(rsc, current, optional); if(is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } } return TRUE; } gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *start = NULL; crm_debug_2("Executing: %s", rsc->id); start = start_action(rsc, next, TRUE); if(is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional|pe_action_clear); } return TRUE; } gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { char *key = NULL; GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; crm_debug_2("Executing: %s", rsc->id); CRM_CHECK(rsc->next_role == RSC_ROLE_MASTER, crm_err("Next role: %s", role2text(rsc->next_role)); return FALSE); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = 
find_actions_exact(rsc->actions, key, next); crm_free(key); for(gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t*)gIter->data; if(is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if(runnable) { promote_action(rsc, next, optional); return TRUE; } crm_debug("%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); for(gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t*)gIter->data; update_action_flags(promote, pe_action_runnable|pe_action_clear); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { GListPtr gIter = NULL; crm_debug_2("Executing: %s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for(gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t*)gIter->data; demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug("Executing: %s", rsc->id); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if(is_set(rsc->flags, pe_rsc_failed)) { crm_debug_2("Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if(node == NULL) { crm_debug_2("Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if(node->details->unclean || node->details->online == FALSE) { crm_debug_2("Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional?pe_order_implies_then:pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional?pe_order_implies_then:pe_order_optional, data_set); #if DELETE_THEN_REFRESH refresh = custom_action( NULL, crm_strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> static node_t * probe_grouped_clone(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { node_t *running = NULL; resource_t *top = uber_parent(rsc); if(running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. 
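* (Anonymous clone instances share a single resource history per node, so if the state of any peer instance is already known on this node, the probe for this instance can be skipped.)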
* * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while(peer && running == NULL) { running = pe_hash_table_lookup(peer->known_on, node->details->id); if(running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active clone: %s", rsc->id); crm_free(clone_id); return running; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } crm_free(clone_id); } return running; } gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { char *key = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if(rc_inactive == NULL) { rc_inactive = crm_itoa(EXECRA_NOT_RUNNING); rc_master = crm_itoa(EXECRA_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if(force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { crm_debug_2("Skipping active resource detection for %s", rsc->id); return FALSE; } if(rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; any_created = child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set) || any_created; } return any_created; } if(is_set(rsc->flags, pe_rsc_orphan)) { crm_debug_2("Skipping orphan: %s", rsc->id); return FALSE; } running = g_hash_table_lookup(rsc->known_on, node->details->id); if(running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) { /* Anonymous clones */ if(rsc->parent == top) { running = g_hash_table_lookup(rsc->parent->known_on, node->details->id); } else { /* Grouped anonymous clones need extra special handling */ running = probe_grouped_clone(rsc, node, data_set); } } if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active: %s", rsc->id); return FALSE; } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional|pe_action_clear); /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. 
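* (If the resource is not running here the probe is expected to return EXECRA_NOT_RUNNING, and if it is running as a master it is expected to return EXECRA_RUNNING_MASTER; the rc_inactive/rc_master values set below encode that expectation.)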
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if(running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if(rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_then); return TRUE; } static void native_start_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { node_t *target = stonith_op?stonith_op->node:NULL; if(is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); crm_debug_2("Ordering %s action before stonith events", key); custom_action_order( rsc, key, NULL, NULL, crm_strdup(ready->task), ready, pe_order_optional|pe_order_implies_then, data_set); } else { GListPtr gIter = NULL; action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); for(gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_optional); } else if(target != NULL && safe_str_eq(action->task, RSC_START) && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { /* if known == NULL, then we don't know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * with the resource * * it's analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explanation is that the * DC died and took its status with it */ crm_debug("Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_optional|pe_order_runnable_left); } } } } static void native_stop_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { char *key = NULL; GListPtr gIter = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); /* add the stonith OP as a stop pre-req and then mark the stop * as a pseudo op - since it's now redundant */ for(gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if(is_set(rsc->flags, pe_rsc_failed)) { crm_warn("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); update_action_flags(action, pe_action_implied_by_stonith); if(is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if(is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the
node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_resource"), crm_strdup(rsc->id)); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_uname"), crm_strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* From Bug #1601, successful fencing must be an input to a failed resource's stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,crm_strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); for(gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t*)gIter->data; if(action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if(is_set(rsc->flags, pe_rsc_failed)) { crm_info("Demote of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ crm_trace("here - 1"); update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); if(is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering( resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set) { gboolean is_stonith = FALSE; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->children) { GListPtr gIter = NULL; for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } return; } if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(stonith_op != NULL && safe_str_eq(class, "stonith")) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op,
is_stonith, data_set); /* Stop constraints */ native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t *rsc, resource_t *target, node_t *node, const char *type) { int mode = stack_stable; action_t *active = NULL; if(target->children) { GListPtr gIter = NULL; for(gIter = target->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t*)gIter->data; mode |= find_clone_activity_on(rsc, child, node, type); } return mode; } active = find_first_action(target->actions, NULL, RSC_START, NULL); if(active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, RSC_STOP, node); if(active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t *rsc, resource_t *other_rsc, const char *type) { resource_t *other_p = uber_parent(other_rsc); if(other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if(other_p->variant == pe_native) { crm_notice("Cannot migrate %s due to dependency on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if(other_rsc == rsc->parent) { int mode = 0; GListPtr gIter = NULL; for(gIter = other_rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; if(constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } } return mode; } else if(other_p->variant == pe_group) { crm_notice("Cannot migrate %s due to dependency on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following constraints already exist in the system. The 'ok' and 'fail' column refers to whether they still hold for migration. 
a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stoping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as-per previous only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t *rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; GListPtr gIter = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); crm_debug_3("%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); for(gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; resource_t *target = constraint->rsc_rh; crm_debug_4("Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } } for(gIter = start->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t*)gIter->data; other = other_w->action; if(other_w->type & pe_order_serialize_only) { crm_debug_3("%s: depends on %s (serialize ordering)", rsc->id, other->uuid); continue; } crm_debug_2("%s: Checking %s ordering", rsc->id, other->uuid); if(is_set(other->flags, pe_action_optional) == FALSE) { mode |= check_stack_element(rsc, other->rsc, "order"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } } return TRUE; } void rsc_migrate_reload(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; GListPtr gIter = NULL; int level = LOG_DEBUG; GListPtr action_list = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *other = NULL; action_t *action = NULL; const char *value = NULL; if(rsc->children) { for(gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t*)gIter->data; rsc_migrate_reload(child_rsc, data_set); } other = NULL; return; } else if(rsc->variant > pe_native) { return; } do_crm_log_unlikely(level+1, "Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || g_list_length(rsc->running_on) != 1) { do_crm_log_unlikely( level+1, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if(crm_is_true(value)) { set_bit(rsc->flags, pe_rsc_can_migrate); } if(rsc->next_role > RSC_ROLE_SLAVE) { clear_bit(rsc->flags, pe_rsc_can_migrate); do_crm_log_unlikely( level+1, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); } key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); 
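/* Only attempt a migration or reload conversion when the resource has concrete, mandatory and runnable stop/start actions; the checks below bail out early otherwise. */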
crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no start action", rsc->id); return; } start = action_list->data; g_list_free(action_list); if(is_not_set(rsc->flags, pe_rsc_can_migrate) && is_set(start->flags, pe_action_allow_reload_conversion) == FALSE) { do_crm_log_unlikely(level+1, "%s: no need to continue", rsc->id); return; } key = stop_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no stop action", rsc->id); return; } stop = action_list->data; g_list_free(action_list); action = start; if(action->node == NULL || is_set(action->flags, pe_action_pseudo) || is_set(action->flags, pe_action_optional) || is_set(action->flags, pe_action_runnable) == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } action = stop; if(action->node == NULL || is_set(action->flags, pe_action_pseudo) || is_set(action->flags, pe_action_optional) || is_set(action->flags, pe_action_runnable) == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } if(is_set(rsc->flags, pe_rsc_can_migrate)) { if(start->node == NULL || stop->node == NULL || stop->node->details == start->node->details) { clear_bit(rsc->flags, pe_rsc_can_migrate); } else if(at_stack_bottom(rsc) == FALSE) { clear_bit(rsc->flags, pe_rsc_can_migrate); } } if(is_set(rsc->flags, pe_rsc_can_migrate)) { action_t *to = NULL; action_t *from = NULL; action_t *done = get_pseudo_op(STONITH_DONE, data_set); crm_info("Migrating %s from %s to %s", rsc->id, stop->node->details->uname, start->node->details->uname); /* Preserve the stop to ensure the end state is sane on that node, * Make the start a pseudo op * Create migrate_to, have it depend on everything the stop did * Create migrate_from * *-> migrate_to -> migrate_from -> stop -> start */ update_action_flags(start, pe_action_pseudo); /* easier than trying to delete it from the graph * but perhaps we should have it run anyway */ to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, stop->node, FALSE, TRUE, data_set); from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, start->node, FALSE, TRUE, data_set); /* This is slightly sub-optimal if 'to' fails, but always * run both halves of the migration before terminating the * transition. * * This can be removed if/when we update unpack_rsc_op() to * 'correctly' handle partial migrations. 
* * Without this, we end up stopping both sides */ from->priority = INFINITY; order_actions(to, from, pe_order_optional); order_actions(from, stop, pe_order_optional); order_actions(done, to, pe_order_optional); add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname); add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname); add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname); add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname); /* Create the correct ordering ajustments based on find_clone_activity_on(); */ for(gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t*)gIter->data; resource_t *target = constraint->rsc_rh; crm_info("Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { int mode = check_stack_element(rsc, target, "coloc"); action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); CRM_ASSERT(clone_stop != NULL); CRM_ASSERT(clone_start != NULL); CRM_ASSERT((mode & stack_middle) == 0); CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0); if(mode & stack_stopping) { #if 0 crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id); order_actions(from, clone_stop, pe_order_optional); #endif GListPtr lpc2 = NULL; for(lpc2 = start->actions_before; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other_w = (action_wrapper_t*)lpc2->data; /* Needed if the clone's started pseudo-action ever gets printed in the graph */ if(other_w->action == clone_start) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, start->uuid); other_w->type = pe_order_none; } } } else if(mode & stack_starting) { #if 0 crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id); order_actions(clone_start, to, pe_order_optional); #endif GListPtr lpc2 = NULL; for(lpc2 = clone_stop->actions_before; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other_w = (action_wrapper_t*)lpc2->data; /* Needed if the clone's stop pseudo-action ever gets printed in the graph */ if(other_w->action == stop) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, clone_stop->uuid); other_w->type = pe_order_none; } } } } } #if 0 /* Implied now that start/stop are not morphed into migrate ops */ /* Anything that needed stop to complete, now also needs start to have completed */ for(gIter = stop->actions_after; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t*)gIter->data; other = other_w->action; if(is_set(other->flags, pe_action_optional) || other->rsc != NULL) { continue; } crm_debug("Ordering %s before %s (stop)", from->uuid, other->uuid); order_actions(from, other, other_w->type); } #endif /* migrate_to also needs anything that the stop needed to have completed too */ for(gIter = stop->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t*)gIter->data; other = other_w->action; if(other->rsc == NULL) { /* nothing */ } else if(is_set(other->flags, pe_action_optional) || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (stop)", other_w->action->uuid, stop->uuid); order_actions(other, to, other_w->type); } /* migrate_to also needs anything that the start needed to have completed too */ for(gIter = 
start->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t*)gIter->data; other = other_w->action; if(other->rsc == NULL) { /* nothing */ } else if(is_set(other->flags, pe_action_optional) || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (start)", other_w->action->uuid, stop->uuid); order_actions(other, to, other_w->type); } } else if(start && stop && is_set(start->flags, pe_action_allow_reload_conversion) && stop->node->details == start->node->details) { action_t *rewrite = NULL; update_action_flags(start, pe_action_pseudo); /* easier than trying to delete it from the graph */ action = NULL; key = promote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } if(action && is_set(action->flags, pe_action_optional) == FALSE) { update_action_flags(action, pe_action_pseudo); } g_list_free(action_list); crm_free(key); action = NULL; key = demote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } g_list_free(action_list); crm_free(key); if(action && is_set(action->flags, pe_action_optional) == FALSE) { rewrite = action; update_action_flags(stop, pe_action_pseudo); } else { rewrite = stop; } crm_info("Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); crm_free(rewrite->uuid); crm_free(rewrite->task); rewrite->task = crm_strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } else { do_crm_log_unlikely(level+1, "%s nothing to do", rsc->id); } } void native_append_meta(resource_t *rsc, xmlNode *xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if(value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); crm_free(name); } } diff --git a/pengine/pengine.h b/pengine/pengine.h index 8580c7377e..3b83f40d59 100644 --- a/pengine/pengine.h +++ b/pengine/pengine.h @@ -1,176 +1,193 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PENGINE__H #define PENGINE__H typedef struct rsc_to_node_s rsc_to_node_t; typedef struct rsc_colocation_s rsc_colocation_t; +typedef struct rsc_ticket_s rsc_ticket_t; typedef struct lrm_agent_s lrm_agent_t; typedef struct order_constraint_s order_constraint_t; #include #include #include #include #include #include #include #include enum pe_stop_fail { pesf_block, pesf_stonith, pesf_ignore }; enum pe_weights { pe_weights_none = 0x0, pe_weights_init = 0x1, pe_weights_forward = 0x4, pe_weights_positive = 0x8, pe_weights_rollback = 0x10, }; enum pe_ordering { pe_order_none = 0x0, /* deleted */ pe_order_optional = 0x1, /* pure ordering, nothing implied */ pe_order_implies_first = 0x10, /* If 'first' is required, ensure 'then' is too */ pe_order_implies_then = 0x20, /* If 'then' is required, ensure 'first' is too */ pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ pe_order_restart = 0x1000, /* stop-start constraint */ pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */ pe_order_serialize_only = 0x4000, /* serialize */ pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not manditory */ pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not manditory */ pe_order_trace = 0x4000000 /* test marker */ }; struct rsc_colocation_s { const char *id; const char *node_attribute; resource_t *rsc_lh; resource_t *rsc_rh; int role_lh; int role_rh; int score; }; +enum loss_ticket_policy_e { + loss_ticket_stop, + loss_ticket_demote, + loss_ticket_fence, + loss_ticket_freeze +}; + +struct rsc_ticket_s { + const char *id; + resource_t *rsc_lh; + ticket_t *ticket; + enum loss_ticket_policy_e loss_policy; + + int role_lh; +}; + struct rsc_to_node_s { const char *id; resource_t *rsc_lh; enum rsc_role_e role_filter; GListPtr node_list_rh; /* node_t* */ }; struct order_constraint_s { int id; enum pe_ordering type; void *lh_opaque; resource_t *lh_rsc; action_t *lh_action; char *lh_action_task; void *rh_opaque; resource_t *rh_rsc; action_t *rh_action; char *rh_action_task; /* (soon to be) variant specific */ /* int lh_rsc_incarnation; */ /* int rh_rsc_incarnation; */ }; enum pe_link_state { pe_link_not_dumped, pe_link_dumped, pe_link_dup, }; typedef struct action_wrapper_s action_wrapper_t; struct action_wrapper_s { enum pe_ordering type; enum pe_link_state state; action_t *action; }; extern gboolean stage0(pe_working_set_t *data_set); extern gboolean probe_resources(pe_working_set_t *data_set); extern gboolean stage2(pe_working_set_t *data_set); extern gboolean stage3(pe_working_set_t *data_set); extern gboolean stage4(pe_working_set_t *data_set); extern gboolean stage5(pe_working_set_t *data_set); extern gboolean stage6(pe_working_set_t *data_set); extern gboolean stage7(pe_working_set_t *data_set); extern gboolean stage8(pe_working_set_t *data_set); extern gboolean summary(GListPtr resources); extern gboolean pe_msg_dispatch(IPC_Channel *sender, void *user_data); extern gboolean process_pe_message( xmlNode *msg, xmlNode *xml_data, IPC_Channel *sender); extern gboolean unpack_constraints( xmlNode *xml_constraints, pe_working_set_t *data_set); extern gboolean update_action_states(GListPtr actions); extern gboolean shutdown_constraints( node_t *node, 
action_t *shutdown_op, pe_working_set_t *data_set); extern gboolean stonith_constraints( node_t *node, action_t *stonith_op, pe_working_set_t *data_set); extern int custom_action_order( resource_t *lh_rsc, char *lh_task, action_t *lh_action, resource_t *rh_rsc, char *rh_task, action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set); extern int new_rsc_order(resource_t *lh_rsc, const char *lh_task, resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set); #define order_start_start(rsc1,rsc2, type) \ new_rsc_order(rsc1, CRMD_ACTION_START, rsc2, CRMD_ACTION_START, type, data_set) #define order_stop_stop(rsc1, rsc2, type) \ new_rsc_order(rsc1, CRMD_ACTION_STOP, rsc2, CRMD_ACTION_STOP, type, data_set) extern void graph_element_from_action( action_t *action, pe_working_set_t *data_set); extern gboolean show_scores; extern int scores_log_level; extern gboolean show_utilization; extern int utilization_log_level; extern const char* transition_idle_timeout; #endif diff --git a/pengine/regression.sh b/pengine/regression.sh index 97f03cf2b0..3100640573 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,457 +1,520 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # core=`dirname $0` . $core/regression.core.sh io_dir=$test_home/test10 create_mode="true" info Generating test outputs for these tests... # do_test file description info Done. 
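# Each do_test entry below names an input CIB ($io_dir/<name>.xml) and a short description; the expected outputs are stored alongside it as <name>.exp, <name>.dot and <name>.scores (do_test itself is provided by regression.core.sh).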
echo "" info Performing the following tests from $io_dir create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test domain "Failover domains" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" --rc 4 do_test standby "Standby" do_test comments "Comments" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (manditory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependant clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't 
imply colocation requirements when applying ordering constraints with clones" # This test emits an error log and thus upsets the test suite; even # though it explicitly aims to test an error leg. FIXME # do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependant must stop" echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed 
migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependancy on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Dont shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test 
clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Dont retry failed demote actions" do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependancy restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594" do_test 
662 "OSDL #662" do_test 696 "OSDL #696" do_test 726 "OSDL #726" do_test 735 "OSDL #735" do_test 764 "OSDL #764" do_test 797 "OSDL #797" do_test 829 "OSDL #829" do_test 994 "OSDL #994" do_test 994-2 "OSDL #994 - with a dependant resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Dont promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progessive) #1" do_test systemhealthp2 "System Health (Progessive) #2" do_test systemhealthp3 "System Health (Progessive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Mirgration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" echo "" do_test stopped-monitor-00-initial-start "Stopped Monitor - initial start" do_test 
stopped-monitor-01-failed-started "Stopped Monitor - failed started" do_test stopped-monitor-02-started-multi-up "Stopped Monitor - started multi-up" do_test stopped-monitor-03-stop-started "Stopped Monitor - stop started" do_test stopped-monitor-04-failed-stop "Stopped Monitor - failed stop" do_test stopped-monitor-05-start-unmanaged "Stopped Monitor - start unmanaged" do_test stopped-monitor-06-unmanaged-multi-up "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07-start-unmanaged-multi-up "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08-migrate "Stopped Monitor - migrate" do_test stopped-monitor-09-unmanage-started "Stopped Monitor - unmanage started" do_test stopped-monitor-10-unmanaged-started-multi-up "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11-stop-unmanaged-started "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12-unmanaged-started-multi-up "Stopped Monitor - unmanaged started multi-up (target-role="Stopped")" do_test stopped-monitor-20-initial-stop "Stopped Monitor - initial stop" do_test stopped-monitor-21-stopped-single-up "Stopped Monitor - stopped single-up" do_test stopped-monitor-22-stopped-multi-up "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23-start-stopped "Stopped Monitor - start stopped" do_test stopped-monitor-24-unmanage-stopped "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25-unmanaged-stopped-multi-up "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26-start-unmanaged-stopped "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27-unmanaged-stopped-multi-up "Stopped Monitor - unmanaged stopped multi-up (target-role="Started")" do_test stopped-monitor-30-new-node-started "Stopped Monitor - new node started" do_test stopped-monitor-31-new-node-stopped "Stopped Monitor - new node stopped" +echo "" +do_test ticket-primitive-stop-1-initial "Ticket - Primitive (loss-policy=stop, initial)" +do_test ticket-primitive-stop-2-granted "Ticket - Primitive (loss-policy=stop, granted)" +do_test ticket-primitive-stop-3-revoked "Ticket - Primitive (loss-policy=stop, revoked)" +do_test ticket-primitive-demote-1-initial "Ticket - Primitive (loss-policy=demote, initial)" +do_test ticket-primitive-demote-2-granted "Ticket - Primitive (loss-policy=demote, granted)" +do_test ticket-primitive-demote-3-revoked "Ticket - Primitive (loss-policy=demote, revoked)" +do_test ticket-primitive-fence-1-initial "Ticket - Primitive (loss-policy=fence, initial)" +do_test ticket-primitive-fence-2-granted "Ticket - Primitive (loss-policy=fence, granted)" +do_test ticket-primitive-fence-3-revoked "Ticket - Primitive (loss-policy=fence, revoked)" +do_test ticket-primitive-freeze-1-initial "Ticket - Primitive (loss-policy=freeze, initial)" +do_test ticket-primitive-freeze-2-granted "Ticket - Primitive (loss-policy=freeze, granted)" +do_test ticket-primitive-freeze-3-revoked "Ticket - Primitive (loss-policy=freeze, revoked)" + +do_test ticket-group-stop-1-initial "Ticket - Group (loss-policy=stop, initial)" +do_test ticket-group-stop-2-granted "Ticket - Group (loss-policy=stop, granted)" +do_test ticket-group-stop-3-revoked "Ticket - Group (loss-policy=stop, revoked)" +do_test ticket-group-demote-1-initial "Ticket - Group (loss-policy=demote, initial)" +do_test ticket-group-demote-2-granted "Ticket - Group (loss-policy=demote, granted)" +do_test ticket-group-demote-3-revoked "Ticket - Group (loss-policy=demote, revoked)" +do_test
ticket-group-fence-1-initial "Ticket - Group (loss-policy=fence, initial)" +do_test ticket-group-fence-2-granted "Ticket - Group (loss-policy=fence, granted)" +do_test ticket-group-fence-3-revoked "Ticket - Group (loss-policy=fence, revoked)" +do_test ticket-group-freeze-1-initial "Ticket - Group (loss-policy=freeze, initial)" +do_test ticket-group-freeze-2-granted "Ticket - Group (loss-policy=freeze, granted)" +do_test ticket-group-freeze-3-revoked "Ticket - Group (loss-policy=freeze, revoked)" + +do_test ticket-clone-stop-1-initial "Ticket - Clone (loss-policy=stop, initial)" +do_test ticket-clone-stop-2-granted "Ticket - Clone (loss-policy=stop, granted)" +do_test ticket-clone-stop-3-revoked "Ticket - Clone (loss-policy=stop, revoked)" +do_test ticket-clone-demote-1-initial "Ticket - Clone (loss-policy=demote, initial)" +do_test ticket-clone-demote-2-granted "Ticket - Clone (loss-policy=demote, granted)" +do_test ticket-clone-demote-3-revoked "Ticket - Clone (loss-policy=demote, revoked)" +do_test ticket-clone-fence-1-initial "Ticket - Clone (loss-policy=fence, initial)" +do_test ticket-clone-fence-2-granted "Ticket - Clone (loss-policy=fence, granted)" +do_test ticket-clone-fence-3-revoked "Ticket - Clone (loss-policy=fence, revoked)" +do_test ticket-clone-freeze-1-initial "Ticket - Clone (loss-policy=freeze, initial)" +do_test ticket-clone-freeze-2-granted "Ticket - Clone (loss-policy=freeze, granted)" +do_test ticket-clone-freeze-3-revoked "Ticket - Clone (loss-policy=freeze, revoked)" + +do_test ticket-master-stop-1-initial "Ticket - Master (loss-policy=stop, initial)" +do_test ticket-master-stop-2-granted "Ticket - Master (loss-policy=stop, granted)" +do_test ticket-master-stop-3-revoked "Ticket - Master (loss-policy=stop, revoked)" +do_test ticket-master-demote-1-initial "Ticket - Master (loss-policy=demote, initial)" +do_test ticket-master-demote-2-granted "Ticket - Master (loss-policy=demote, granted)" +do_test ticket-master-demote-3-revoked "Ticket - Master (loss-policy=demote, revoked)" +do_test ticket-master-fence-1-initial "Ticket - Master (loss-policy=fence, initial)" +do_test ticket-master-fence-2-granted "Ticket - Master (loss-policy=fence, granted)" +do_test ticket-master-fence-3-revoked "Ticket - Master (loss-policy=fence, revoked)" +do_test ticket-master-freeze-1-initial "Ticket - Master (loss-policy=freeze, initial)" +do_test ticket-master-freeze-2-granted "Ticket - Master (loss-policy=freeze, granted)" +do_test ticket-master-freeze-3-revoked "Ticket - Master (loss-policy=freeze, revoked)" + +echo "" + +do_test ticket-rsc-sets-1-ticket-1-initial "Ticket - Resource sets (1 ticket, initial)" +do_test ticket-rsc-sets-1-ticket-2-granted "Ticket - Resource sets (1 ticket, granted)" +do_test ticket-rsc-sets-1-ticket-3-revoked "Ticket - Resource sets (1 ticket, revoked)" +do_test ticket-rsc-sets-2-tickets-1-initial "Ticket - Resource sets (2 tickets, initial)" +do_test ticket-rsc-sets-2-tickets-2-granted "Ticket - Resource sets (2 tickets, granted)" +do_test ticket-rsc-sets-2-tickets-3-granted "Ticket - Resource sets (2 tickets, granted)" +do_test ticket-rsc-sets-2-tickets-4-revoked "Ticket - Resource sets (2 tickets, revoked)" + echo "" test_results diff --git a/pengine/test10/ticket-clone-demote-1-initial.dot b/pengine/test10/ticket-clone-demote-1-initial.dot new file mode 100644 index 0000000000..4601f99af7 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] 
+"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-demote-1-initial.exp b/pengine/test10/ticket-clone-demote-1-initial.exp new file mode 100644 index 0000000000..c6193f730c --- /dev/null +++ b/pengine/test10/ticket-clone-demote-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-demote-1-initial.scores b/pengine/test10/ticket-clone-demote-1-initial.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-demote-1-initial.xml b/pengine/test10/ticket-clone-demote-1-initial.xml new file mode 100644 index 0000000000..f515762899 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-1-initial.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-demote-2-granted.dot b/pengine/test10/ticket-clone-demote-2-granted.dot new file mode 100644 index 0000000000..f39982cc5f --- /dev/null +++ b/pengine/test10/ticket-clone-demote-2-granted.dot @@ -0,0 +1,17 @@ +digraph "g" { +"clone1_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_start_0" -> "clone1_running_0" [ style = bold] +"clone1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"clone1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"clone1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "clone1_running_0" [ style = bold] +"rsc1:0_start_0 node2" -> "rsc1:0_monitor_5000 node2" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "clone1_running_0" [ style = bold] +"rsc1:1_start_0 node1" -> "rsc1:1_monitor_5000 node1" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git 
a/pengine/test10/ticket-clone-demote-2-granted.exp b/pengine/test10/ticket-clone-demote-2-granted.exp new file mode 100644 index 0000000000..0c122c466b --- /dev/null +++ b/pengine/test10/ticket-clone-demote-2-granted.exp @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-demote-2-granted.scores b/pengine/test10/ticket-clone-demote-2-granted.scores new file mode 100644 index 0000000000..f1b0bd06fa --- /dev/null +++ b/pengine/test10/ticket-clone-demote-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-demote-2-granted.xml b/pengine/test10/ticket-clone-demote-2-granted.xml new file mode 100644 index 0000000000..35ac9a2d1f --- /dev/null +++ b/pengine/test10/ticket-clone-demote-2-granted.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-demote-3-revoked.dot b/pengine/test10/ticket-clone-demote-3-revoked.dot new file mode 100644 index 0000000000..b1adbf2599 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-3-revoked.dot @@ -0,0 +1,16 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"clone1_stop_0" -> "clone1_stopped_0" [ style = bold] +"clone1_stop_0" -> "rsc1:0_stop_0 node2" [ style = bold] +"clone1_stop_0" -> "rsc1:1_stop_0 node1" [ style = bold] +"clone1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1:0_stop_0 node2" -> "clone1_stopped_0" [ style = bold] +"rsc1:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc1:1_stop_0 node1" -> "clone1_stopped_0" [ style = bold] +"rsc1:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-demote-3-revoked.exp b/pengine/test10/ticket-clone-demote-3-revoked.exp new file mode 100644 index 0000000000..4c5cf3a572 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-3-revoked.exp @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-demote-3-revoked.scores b/pengine/test10/ticket-clone-demote-3-revoked.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-demote-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: 
+clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-demote-3-revoked.xml b/pengine/test10/ticket-clone-demote-3-revoked.xml new file mode 100644 index 0000000000..1d3127386f --- /dev/null +++ b/pengine/test10/ticket-clone-demote-3-revoked.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-1-initial.dot b/pengine/test10/ticket-clone-fence-1-initial.dot new file mode 100644 index 0000000000..4601f99af7 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-fence-1-initial.exp b/pengine/test10/ticket-clone-fence-1-initial.exp new file mode 100644 index 0000000000..c6193f730c --- /dev/null +++ b/pengine/test10/ticket-clone-fence-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-1-initial.scores b/pengine/test10/ticket-clone-fence-1-initial.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-fence-1-initial.xml b/pengine/test10/ticket-clone-fence-1-initial.xml new file mode 100644 index 0000000000..7807cc16c2 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-1-initial.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-2-granted.dot b/pengine/test10/ticket-clone-fence-2-granted.dot new file mode 100644 index 0000000000..f39982cc5f --- /dev/null +++ b/pengine/test10/ticket-clone-fence-2-granted.dot @@ -0,0 +1,17 @@ +digraph "g" { +"clone1_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_start_0" -> "clone1_running_0" [ style = bold] +"clone1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"clone1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"clone1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "clone1_running_0" [ style = bold] +"rsc1:0_start_0 node2" -> "rsc1:0_monitor_5000 node2" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "clone1_running_0" [ style = bold] +"rsc1:1_start_0 node1" -> "rsc1:1_monitor_5000 node1" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-fence-2-granted.exp b/pengine/test10/ticket-clone-fence-2-granted.exp new file mode 100644 index 0000000000..0c122c466b --- /dev/null +++ b/pengine/test10/ticket-clone-fence-2-granted.exp @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-2-granted.scores b/pengine/test10/ticket-clone-fence-2-granted.scores new file mode 100644 index 0000000000..f1b0bd06fa --- /dev/null +++ b/pengine/test10/ticket-clone-fence-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-fence-2-granted.xml b/pengine/test10/ticket-clone-fence-2-granted.xml new file mode 100644 index 0000000000..37ab92c1e2 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-2-granted.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-3-revoked.dot b/pengine/test10/ticket-clone-fence-3-revoked.dot new file mode 100644 index 0000000000..6aafce077e --- /dev/null +++ b/pengine/test10/ticket-clone-fence-3-revoked.dot @@ -0,0 +1,31 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"clone1_stop_0" -> "clone1_stopped_0" [ style = bold] +"clone1_stop_0" -> "rsc1:0_stop_0 node2" [ style = bold] +"clone1_stop_0" -> "rsc1:1_stop_0 node1" [ style = bold] +"clone1_stop_0" [ style=bold 
color="green" fontcolor="orange" ] +"clone1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1:0_stop_0 node2" -> "clone1_stopped_0" [ style = bold] +"rsc1:0_stop_0 node2" [ style=bold color="green" fontcolor="orange" ] +"rsc1:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc1:1_stop_0 node1" -> "clone1_stopped_0" [ style = bold] +"rsc1:1_stop_0 node1" [ style=bold color="green" fontcolor="orange" ] +"stonith node1" -> "all_stopped" [ style = bold] +"stonith node1" -> "clone1_stop_0" [ style = bold] +"stonith node1" -> "rsc1:1_stop_0 node1" [ style = bold] +"stonith node1" -> "stonith node2" [ style = bold] +"stonith node1" [ style=bold color="green" fontcolor="black" ] +"stonith node2" -> "all_stopped" [ style = bold] +"stonith node2" -> "clone1_stop_0" [ style = bold] +"stonith node2" -> "rsc1:0_stop_0 node2" [ style = bold] +"stonith node2" -> "stonith_complete" [ style = bold] +"stonith node2" [ style=bold color="green" fontcolor="black" ] +"stonith_complete" -> "rsc1:0_stop_0 node2" [ style = bold] +"stonith_complete" -> "rsc1:1_stop_0 node1" [ style = bold] +"stonith_complete" [ style=bold color="green" fontcolor="orange" ] +"stonith_up" -> "stonith node1" [ style = bold] +"stonith_up" -> "stonith node2" [ style = bold] +"stonith_up" -> "stonith_complete" [ style = bold] +"stonith_up" [ style=bold color="green" fontcolor="orange" ] +} diff --git a/pengine/test10/ticket-clone-fence-3-revoked.exp b/pengine/test10/ticket-clone-fence-3-revoked.exp new file mode 100644 index 0000000000..7a5c5bc2d7 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-3-revoked.exp @@ -0,0 +1,143 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-fence-3-revoked.scores b/pengine/test10/ticket-clone-fence-3-revoked.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-fence-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-fence-3-revoked.xml b/pengine/test10/ticket-clone-fence-3-revoked.xml new file mode 100644 index 0000000000..81700a564d --- /dev/null +++ b/pengine/test10/ticket-clone-fence-3-revoked.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-1-initial.dot b/pengine/test10/ticket-clone-freeze-1-initial.dot new file mode 100644 index 0000000000..4601f99af7 --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-1-initial.dot @@ -0,0 +1,11 @@ 
+digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-freeze-1-initial.exp b/pengine/test10/ticket-clone-freeze-1-initial.exp new file mode 100644 index 0000000000..c6193f730c --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-1-initial.scores b/pengine/test10/ticket-clone-freeze-1-initial.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-freeze-1-initial.xml b/pengine/test10/ticket-clone-freeze-1-initial.xml new file mode 100644 index 0000000000..94456caf86 --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-1-initial.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-2-granted.dot b/pengine/test10/ticket-clone-freeze-2-granted.dot new file mode 100644 index 0000000000..f39982cc5f --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-2-granted.dot @@ -0,0 +1,17 @@ +digraph "g" { +"clone1_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_start_0" -> "clone1_running_0" [ style = bold] +"clone1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"clone1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"clone1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "clone1_running_0" [ style = bold] +"rsc1:0_start_0 node2" -> "rsc1:0_monitor_5000 node2" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "clone1_running_0" [ style = bold] +"rsc1:1_start_0 node1" -> "rsc1:1_monitor_5000 node1" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold 
color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-freeze-2-granted.exp b/pengine/test10/ticket-clone-freeze-2-granted.exp new file mode 100644 index 0000000000..0c122c466b --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-2-granted.exp @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-2-granted.scores b/pengine/test10/ticket-clone-freeze-2-granted.scores new file mode 100644 index 0000000000..f1b0bd06fa --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-freeze-2-granted.xml b/pengine/test10/ticket-clone-freeze-2-granted.xml new file mode 100644 index 0000000000..91e83303de --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-2-granted.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-3-revoked.dot b/pengine/test10/ticket-clone-freeze-3-revoked.dot new file mode 100644 index 0000000000..5a800ee418 --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-3-revoked.dot @@ -0,0 +1,4 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-freeze-3-revoked.exp b/pengine/test10/ticket-clone-freeze-3-revoked.exp new file mode 100644 index 0000000000..efac49f6bf --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-3-revoked.exp @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-freeze-3-revoked.scores b/pengine/test10/ticket-clone-freeze-3-revoked.scores new file mode 100644 index 0000000000..d311a38bf8 --- /dev/null +++ b/pengine/test10/ticket-clone-freeze-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-freeze-3-revoked.xml b/pengine/test10/ticket-clone-freeze-3-revoked.xml new file mode 100644 index 0000000000..a7faed041a --- /dev/null +++ 
b/pengine/test10/ticket-clone-freeze-3-revoked.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-1-initial.dot b/pengine/test10/ticket-clone-stop-1-initial.dot new file mode 100644 index 0000000000..4601f99af7 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-stop-1-initial.exp b/pengine/test10/ticket-clone-stop-1-initial.exp new file mode 100644 index 0000000000..c6193f730c --- /dev/null +++ b/pengine/test10/ticket-clone-stop-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-1-initial.scores b/pengine/test10/ticket-clone-stop-1-initial.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-stop-1-initial.xml b/pengine/test10/ticket-clone-stop-1-initial.xml new file mode 100644 index 0000000000..a0f5387c86 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-1-initial.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-2-granted.dot b/pengine/test10/ticket-clone-stop-2-granted.dot new file mode 100644 index 0000000000..f39982cc5f --- /dev/null +++ b/pengine/test10/ticket-clone-stop-2-granted.dot @@ -0,0 +1,17 @@ +digraph "g" { +"clone1_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_start_0" -> "clone1_running_0" [ style = bold] +"clone1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"clone1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"clone1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "clone1_running_0" 
[ style = bold] +"rsc1:0_start_0 node2" -> "rsc1:0_monitor_5000 node2" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "clone1_running_0" [ style = bold] +"rsc1:1_start_0 node1" -> "rsc1:1_monitor_5000 node1" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-stop-2-granted.exp b/pengine/test10/ticket-clone-stop-2-granted.exp new file mode 100644 index 0000000000..0c122c466b --- /dev/null +++ b/pengine/test10/ticket-clone-stop-2-granted.exp @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-2-granted.scores b/pengine/test10/ticket-clone-stop-2-granted.scores new file mode 100644 index 0000000000..f1b0bd06fa --- /dev/null +++ b/pengine/test10/ticket-clone-stop-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-stop-2-granted.xml b/pengine/test10/ticket-clone-stop-2-granted.xml new file mode 100644 index 0000000000..464e886d74 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-2-granted.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-3-revoked.dot b/pengine/test10/ticket-clone-stop-3-revoked.dot new file mode 100644 index 0000000000..b1adbf2599 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-3-revoked.dot @@ -0,0 +1,16 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"clone1_stop_0" -> "clone1_stopped_0" [ style = bold] +"clone1_stop_0" -> "rsc1:0_stop_0 node2" [ style = bold] +"clone1_stop_0" -> "rsc1:1_stop_0 node1" [ style = bold] +"clone1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1:0_stop_0 node2" -> "clone1_stopped_0" [ style = bold] +"rsc1:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc1:1_stop_0 node1" -> "clone1_stopped_0" [ style = bold] +"rsc1:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-clone-stop-3-revoked.exp b/pengine/test10/ticket-clone-stop-3-revoked.exp new file mode 100644 index 0000000000..4c5cf3a572 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-3-revoked.exp @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-clone-stop-3-revoked.scores b/pengine/test10/ticket-clone-stop-3-revoked.scores new file mode 100644 index 0000000000..89e2d0f335 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-clone-stop-3-revoked.xml b/pengine/test10/ticket-clone-stop-3-revoked.xml new file mode 100644 index 0000000000..b3d8333100 --- /dev/null +++ b/pengine/test10/ticket-clone-stop-3-revoked.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-1-initial.dot b/pengine/test10/ticket-group-demote-1-initial.dot new file mode 100644 index 0000000000..547e88a659 --- /dev/null +++ b/pengine/test10/ticket-group-demote-1-initial.dot @@ -0,0 +1,15 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-demote-1-initial.exp b/pengine/test10/ticket-group-demote-1-initial.exp new file mode 100644 index 0000000000..3cdd221fc7 --- /dev/null +++ b/pengine/test10/ticket-group-demote-1-initial.exp @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-1-initial.scores b/pengine/test10/ticket-group-demote-1-initial.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-demote-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on 
node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-demote-1-initial.xml b/pengine/test10/ticket-group-demote-1-initial.xml new file mode 100644 index 0000000000..be73945491 --- /dev/null +++ b/pengine/test10/ticket-group-demote-1-initial.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-2-granted.dot b/pengine/test10/ticket-group-demote-2-granted.dot new file mode 100644 index 0000000000..b33d124db4 --- /dev/null +++ b/pengine/test10/ticket-group-demote-2-granted.dot @@ -0,0 +1,18 @@ +digraph "g" { +"group1_running_0" [ style=bold color="green" fontcolor="orange" ] +"group1_start_0" -> "group1_running_0" [ style = bold] +"group1_start_0" -> "rsc1_start_0 node2" [ style = bold] +"group1_start_0" -> "rsc2_start_0 node2" [ style = bold] +"group1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc1_start_0 node2" -> "rsc1_monitor_5000 node2" [ style = bold] +"rsc1_start_0 node2" -> "rsc2_start_0 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc2_start_0 node2" -> "rsc2_monitor_5000 node2" [ style = bold] +"rsc2_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-demote-2-granted.exp b/pengine/test10/ticket-group-demote-2-granted.exp new file mode 100644 index 0000000000..0c18c585fb --- /dev/null +++ b/pengine/test10/ticket-group-demote-2-granted.exp @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-2-granted.scores b/pengine/test10/ticket-group-demote-2-granted.scores new file mode 100644 index 0000000000..8078251c62 --- /dev/null +++ b/pengine/test10/ticket-group-demote-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: 0 +group_color: rsc1 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-demote-2-granted.xml b/pengine/test10/ticket-group-demote-2-granted.xml new file 
mode 100644 index 0000000000..064e211bcd --- /dev/null +++ b/pengine/test10/ticket-group-demote-2-granted.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-3-revoked.dot b/pengine/test10/ticket-group-demote-3-revoked.dot new file mode 100644 index 0000000000..3e24620652 --- /dev/null +++ b/pengine/test10/ticket-group-demote-3-revoked.dot @@ -0,0 +1,17 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"group1_stop_0" -> "group1_stopped_0" [ style = bold] +"group1_stop_0" -> "rsc1_stop_0 node2" [ style = bold] +"group1_stop_0" -> "rsc2_stop_0 node2" [ style = bold] +"group1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc2_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] +"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-demote-3-revoked.exp b/pengine/test10/ticket-group-demote-3-revoked.exp new file mode 100644 index 0000000000..3cd204e96a --- /dev/null +++ b/pengine/test10/ticket-group-demote-3-revoked.exp @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-demote-3-revoked.scores b/pengine/test10/ticket-group-demote-3-revoked.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-demote-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-demote-3-revoked.xml b/pengine/test10/ticket-group-demote-3-revoked.xml new file mode 100644 index 0000000000..0fdda63b9b --- /dev/null +++ b/pengine/test10/ticket-group-demote-3-revoked.xml @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-1-initial.dot b/pengine/test10/ticket-group-fence-1-initial.dot new file mode 100644 index 0000000000..547e88a659 --- /dev/null +++ b/pengine/test10/ticket-group-fence-1-initial.dot @@ -0,0 +1,15 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ 
style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-fence-1-initial.exp b/pengine/test10/ticket-group-fence-1-initial.exp new file mode 100644 index 0000000000..3cdd221fc7 --- /dev/null +++ b/pengine/test10/ticket-group-fence-1-initial.exp @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-1-initial.scores b/pengine/test10/ticket-group-fence-1-initial.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-fence-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-fence-1-initial.xml b/pengine/test10/ticket-group-fence-1-initial.xml new file mode 100644 index 0000000000..27806bf168 --- /dev/null +++ b/pengine/test10/ticket-group-fence-1-initial.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-2-granted.dot b/pengine/test10/ticket-group-fence-2-granted.dot new file mode 100644 index 0000000000..b33d124db4 --- /dev/null +++ b/pengine/test10/ticket-group-fence-2-granted.dot @@ -0,0 +1,18 @@ +digraph "g" { +"group1_running_0" [ style=bold color="green" fontcolor="orange" ] +"group1_start_0" -> "group1_running_0" [ style = bold] +"group1_start_0" -> "rsc1_start_0 node2" [ style = bold] +"group1_start_0" -> "rsc2_start_0 node2" [ style = bold] +"group1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc1_start_0 node2" -> "rsc1_monitor_5000 node2" [ style = bold] +"rsc1_start_0 node2" -> "rsc2_start_0 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold 
color="green" fontcolor="black" ] +"rsc2_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc2_start_0 node2" -> "rsc2_monitor_5000 node2" [ style = bold] +"rsc2_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-fence-2-granted.exp b/pengine/test10/ticket-group-fence-2-granted.exp new file mode 100644 index 0000000000..0c18c585fb --- /dev/null +++ b/pengine/test10/ticket-group-fence-2-granted.exp @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-2-granted.scores b/pengine/test10/ticket-group-fence-2-granted.scores new file mode 100644 index 0000000000..8078251c62 --- /dev/null +++ b/pengine/test10/ticket-group-fence-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: 0 +group_color: rsc1 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-fence-2-granted.xml b/pengine/test10/ticket-group-fence-2-granted.xml new file mode 100644 index 0000000000..4232acbb23 --- /dev/null +++ b/pengine/test10/ticket-group-fence-2-granted.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-3-revoked.dot b/pengine/test10/ticket-group-fence-3-revoked.dot new file mode 100644 index 0000000000..090a149968 --- /dev/null +++ b/pengine/test10/ticket-group-fence-3-revoked.dot @@ -0,0 +1,28 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"group1_stop_0" -> "group1_stopped_0" [ style = bold] +"group1_stop_0" -> "rsc1_stop_0 node2" [ style = bold] +"group1_stop_0" -> "rsc2_stop_0 node2" [ style = bold] +"group1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="orange" ] +"rsc2_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc2_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] +"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="orange" ] +"stonith node2" -> "all_stopped" [ style = bold] +"stonith node2" -> "group1_stop_0" [ style = bold] +"stonith node2" -> "rsc1_stop_0 node2" [ style = bold] +"stonith node2" -> "rsc2_stop_0 node2" [ style = bold] +"stonith node2" -> "stonith_complete" [ style = bold] +"stonith node2" [ style=bold color="green" fontcolor="black" ] +"stonith_complete" -> "rsc1_stop_0 node2" [ style = bold] 
+"stonith_complete" -> "rsc2_stop_0 node2" [ style = bold] +"stonith_complete" [ style=bold color="green" fontcolor="orange" ] +"stonith_up" -> "stonith node2" [ style = bold] +"stonith_up" -> "stonith_complete" [ style = bold] +"stonith_up" [ style=bold color="green" fontcolor="orange" ] +} diff --git a/pengine/test10/ticket-group-fence-3-revoked.exp b/pengine/test10/ticket-group-fence-3-revoked.exp new file mode 100644 index 0000000000..13324fb057 --- /dev/null +++ b/pengine/test10/ticket-group-fence-3-revoked.exp @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-fence-3-revoked.scores b/pengine/test10/ticket-group-fence-3-revoked.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-fence-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-fence-3-revoked.xml b/pengine/test10/ticket-group-fence-3-revoked.xml new file mode 100644 index 0000000000..dfd10c9dc5 --- /dev/null +++ b/pengine/test10/ticket-group-fence-3-revoked.xml @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-1-initial.dot b/pengine/test10/ticket-group-freeze-1-initial.dot new file mode 100644 index 0000000000..547e88a659 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-1-initial.dot @@ -0,0 +1,15 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-freeze-1-initial.exp b/pengine/test10/ticket-group-freeze-1-initial.exp new file mode 100644 index 0000000000..3cdd221fc7 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-1-initial.exp @@ -0,0 +1,84 @@ + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-1-initial.scores b/pengine/test10/ticket-group-freeze-1-initial.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-freeze-1-initial.xml b/pengine/test10/ticket-group-freeze-1-initial.xml new file mode 100644 index 0000000000..225fae28d5 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-1-initial.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-2-granted.dot b/pengine/test10/ticket-group-freeze-2-granted.dot new file mode 100644 index 0000000000..b33d124db4 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-2-granted.dot @@ -0,0 +1,18 @@ +digraph "g" { +"group1_running_0" [ style=bold color="green" fontcolor="orange" ] +"group1_start_0" -> "group1_running_0" [ style = bold] +"group1_start_0" -> "rsc1_start_0 node2" [ style = bold] +"group1_start_0" -> "rsc2_start_0 node2" [ style = bold] +"group1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc1_start_0 node2" -> "rsc1_monitor_5000 node2" [ style = bold] +"rsc1_start_0 node2" -> "rsc2_start_0 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc2_start_0 node2" -> "rsc2_monitor_5000 node2" [ style = bold] +"rsc2_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-freeze-2-granted.exp b/pengine/test10/ticket-group-freeze-2-granted.exp new file mode 100644 index 0000000000..0c18c585fb --- /dev/null +++ b/pengine/test10/ticket-group-freeze-2-granted.exp @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-2-granted.scores b/pengine/test10/ticket-group-freeze-2-granted.scores new file mode 100644 index 0000000000..8078251c62 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 
0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: 0 +group_color: rsc1 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-freeze-2-granted.xml b/pengine/test10/ticket-group-freeze-2-granted.xml new file mode 100644 index 0000000000..7bbc2bc620 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-2-granted.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-3-revoked.dot b/pengine/test10/ticket-group-freeze-3-revoked.dot new file mode 100644 index 0000000000..5a800ee418 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-3-revoked.dot @@ -0,0 +1,4 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-freeze-3-revoked.exp b/pengine/test10/ticket-group-freeze-3-revoked.exp new file mode 100644 index 0000000000..efac49f6bf --- /dev/null +++ b/pengine/test10/ticket-group-freeze-3-revoked.exp @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-freeze-3-revoked.scores b/pengine/test10/ticket-group-freeze-3-revoked.scores new file mode 100644 index 0000000000..8078251c62 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: 0 +group_color: rsc1 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-freeze-3-revoked.xml b/pengine/test10/ticket-group-freeze-3-revoked.xml new file mode 100644 index 0000000000..7a2e2399a2 --- /dev/null +++ b/pengine/test10/ticket-group-freeze-3-revoked.xml @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-1-initial.dot b/pengine/test10/ticket-group-stop-1-initial.dot new file mode 100644 index 0000000000..547e88a659 --- /dev/null +++ b/pengine/test10/ticket-group-stop-1-initial.dot @@ -0,0 +1,15 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 
node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-stop-1-initial.exp b/pengine/test10/ticket-group-stop-1-initial.exp new file mode 100644 index 0000000000..3cdd221fc7 --- /dev/null +++ b/pengine/test10/ticket-group-stop-1-initial.exp @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-1-initial.scores b/pengine/test10/ticket-group-stop-1-initial.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ b/pengine/test10/ticket-group-stop-1-initial.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-stop-1-initial.xml b/pengine/test10/ticket-group-stop-1-initial.xml new file mode 100644 index 0000000000..248396ece0 --- /dev/null +++ b/pengine/test10/ticket-group-stop-1-initial.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-2-granted.dot b/pengine/test10/ticket-group-stop-2-granted.dot new file mode 100644 index 0000000000..b33d124db4 --- /dev/null +++ b/pengine/test10/ticket-group-stop-2-granted.dot @@ -0,0 +1,18 @@ +digraph "g" { +"group1_running_0" [ style=bold color="green" fontcolor="orange" ] +"group1_start_0" -> "group1_running_0" [ style = bold] +"group1_start_0" -> "rsc1_start_0 node2" [ style = bold] +"group1_start_0" -> "rsc2_start_0 node2" [ style = bold] +"group1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc1_start_0 node2" -> "rsc1_monitor_5000 node2" [ style = bold] +"rsc1_start_0 node2" -> "rsc2_start_0 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node2" -> "group1_running_0" [ style = bold] +"rsc2_start_0 node2" -> "rsc2_monitor_5000 node2" [ style = bold] +"rsc2_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} 
diff --git a/pengine/test10/ticket-group-stop-2-granted.exp b/pengine/test10/ticket-group-stop-2-granted.exp new file mode 100644 index 0000000000..0c18c585fb --- /dev/null +++ b/pengine/test10/ticket-group-stop-2-granted.exp @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-2-granted.scores b/pengine/test10/ticket-group-stop-2-granted.scores new file mode 100644 index 0000000000..8078251c62 --- /dev/null +++ b/pengine/test10/ticket-group-stop-2-granted.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: 0 +group_color: rsc1 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-stop-2-granted.xml b/pengine/test10/ticket-group-stop-2-granted.xml new file mode 100644 index 0000000000..f4c8c434c5 --- /dev/null +++ b/pengine/test10/ticket-group-stop-2-granted.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-3-revoked.dot b/pengine/test10/ticket-group-stop-3-revoked.dot new file mode 100644 index 0000000000..3e24620652 --- /dev/null +++ b/pengine/test10/ticket-group-stop-3-revoked.dot @@ -0,0 +1,17 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"group1_stop_0" -> "group1_stopped_0" [ style = bold] +"group1_stop_0" -> "rsc1_stop_0 node2" [ style = bold] +"group1_stop_0" -> "rsc2_stop_0 node2" [ style = bold] +"group1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc2_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] +"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-group-stop-3-revoked.exp b/pengine/test10/ticket-group-stop-3-revoked.exp new file mode 100644 index 0000000000..3cd204e96a --- /dev/null +++ b/pengine/test10/ticket-group-stop-3-revoked.exp @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-group-stop-3-revoked.scores b/pengine/test10/ticket-group-stop-3-revoked.scores new file mode 100644 index 0000000000..e5b8167173 --- /dev/null +++ 
b/pengine/test10/ticket-group-stop-3-revoked.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: group1 allocation score on node1: 0 +group_color: group1 allocation score on node2: 0 +group_color: rsc1 allocation score on node1: -INFINITY +group_color: rsc1 allocation score on node2: -INFINITY +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-group-stop-3-revoked.xml b/pengine/test10/ticket-group-stop-3-revoked.xml new file mode 100644 index 0000000000..3746e031eb --- /dev/null +++ b/pengine/test10/ticket-group-stop-3-revoked.xml @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-1-initial.dot b/pengine/test10/ticket-master-demote-1-initial.dot new file mode 100644 index 0000000000..728f5b47eb --- /dev/null +++ b/pengine/test10/ticket-master-demote-1-initial.dot @@ -0,0 +1,22 @@ +digraph "g" { +"ms1_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_start_0" -> "ms1_running_0" [ style = bold] +"ms1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"ms1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"ms1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" -> "rsc1:0_start_0 node2" [ style = bold] +"probe_complete" -> "rsc1:1_start_0 node1" [ style = bold] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "ms1_running_0" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "ms1_running_0" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-demote-1-initial.exp b/pengine/test10/ticket-master-demote-1-initial.exp new file mode 100644 index 0000000000..f5068c5ebe --- /dev/null +++ b/pengine/test10/ticket-master-demote-1-initial.exp @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-1-initial.scores b/pengine/test10/ticket-master-demote-1-initial.scores new file mode 100644 index 0000000000..1cbce7411f --- /dev/null +++ b/pengine/test10/ticket-master-demote-1-initial.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation 
score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-master-demote-1-initial.xml b/pengine/test10/ticket-master-demote-1-initial.xml new file mode 100644 index 0000000000..629ecc923e --- /dev/null +++ b/pengine/test10/ticket-master-demote-1-initial.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-2-granted.dot b/pengine/test10/ticket-master-demote-2-granted.dot new file mode 100644 index 0000000000..3b12088520 --- /dev/null +++ b/pengine/test10/ticket-master-demote-2-granted.dot @@ -0,0 +1,9 @@ +digraph "g" { +"ms1_promote_0" -> "rsc1:1_promote_0 node1" [ style = bold] +"ms1_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_promote_0 node1" -> "ms1_promoted_0" [ style = bold] +"rsc1:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-demote-2-granted.exp b/pengine/test10/ticket-master-demote-2-granted.exp new file mode 100644 index 0000000000..702b7896ac --- /dev/null +++ b/pengine/test10/ticket-master-demote-2-granted.exp @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-2-granted.scores b/pengine/test10/ticket-master-demote-2-granted.scores new file mode 100644 index 0000000000..03ef2ab699 --- /dev/null +++ b/pengine/test10/ticket-master-demote-2-granted.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-master-demote-2-granted.xml b/pengine/test10/ticket-master-demote-2-granted.xml new file mode 100644 index 0000000000..fa15ae261c --- /dev/null +++ b/pengine/test10/ticket-master-demote-2-granted.xml @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-3-revoked.dot b/pengine/test10/ticket-master-demote-3-revoked.dot 
new file mode 100644 index 0000000000..597a48f4fa --- /dev/null +++ b/pengine/test10/ticket-master-demote-3-revoked.dot @@ -0,0 +1,10 @@ +digraph "g" { +"ms1_demote_0" -> "ms1_demoted_0" [ style = bold] +"ms1_demote_0" -> "rsc1:1_demote_0 node1" [ style = bold] +"ms1_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_demote_0 node1" -> "ms1_demoted_0" [ style = bold] +"rsc1:1_demote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-demote-3-revoked.exp b/pengine/test10/ticket-master-demote-3-revoked.exp new file mode 100644 index 0000000000..679631cf9a --- /dev/null +++ b/pengine/test10/ticket-master-demote-3-revoked.exp @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-demote-3-revoked.scores b/pengine/test10/ticket-master-demote-3-revoked.scores new file mode 100644 index 0000000000..3e4f30f6f1 --- /dev/null +++ b/pengine/test10/ticket-master-demote-3-revoked.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-master-demote-3-revoked.xml b/pengine/test10/ticket-master-demote-3-revoked.xml new file mode 100644 index 0000000000..2f691b4f4f --- /dev/null +++ b/pengine/test10/ticket-master-demote-3-revoked.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-1-initial.dot b/pengine/test10/ticket-master-fence-1-initial.dot new file mode 100644 index 0000000000..728f5b47eb --- /dev/null +++ b/pengine/test10/ticket-master-fence-1-initial.dot @@ -0,0 +1,22 @@ +digraph "g" { +"ms1_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_start_0" -> "ms1_running_0" [ style = bold] +"ms1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"ms1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"ms1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" -> "rsc1:0_start_0 node2" [ style = bold] +"probe_complete" -> "rsc1:1_start_0 node1" [ style = bold] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] 
+"rsc1:0_start_0 node2" -> "ms1_running_0" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "ms1_running_0" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-fence-1-initial.exp b/pengine/test10/ticket-master-fence-1-initial.exp new file mode 100644 index 0000000000..f5068c5ebe --- /dev/null +++ b/pengine/test10/ticket-master-fence-1-initial.exp @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-1-initial.scores b/pengine/test10/ticket-master-fence-1-initial.scores new file mode 100644 index 0000000000..1cbce7411f --- /dev/null +++ b/pengine/test10/ticket-master-fence-1-initial.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-master-fence-1-initial.xml b/pengine/test10/ticket-master-fence-1-initial.xml new file mode 100644 index 0000000000..f13a88cc37 --- /dev/null +++ b/pengine/test10/ticket-master-fence-1-initial.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-2-granted.dot b/pengine/test10/ticket-master-fence-2-granted.dot new file mode 100644 index 0000000000..3b12088520 --- /dev/null +++ b/pengine/test10/ticket-master-fence-2-granted.dot @@ -0,0 +1,9 @@ +digraph "g" { +"ms1_promote_0" -> "rsc1:1_promote_0 node1" [ style = bold] +"ms1_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_promote_0 node1" -> "ms1_promoted_0" [ style = bold] +"rsc1:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-fence-2-granted.exp b/pengine/test10/ticket-master-fence-2-granted.exp new file mode 100644 index 0000000000..702b7896ac --- /dev/null +++ b/pengine/test10/ticket-master-fence-2-granted.exp @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-2-granted.scores b/pengine/test10/ticket-master-fence-2-granted.scores new file mode 100644 index 0000000000..03ef2ab699 --- /dev/null +++ b/pengine/test10/ticket-master-fence-2-granted.scores 
@@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-master-fence-2-granted.xml b/pengine/test10/ticket-master-fence-2-granted.xml new file mode 100644 index 0000000000..4c7bd424b9 --- /dev/null +++ b/pengine/test10/ticket-master-fence-2-granted.xml @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-3-revoked.dot b/pengine/test10/ticket-master-fence-3-revoked.dot new file mode 100644 index 0000000000..7d432d6cc8 --- /dev/null +++ b/pengine/test10/ticket-master-fence-3-revoked.dot @@ -0,0 +1,31 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"ms1_demote_0" -> "ms1_demoted_0" [ style = bold] +"ms1_demote_0" -> "rsc1:1_demote_0 node1" [ style = bold] +"ms1_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_demoted_0" -> "ms1_stop_0" [ style = bold] +"ms1_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_stop_0" -> "ms1_stopped_0" [ style = bold] +"ms1_stop_0" -> "rsc1:1_stop_0 node1" [ style = bold] +"ms1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_demote_0 node1" -> "ms1_demoted_0" [ style = bold] +"rsc1:1_demote_0 node1" -> "rsc1:1_stop_0 node1" [ style = bold] +"rsc1:1_demote_0 node1" [ style=bold color="green" fontcolor="orange"] +"rsc1:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc1:1_stop_0 node1" -> "ms1_stopped_0" [ style = bold] +"rsc1:1_stop_0 node1" [ style=bold color="green" fontcolor="orange" ] +"stonith node1" -> "all_stopped" [ style = bold] +"stonith node1" -> "ms1_stop_0" [ style = bold] +"stonith node1" -> "rsc1:1_demote_0 node1" [ style = bold] +"stonith node1" -> "rsc1:1_stop_0 node1" [ style = bold] +"stonith node1" -> "stonith_complete" [ style = bold] +"stonith node1" [ style=bold color="green" fontcolor="black" ] +"stonith_complete" -> "rsc1:1_demote_0 node1" [ style = bold] +"stonith_complete" -> "rsc1:1_stop_0 node1" [ style = bold] +"stonith_complete" [ style=bold color="green" fontcolor="orange" ] +"stonith_up" -> "stonith node1" [ style = bold] +"stonith_up" -> "stonith_complete" [ style = bold] +"stonith_up" [ style=bold color="green" fontcolor="orange" ] +} diff --git a/pengine/test10/ticket-master-fence-3-revoked.exp b/pengine/test10/ticket-master-fence-3-revoked.exp new file mode 100644 index 0000000000..a411188bb8 --- /dev/null +++ b/pengine/test10/ticket-master-fence-3-revoked.exp @@ -0,0 +1,153 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-fence-3-revoked.scores b/pengine/test10/ticket-master-fence-3-revoked.scores new file mode 100644 index 0000000000..fcd7fd92e0 --- /dev/null +++ b/pengine/test10/ticket-master-fence-3-revoked.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on none: 0 diff --git a/pengine/test10/ticket-master-fence-3-revoked.xml b/pengine/test10/ticket-master-fence-3-revoked.xml new file mode 100644 index 0000000000..b81e762e94 --- /dev/null +++ b/pengine/test10/ticket-master-fence-3-revoked.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-freeze-1-initial.dot b/pengine/test10/ticket-master-freeze-1-initial.dot new file mode 100644 index 0000000000..728f5b47eb --- /dev/null +++ b/pengine/test10/ticket-master-freeze-1-initial.dot @@ -0,0 +1,22 @@ +digraph "g" { +"ms1_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_start_0" -> "ms1_running_0" [ style = bold] +"ms1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"ms1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"ms1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" -> "rsc1:0_start_0 node2" [ style = bold] +"probe_complete" -> "rsc1:1_start_0 node1" [ style = bold] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "ms1_running_0" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "ms1_running_0" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-freeze-1-initial.exp b/pengine/test10/ticket-master-freeze-1-initial.exp new file mode 100644 index 0000000000..f5068c5ebe --- /dev/null +++ b/pengine/test10/ticket-master-freeze-1-initial.exp @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/pengine/test10/ticket-master-freeze-1-initial.scores b/pengine/test10/ticket-master-freeze-1-initial.scores new file mode 100644 index 0000000000..1cbce7411f --- /dev/null +++ b/pengine/test10/ticket-master-freeze-1-initial.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-master-freeze-1-initial.xml b/pengine/test10/ticket-master-freeze-1-initial.xml new file mode 100644 index 0000000000..8dbd7d9179 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-1-initial.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-freeze-2-granted.dot b/pengine/test10/ticket-master-freeze-2-granted.dot new file mode 100644 index 0000000000..3b12088520 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-2-granted.dot @@ -0,0 +1,9 @@ +digraph "g" { +"ms1_promote_0" -> "rsc1:1_promote_0 node1" [ style = bold] +"ms1_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_promote_0 node1" -> "ms1_promoted_0" [ style = bold] +"rsc1:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-freeze-2-granted.exp b/pengine/test10/ticket-master-freeze-2-granted.exp new file mode 100644 index 0000000000..702b7896ac --- /dev/null +++ b/pengine/test10/ticket-master-freeze-2-granted.exp @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-freeze-2-granted.scores b/pengine/test10/ticket-master-freeze-2-granted.scores new file mode 100644 index 0000000000..03ef2ab699 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-2-granted.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-master-freeze-2-granted.xml b/pengine/test10/ticket-master-freeze-2-granted.xml new file mode 100644 index 
0000000000..151d5fbe97 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-2-granted.xml @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-freeze-3-revoked.dot b/pengine/test10/ticket-master-freeze-3-revoked.dot new file mode 100644 index 0000000000..5a800ee418 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-3-revoked.dot @@ -0,0 +1,4 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-freeze-3-revoked.exp b/pengine/test10/ticket-master-freeze-3-revoked.exp new file mode 100644 index 0000000000..efac49f6bf --- /dev/null +++ b/pengine/test10/ticket-master-freeze-3-revoked.exp @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-freeze-3-revoked.scores b/pengine/test10/ticket-master-freeze-3-revoked.scores new file mode 100644 index 0000000000..32ce4de7b5 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-3-revoked.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 1 +clone_color: rsc1:1 allocation score on node1: 1 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 1 +native_color: rsc1:1 allocation score on node1: 1 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: INFINITY diff --git a/pengine/test10/ticket-master-freeze-3-revoked.xml b/pengine/test10/ticket-master-freeze-3-revoked.xml new file mode 100644 index 0000000000..3a238ebcb8 --- /dev/null +++ b/pengine/test10/ticket-master-freeze-3-revoked.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-stop-1-initial.dot b/pengine/test10/ticket-master-stop-1-initial.dot new file mode 100644 index 0000000000..4601f99af7 --- /dev/null +++ b/pengine/test10/ticket-master-stop-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-stop-1-initial.exp b/pengine/test10/ticket-master-stop-1-initial.exp new file mode 100644 index 0000000000..06eb083eb8 --- /dev/null +++ b/pengine/test10/ticket-master-stop-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + diff --git a/pengine/test10/ticket-master-stop-1-initial.scores b/pengine/test10/ticket-master-stop-1-initial.scores new file mode 100644 index 0000000000..23d58d8523 --- /dev/null +++ b/pengine/test10/ticket-master-stop-1-initial.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on none: 0 +rsc1:1 promotion score on none: 0 diff --git a/pengine/test10/ticket-master-stop-1-initial.xml b/pengine/test10/ticket-master-stop-1-initial.xml new file mode 100644 index 0000000000..18a942492a --- /dev/null +++ b/pengine/test10/ticket-master-stop-1-initial.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-stop-2-granted.dot b/pengine/test10/ticket-master-stop-2-granted.dot new file mode 100644 index 0000000000..6562b2cee3 --- /dev/null +++ b/pengine/test10/ticket-master-stop-2-granted.dot @@ -0,0 +1,20 @@ +digraph "g" { +"ms1_promote_0" -> "rsc1:1_promote_0 node1" [ style = bold] +"ms1_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_running_0" -> "ms1_promote_0" [ style = bold] +"ms1_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_start_0" -> "ms1_running_0" [ style = bold] +"ms1_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"ms1_start_0" -> "rsc1:1_start_0 node1" [ style = bold] +"ms1_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_start_0 node2" -> "ms1_running_0" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_promote_0 node1" -> "ms1_promoted_0" [ style = bold] +"rsc1:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_start_0 node1" -> "ms1_running_0" [ style = bold] +"rsc1:1_start_0 node1" -> "rsc1:1_promote_0 node1" [ style = bold] +"rsc1:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-stop-2-granted.exp b/pengine/test10/ticket-master-stop-2-granted.exp new file mode 100644 index 0000000000..d6a95b399d --- /dev/null +++ b/pengine/test10/ticket-master-stop-2-granted.exp @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-stop-2-granted.scores b/pengine/test10/ticket-master-stop-2-granted.scores new file mode 100644 index 0000000000..d3e5d03062 --- /dev/null +++ b/pengine/test10/ticket-master-stop-2-granted.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 
allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 allocation score on node1: 0 +clone_color: rsc1:0 allocation score on node2: 0 +clone_color: rsc1:1 allocation score on node1: 0 +clone_color: rsc1:1 allocation score on node2: 0 +native_color: rsc1:0 allocation score on node1: 0 +native_color: rsc1:0 allocation score on node2: 0 +native_color: rsc1:1 allocation score on node1: 0 +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on node2: -1 +rsc1:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-master-stop-2-granted.xml b/pengine/test10/ticket-master-stop-2-granted.xml new file mode 100644 index 0000000000..b45fe979f7 --- /dev/null +++ b/pengine/test10/ticket-master-stop-2-granted.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-stop-3-revoked.dot b/pengine/test10/ticket-master-stop-3-revoked.dot new file mode 100644 index 0000000000..913e54652f --- /dev/null +++ b/pengine/test10/ticket-master-stop-3-revoked.dot @@ -0,0 +1,24 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"ms1_demote_0" -> "ms1_demoted_0" [ style = bold] +"ms1_demote_0" -> "rsc1:1_demote_0 node1" [ style = bold] +"ms1_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_demoted_0" -> "ms1_stop_0" [ style = bold] +"ms1_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_stop_0" -> "ms1_stopped_0" [ style = bold] +"ms1_stop_0" -> "rsc1:0_stop_0 node2" [ style = bold] +"ms1_stop_0" -> "rsc1:1_stop_0 node1" [ style = bold] +"ms1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1:0_stop_0 node2" -> "ms1_stopped_0" [ style = bold] +"rsc1:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_demote_0 node1" -> "ms1_demoted_0" [ style = bold] +"rsc1:1_demote_0 node1" -> "rsc1:1_stop_0 node1" [ style = bold] +"rsc1:1_demote_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc1:1_stop_0 node1" -> "ms1_stopped_0" [ style = bold] +"rsc1:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-master-stop-3-revoked.exp b/pengine/test10/ticket-master-stop-3-revoked.exp new file mode 100644 index 0000000000..82e5a72998 --- /dev/null +++ b/pengine/test10/ticket-master-stop-3-revoked.exp @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-master-stop-3-revoked.scores b/pengine/test10/ticket-master-stop-3-revoked.scores new file mode 100644 index 0000000000..23d58d8523 --- /dev/null +++ b/pengine/test10/ticket-master-stop-3-revoked.scores @@ -0,0 +1,15 @@ +Allocation scores: +clone_color: ms1 allocation score on node1: 0 +clone_color: ms1 allocation score on node2: 0 +clone_color: rsc1:0 
allocation score on node1: -INFINITY +clone_color: rsc1:0 allocation score on node2: -INFINITY +clone_color: rsc1:1 allocation score on node1: -INFINITY +clone_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc1:0 allocation score on node1: -INFINITY +native_color: rsc1:0 allocation score on node2: -INFINITY +native_color: rsc1:1 allocation score on node1: -INFINITY +native_color: rsc1:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc1:0 promotion score on none: 0 +rsc1:1 promotion score on none: 0 diff --git a/pengine/test10/ticket-master-stop-3-revoked.xml b/pengine/test10/ticket-master-stop-3-revoked.xml new file mode 100644 index 0000000000..641b26a1d1 --- /dev/null +++ b/pengine/test10/ticket-master-stop-3-revoked.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-1-initial.dot b/pengine/test10/ticket-primitive-demote-1-initial.dot new file mode 100644 index 0000000000..32068cd299 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-demote-1-initial.exp b/pengine/test10/ticket-primitive-demote-1-initial.exp new file mode 100644 index 0000000000..d0e174955f --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-1-initial.scores b/pengine/test10/ticket-primitive-demote-1-initial.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-1-initial.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-demote-1-initial.xml b/pengine/test10/ticket-primitive-demote-1-initial.xml new file mode 100644 index 0000000000..1b22ea30c7 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-1-initial.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-2-granted.dot b/pengine/test10/ticket-primitive-demote-2-granted.dot new file mode 100644 index 0000000000..1f4f142966 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-2-granted.dot @@ -0,0 +1,7 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" 
fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-demote-2-granted.exp b/pengine/test10/ticket-primitive-demote-2-granted.exp new file mode 100644 index 0000000000..75606ccefb --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-2-granted.exp @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-2-granted.scores b/pengine/test10/ticket-primitive-demote-2-granted.scores new file mode 100644 index 0000000000..5c5889bb39 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-2-granted.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-demote-2-granted.xml b/pengine/test10/ticket-primitive-demote-2-granted.xml new file mode 100644 index 0000000000..60e7df6386 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-2-granted.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-3-revoked.dot b/pengine/test10/ticket-primitive-demote-3-revoked.dot new file mode 100644 index 0000000000..026eef8064 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-3-revoked.dot @@ -0,0 +1,7 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-demote-3-revoked.exp b/pengine/test10/ticket-primitive-demote-3-revoked.exp new file mode 100644 index 0000000000..0cd9d6157e --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-3-revoked.exp @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-demote-3-revoked.scores b/pengine/test10/ticket-primitive-demote-3-revoked.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-3-revoked.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-demote-3-revoked.xml b/pengine/test10/ticket-primitive-demote-3-revoked.xml new file mode 100644 index 0000000000..958ef16c78 --- /dev/null +++ b/pengine/test10/ticket-primitive-demote-3-revoked.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-1-initial.dot b/pengine/test10/ticket-primitive-fence-1-initial.dot new file mode 100644 index 0000000000..32068cd299 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-1-initial.dot @@ -0,0 
+1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-fence-1-initial.exp b/pengine/test10/ticket-primitive-fence-1-initial.exp new file mode 100644 index 0000000000..d0e174955f --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-1-initial.scores b/pengine/test10/ticket-primitive-fence-1-initial.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-1-initial.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-fence-1-initial.xml b/pengine/test10/ticket-primitive-fence-1-initial.xml new file mode 100644 index 0000000000..e38cbed293 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-1-initial.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-2-granted.dot b/pengine/test10/ticket-primitive-fence-2-granted.dot new file mode 100644 index 0000000000..1f4f142966 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-2-granted.dot @@ -0,0 +1,7 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-fence-2-granted.exp b/pengine/test10/ticket-primitive-fence-2-granted.exp new file mode 100644 index 0000000000..75606ccefb --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-2-granted.exp @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-2-granted.scores b/pengine/test10/ticket-primitive-fence-2-granted.scores new file mode 100644 index 0000000000..5c5889bb39 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-2-granted.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-fence-2-granted.xml b/pengine/test10/ticket-primitive-fence-2-granted.xml new file mode 100644 index 0000000000..5ed4f6fece --- /dev/null +++ 
b/pengine/test10/ticket-primitive-fence-2-granted.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-3-revoked.dot b/pengine/test10/ticket-primitive-fence-3-revoked.dot new file mode 100644 index 0000000000..ff183c8d96 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-3-revoked.dot @@ -0,0 +1,15 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="orange" ] +"stonith node2" -> "all_stopped" [ style = bold] +"stonith node2" -> "rsc1_stop_0 node2" [ style = bold] +"stonith node2" -> "stonith_complete" [ style = bold] +"stonith node2" [ style=bold color="green" fontcolor="black" ] +"stonith_complete" -> "rsc1_stop_0 node2" [ style = bold] +"stonith_complete" [ style=bold color="green" fontcolor="orange" ] +"stonith_up" -> "stonith node2" [ style = bold] +"stonith_up" -> "stonith_complete" [ style = bold] +"stonith_up" [ style=bold color="green" fontcolor="orange" ] +} diff --git a/pengine/test10/ticket-primitive-fence-3-revoked.exp b/pengine/test10/ticket-primitive-fence-3-revoked.exp new file mode 100644 index 0000000000..c30a389411 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-3-revoked.exp @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-fence-3-revoked.scores b/pengine/test10/ticket-primitive-fence-3-revoked.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-3-revoked.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-fence-3-revoked.xml b/pengine/test10/ticket-primitive-fence-3-revoked.xml new file mode 100644 index 0000000000..4b7225d757 --- /dev/null +++ b/pengine/test10/ticket-primitive-fence-3-revoked.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-1-initial.dot b/pengine/test10/ticket-primitive-freeze-1-initial.dot new file mode 100644 index 0000000000..32068cd299 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-freeze-1-initial.exp b/pengine/test10/ticket-primitive-freeze-1-initial.exp new file 
mode 100644 index 0000000000..d0e174955f --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-1-initial.scores b/pengine/test10/ticket-primitive-freeze-1-initial.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-1-initial.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-freeze-1-initial.xml b/pengine/test10/ticket-primitive-freeze-1-initial.xml new file mode 100644 index 0000000000..3902f10710 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-1-initial.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-2-granted.dot b/pengine/test10/ticket-primitive-freeze-2-granted.dot new file mode 100644 index 0000000000..1f4f142966 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-2-granted.dot @@ -0,0 +1,7 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-freeze-2-granted.exp b/pengine/test10/ticket-primitive-freeze-2-granted.exp new file mode 100644 index 0000000000..75606ccefb --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-2-granted.exp @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-2-granted.scores b/pengine/test10/ticket-primitive-freeze-2-granted.scores new file mode 100644 index 0000000000..5c5889bb39 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-2-granted.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-freeze-2-granted.xml b/pengine/test10/ticket-primitive-freeze-2-granted.xml new file mode 100644 index 0000000000..8f21e292e8 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-2-granted.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-3-revoked.dot b/pengine/test10/ticket-primitive-freeze-3-revoked.dot new file mode 100644 index 0000000000..5a800ee418 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-3-revoked.dot @@ -0,0 +1,4 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-freeze-3-revoked.exp b/pengine/test10/ticket-primitive-freeze-3-revoked.exp new file mode 100644 index 
0000000000..f067af55c9 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-3-revoked.exp @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-freeze-3-revoked.scores b/pengine/test10/ticket-primitive-freeze-3-revoked.scores new file mode 100644 index 0000000000..5c5889bb39 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-3-revoked.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-freeze-3-revoked.xml b/pengine/test10/ticket-primitive-freeze-3-revoked.xml new file mode 100644 index 0000000000..9c7b7f2845 --- /dev/null +++ b/pengine/test10/ticket-primitive-freeze-3-revoked.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-1-initial.dot b/pengine/test10/ticket-primitive-stop-1-initial.dot new file mode 100644 index 0000000000..32068cd299 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-1-initial.dot @@ -0,0 +1,11 @@ +digraph "g" { +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-stop-1-initial.exp b/pengine/test10/ticket-primitive-stop-1-initial.exp new file mode 100644 index 0000000000..d0e174955f --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-1-initial.exp @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-1-initial.scores b/pengine/test10/ticket-primitive-stop-1-initial.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-1-initial.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-stop-1-initial.xml b/pengine/test10/ticket-primitive-stop-1-initial.xml new file mode 100644 index 0000000000..cf413f613b --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-1-initial.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-2-granted.dot b/pengine/test10/ticket-primitive-stop-2-granted.dot new file mode 100644 index 0000000000..1f4f142966 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-2-granted.dot @@ -0,0 +1,7 @@ +digraph "g" { +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" 
fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-stop-2-granted.exp b/pengine/test10/ticket-primitive-stop-2-granted.exp new file mode 100644 index 0000000000..75606ccefb --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-2-granted.exp @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-2-granted.scores b/pengine/test10/ticket-primitive-stop-2-granted.scores new file mode 100644 index 0000000000..5c5889bb39 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-2-granted.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-stop-2-granted.xml b/pengine/test10/ticket-primitive-stop-2-granted.xml new file mode 100644 index 0000000000..190fe6cecd --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-2-granted.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-3-revoked.dot b/pengine/test10/ticket-primitive-stop-3-revoked.dot new file mode 100644 index 0000000000..026eef8064 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-3-revoked.dot @@ -0,0 +1,7 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-primitive-stop-3-revoked.exp b/pengine/test10/ticket-primitive-stop-3-revoked.exp new file mode 100644 index 0000000000..0cd9d6157e --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-3-revoked.exp @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-primitive-stop-3-revoked.scores b/pengine/test10/ticket-primitive-stop-3-revoked.scores new file mode 100644 index 0000000000..ed3f3cdfd6 --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-3-revoked.scores @@ -0,0 +1,5 @@ +Allocation scores: +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 diff --git a/pengine/test10/ticket-primitive-stop-3-revoked.xml b/pengine/test10/ticket-primitive-stop-3-revoked.xml new file mode 100644 index 0000000000..3f535f169d --- /dev/null +++ b/pengine/test10/ticket-primitive-stop-3-revoked.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.dot b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.dot new file mode 100644 index 0000000000..fe1ab9f5db --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.dot @@ -0,0 +1,38 @@ +digraph "g" { 
+"ms5_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_start_0" -> "ms5_running_0" [ style = bold] +"ms5_start_0" -> "rsc5:0_start_0 node2" [ style = bold] +"ms5_start_0" -> "rsc5:1_start_0 node1" [ style = bold] +"ms5_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" -> "rsc5:0_start_0 node2" [ style = bold] +"probe_complete" -> "rsc5:1_start_0 node1" [ style = bold] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc3_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc3_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc4:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc4:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc5:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:0_start_0 node2" -> "ms5_running_0" [ style = bold] +"rsc5:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc5:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_start_0 node1" -> "ms5_running_0" [ style = bold] +"rsc5:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.exp b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.exp new file mode 100644 index 0000000000..1eedbd074f --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.exp @@ -0,0 +1,214 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.scores b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.scores new file mode 100644 index 0000000000..ccce5542a9 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation 
score on node2: 0 +clone_color: rsc4:0 allocation score on node1: -INFINITY +clone_color: rsc4:0 allocation score on node2: -INFINITY +clone_color: rsc4:1 allocation score on node1: -INFINITY +clone_color: rsc4:1 allocation score on node2: -INFINITY +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 0 +clone_color: rsc5:1 allocation score on node1: 0 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +group_color: rsc3 allocation score on node1: -INFINITY +group_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc3 allocation score on node1: -INFINITY +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: -INFINITY +native_color: rsc4:0 allocation score on node2: -INFINITY +native_color: rsc4:1 allocation score on node1: -INFINITY +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 0 +native_color: rsc5:1 allocation score on node1: 0 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.xml b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.xml new file mode 100644 index 0000000000..36c346c07a --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-1-initial.xml @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.dot b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.dot new file mode 100644 index 0000000000..29966395d5 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.dot @@ -0,0 +1,39 @@ +digraph "g" { +"clone4_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone4_start_0" -> "clone4_running_0" [ style = bold] +"clone4_start_0" -> "rsc4:0_start_0 node2" [ style = bold] +"clone4_start_0" -> "rsc4:1_start_0 node1" [ style = bold] +"clone4_start_0" [ style=bold color="green" fontcolor="orange" ] +"group2_running_0" [ style=bold color="green" fontcolor="orange" ] +"group2_start_0" -> "group2_running_0" [ style = bold] +"group2_start_0" -> "rsc2_start_0 node1" [ style = bold] +"group2_start_0" -> "rsc3_start_0 node1" [ style = bold] +"group2_start_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_promote_0" -> "rsc5:1_promote_0 node1" [ style = bold] +"ms5_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> 
"rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node1" -> "group2_running_0" [ style = bold] +"rsc2_start_0 node1" -> "rsc2_monitor_5000 node1" [ style = bold] +"rsc2_start_0 node1" -> "rsc3_start_0 node1" [ style = bold] +"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_start_0 node1" -> "group2_running_0" [ style = bold] +"rsc3_start_0 node1" -> "rsc3_monitor_5000 node1" [ style = bold] +"rsc3_start_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_start_0 node2" -> "clone4_running_0" [ style = bold] +"rsc4:0_start_0 node2" -> "rsc4:0_monitor_5000 node2" [ style = bold] +"rsc4:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_start_0 node1" -> "clone4_running_0" [ style = bold] +"rsc4:1_start_0 node1" -> "rsc4:1_monitor_5000 node1" [ style = bold] +"rsc4:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_promote_0 node1" -> "ms5_promoted_0" [ style = bold] +"rsc5:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.exp b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.exp new file mode 100644 index 0000000000..6fe02b7053 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.exp @@ -0,0 +1,233 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.scores b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.scores new file mode 100644 index 0000000000..95d4445e43 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: 0 +clone_color: rsc4:0 allocation score on node2: 0 +clone_color: rsc4:1 allocation score on node1: 0 +clone_color: rsc4:1 allocation score on node2: 0 +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 1 +clone_color: rsc5:1 allocation score on node1: 1 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +group_color: rsc3 allocation score on node1: 0 +group_color: rsc3 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: 0 +native_color: rsc2 allocation score on node2: 0 +native_color: rsc3 allocation score 
on node1: 0 +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: 0 +native_color: rsc4:0 allocation score on node2: 0 +native_color: rsc4:1 allocation score on node1: 0 +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 1 +native_color: rsc5:1 allocation score on node1: 1 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.xml b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.xml new file mode 100644 index 0000000000..b0a8ffdbc4 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-2-granted.xml @@ -0,0 +1,132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.dot b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.dot new file mode 100644 index 0000000000..c489cf8dbb --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.dot @@ -0,0 +1,36 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"clone4_stop_0" -> "clone4_stopped_0" [ style = bold] +"clone4_stop_0" -> "rsc4:0_stop_0 node2" [ style = bold] +"clone4_stop_0" -> "rsc4:1_stop_0 node1" [ style = bold] +"clone4_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone4_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "rsc2_stop_0 node1" [ style = bold] +"group2_stop_0" -> "rsc3_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_demote_0" -> "ms5_demoted_0" [ style = bold] +"ms5_demote_0" -> "rsc5:1_demote_0 node1" [ style = bold] +"ms5_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc2_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"rsc2_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc3_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"rsc3_stop_0 node1" -> "rsc2_stop_0 node1" [ style = bold] +"rsc3_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc4:0_stop_0 node2" -> "clone4_stopped_0" [ style = bold] +"rsc4:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc4:1_stop_0 node1" -> "clone4_stopped_0" [ style = bold] +"rsc4:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_demote_0 node1" -> "ms5_demoted_0" [ style = bold] +"rsc5:1_demote_0 node1" [ style=bold 
color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.exp b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.exp new file mode 100644 index 0000000000..ef47e0bd08 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.exp @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.scores b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.scores new file mode 100644 index 0000000000..4fd810123c --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: -INFINITY +clone_color: rsc4:0 allocation score on node2: -INFINITY +clone_color: rsc4:1 allocation score on node1: -INFINITY +clone_color: rsc4:1 allocation score on node2: -INFINITY +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 1 +clone_color: rsc5:1 allocation score on node1: 1 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +group_color: rsc3 allocation score on node1: -INFINITY +group_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc3 allocation score on node1: -INFINITY +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: -INFINITY +native_color: rsc4:0 allocation score on node2: -INFINITY +native_color: rsc4:1 allocation score on node1: -INFINITY +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 1 +native_color: rsc5:1 allocation score on node1: 1 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.xml b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.xml new file mode 100644 index 0000000000..96396bbdcf --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-1-ticket-3-revoked.xml @@ -0,0 +1,145 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.dot 
b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.dot new file mode 100644 index 0000000000..fe1ab9f5db --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.dot @@ -0,0 +1,38 @@ +digraph "g" { +"ms5_running_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_start_0" -> "ms5_running_0" [ style = bold] +"ms5_start_0" -> "rsc5:0_start_0 node2" [ style = bold] +"ms5_start_0" -> "rsc5:1_start_0 node1" [ style = bold] +"ms5_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" -> "probe_complete" [ style = bold] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" -> "probe_complete" [ style = bold] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"probe_complete" -> "rsc5:0_start_0 node2" [ style = bold] +"probe_complete" -> "rsc5:1_start_0 node1" [ style = bold] +"probe_complete" [ style=bold color="green" fontcolor="orange" ] +"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc3_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc3_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc4:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc4:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] +"rsc5:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:0_start_0 node2" -> "ms5_running_0" [ style = bold] +"rsc5:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] +"rsc5:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_start_0 node1" -> "ms5_running_0" [ style = bold] +"rsc5:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.exp b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.exp new file mode 100644 index 0000000000..1eedbd074f --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.exp @@ -0,0 +1,214 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.scores b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.scores new file mode 100644 index 0000000000..ccce5542a9 --- /dev/null +++ 
b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: -INFINITY +clone_color: rsc4:0 allocation score on node2: -INFINITY +clone_color: rsc4:1 allocation score on node1: -INFINITY +clone_color: rsc4:1 allocation score on node2: -INFINITY +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 0 +clone_color: rsc5:1 allocation score on node1: 0 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +group_color: rsc3 allocation score on node1: -INFINITY +group_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc3 allocation score on node1: -INFINITY +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: -INFINITY +native_color: rsc4:0 allocation score on node2: -INFINITY +native_color: rsc4:1 allocation score on node1: -INFINITY +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 0 +native_color: rsc5:1 allocation score on node1: 0 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.xml b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.xml new file mode 100644 index 0000000000..7160889d39 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-1-initial.xml @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.dot b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.dot new file mode 100644 index 0000000000..9abc1362a5 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.dot @@ -0,0 +1,21 @@ +digraph "g" { +"group2_running_0" [ style=bold color="green" fontcolor="orange" ] +"group2_start_0" -> "group2_running_0" [ style = bold] +"group2_start_0" -> "rsc2_start_0 node1" [ style = bold] +"group2_start_0" -> "rsc3_start_0 node1" [ style = bold] +"group2_start_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold] +"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc2_start_0 node1" -> "group2_running_0" 
[ style = bold] +"rsc2_start_0 node1" -> "rsc2_monitor_5000 node1" [ style = bold] +"rsc2_start_0 node1" -> "rsc3_start_0 node1" [ style = bold] +"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_start_0 node1" -> "group2_running_0" [ style = bold] +"rsc3_start_0 node1" -> "rsc3_monitor_5000 node1" [ style = bold] +"rsc3_start_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.exp b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.exp new file mode 100644 index 0000000000..6c3541ab83 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.exp @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.scores b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.scores new file mode 100644 index 0000000000..4f5364e3c4 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: -INFINITY +clone_color: rsc4:0 allocation score on node2: -INFINITY +clone_color: rsc4:1 allocation score on node1: -INFINITY +clone_color: rsc4:1 allocation score on node2: -INFINITY +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 1 +clone_color: rsc5:1 allocation score on node1: 1 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +group_color: rsc3 allocation score on node1: 0 +group_color: rsc3 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 allocation score on node2: 0 +native_color: rsc2 allocation score on node1: 0 +native_color: rsc2 allocation score on node2: 0 +native_color: rsc3 allocation score on node1: 0 +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: -INFINITY +native_color: rsc4:0 allocation score on node2: -INFINITY +native_color: rsc4:1 allocation score on node1: -INFINITY +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 1 +native_color: rsc5:1 allocation score on node1: 1 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.xml b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.xml new file mode 100644 index 0000000000..e416852696 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-2-granted.xml @@ -0,0 +1,136 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.dot b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.dot new file mode 100644 index 0000000000..8bec10e3ad --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.dot @@ -0,0 +1,22 @@ +digraph "g" { +"clone4_running_0" [ style=bold color="green" fontcolor="orange" ] +"clone4_start_0" -> "clone4_running_0" [ style = bold] +"clone4_start_0" -> "rsc4:0_start_0 node2" [ style = bold] +"clone4_start_0" -> "rsc4:1_start_0 node1" [ style = bold] +"clone4_start_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_promote_0" -> "rsc5:1_promote_0 node1" [ style = bold] +"ms5_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_monitor_5000 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_start_0 node2" -> "clone4_running_0" [ style = bold] +"rsc4:0_start_0 node2" -> "rsc4:0_monitor_5000 node2" [ style = bold] +"rsc4:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_start_0 node1" -> "clone4_running_0" [ style = bold] +"rsc4:1_start_0 node1" -> "rsc4:1_monitor_5000 node1" [ style = bold] +"rsc4:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_promote_0 node1" -> "ms5_promoted_0" [ style = bold] +"rsc5:1_promote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.exp b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.exp new file mode 100644 index 0000000000..1e21b0afe2 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.exp @@ -0,0 +1,130 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.scores b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.scores new file mode 100644 index 0000000000..95d4445e43 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: 0 +clone_color: rsc4:0 allocation score on node2: 0 +clone_color: rsc4:1 allocation score on node1: 0 +clone_color: rsc4:1 allocation score on node2: 0 +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 1 +clone_color: rsc5:1 allocation score on node1: 1 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: 0 +group_color: rsc2 allocation score on node2: 0 +group_color: rsc3 allocation score on node1: 0 +group_color: rsc3 allocation score on node2: 0 +native_color: rsc1 allocation score on node1: 0 +native_color: rsc1 
allocation score on node2: 0 +native_color: rsc2 allocation score on node1: 0 +native_color: rsc2 allocation score on node2: 0 +native_color: rsc3 allocation score on node1: 0 +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: 0 +native_color: rsc4:0 allocation score on node2: 0 +native_color: rsc4:1 allocation score on node1: 0 +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 1 +native_color: rsc5:1 allocation score on node1: 1 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: 9 diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.xml b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.xml new file mode 100644 index 0000000000..dd8f577186 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-3-granted.xml @@ -0,0 +1,143 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.dot b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.dot new file mode 100644 index 0000000000..c489cf8dbb --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.dot @@ -0,0 +1,36 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"clone4_stop_0" -> "clone4_stopped_0" [ style = bold] +"clone4_stop_0" -> "rsc4:0_stop_0 node2" [ style = bold] +"clone4_stop_0" -> "rsc4:1_stop_0 node1" [ style = bold] +"clone4_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone4_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "rsc2_stop_0 node1" [ style = bold] +"group2_stop_0" -> "rsc3_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_demote_0" -> "ms5_demoted_0" [ style = bold] +"ms5_demote_0" -> "rsc5:1_demote_0 node1" [ style = bold] +"ms5_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms5_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"probe_complete node1" [ style=bold color="green" fontcolor="black" ] +"probe_complete node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc2_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc2_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"rsc2_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc3_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc3_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"rsc3_stop_0 node1" -> "rsc2_stop_0 node1" [ style = bold] +"rsc3_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc4:0_stop_0 node2" -> "all_stopped" [ style = bold] +"rsc4:0_stop_0 node2" -> "clone4_stopped_0" [ style = bold] +"rsc4:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc4:1_stop_0 node1" -> "all_stopped" [ style = bold] +"rsc4:1_stop_0 node1" -> 
"clone4_stopped_0" [ style = bold] +"rsc4:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] +"rsc5:1_demote_0 node1" -> "ms5_demoted_0" [ style = bold] +"rsc5:1_demote_0 node1" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.exp b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.exp new file mode 100644 index 0000000000..ef47e0bd08 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.exp @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.scores b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.scores new file mode 100644 index 0000000000..4fd810123c --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.scores @@ -0,0 +1,37 @@ +Allocation scores: +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: ms5 allocation score on node1: 0 +clone_color: ms5 allocation score on node2: 0 +clone_color: rsc4:0 allocation score on node1: -INFINITY +clone_color: rsc4:0 allocation score on node2: -INFINITY +clone_color: rsc4:1 allocation score on node1: -INFINITY +clone_color: rsc4:1 allocation score on node2: -INFINITY +clone_color: rsc5:0 allocation score on node1: 0 +clone_color: rsc5:0 allocation score on node2: 1 +clone_color: rsc5:1 allocation score on node1: 1 +clone_color: rsc5:1 allocation score on node2: 0 +group_color: group2 allocation score on node1: 0 +group_color: group2 allocation score on node2: 0 +group_color: rsc2 allocation score on node1: -INFINITY +group_color: rsc2 allocation score on node2: -INFINITY +group_color: rsc3 allocation score on node1: -INFINITY +group_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc1 allocation score on node1: -INFINITY +native_color: rsc1 allocation score on node2: -INFINITY +native_color: rsc2 allocation score on node1: -INFINITY +native_color: rsc2 allocation score on node2: -INFINITY +native_color: rsc3 allocation score on node1: -INFINITY +native_color: rsc3 allocation score on node2: -INFINITY +native_color: rsc4:0 allocation score on node1: -INFINITY +native_color: rsc4:0 allocation score on node2: -INFINITY +native_color: rsc4:1 allocation score on node1: -INFINITY +native_color: rsc4:1 allocation score on node2: -INFINITY +native_color: rsc5:0 allocation score on node1: 0 +native_color: rsc5:0 allocation score on node2: 1 +native_color: rsc5:1 allocation score on node1: 1 +native_color: rsc5:1 allocation score on node2: -INFINITY +native_color: rsc_stonith allocation score on node1: 0 +native_color: rsc_stonith allocation score on node2: 0 +rsc5:0 promotion score on node2: -1 +rsc5:1 promotion score on node1: -INFINITY diff --git a/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.xml b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.xml new file mode 100644 index 0000000000..7ebbc667b5 --- /dev/null +++ b/pengine/test10/ticket-rsc-sets-2-tickets-4-revoked.xml @@ -0,0 +1,150 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/utils.h b/pengine/utils.h index ce7d00e6c8..2be2c6b2af 100644 --- a/pengine/utils.h +++ b/pengine/utils.h @@ -1,71 +1,74 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PENGINE_AUTILS__H #define PENGINE_AUTILS__H /* Constraint helper functions */ extern rsc_colocation_t *invert_constraint(rsc_colocation_t *constraint); extern rsc_to_node_t *copy_constraint(rsc_to_node_t *constraint); extern void print_rsc_to_node( const char *pre_text, rsc_to_node_t *cons, gboolean details); extern void print_rsc_colocation( const char *pre_text, rsc_colocation_t *cons, gboolean details); extern rsc_to_node_t *rsc2node_new( const char *id, resource_t *rsc, int weight, node_t *node, pe_working_set_t *data_set); extern void pe_free_rsc_to_node(GListPtr constraints); extern void pe_free_ordering(GListPtr constraints); extern const char *ordering_type2text(enum pe_ordering type); extern gboolean rsc_colocation_new( const char *id, const char *node_attr, int score, resource_t *rsc_lh, resource_t *rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t *data_set); +extern gboolean rsc_ticket_new(const char *id, resource_t *rsc_lh, ticket_t *ticket, + const char *state_lh, const char *loss_policy, pe_working_set_t *data_set); + extern rsc_to_node_t *generate_location_rule( resource_t *rsc, xmlNode *location_rule, pe_working_set_t *data_set); extern gint sort_node_weight(gconstpointer a, gconstpointer b, gpointer data_set); extern gboolean can_run_resources(const node_t *node); extern gboolean native_assign_node(resource_t *rsc, GListPtr candidates, node_t *chosen, gboolean force); void native_deallocate(resource_t *rsc); extern gboolean order_actions(action_t *lh_action, action_t *rh_action, enum pe_ordering order); extern void log_action(unsigned int log_level, const char *pre_text, action_t *action, gboolean details); extern action_t *get_pseudo_op(const char *name, pe_working_set_t *data_set); extern gboolean can_run_any(GHashTable *nodes); extern resource_t *find_compatible_child( resource_t *local_child, resource_t *rsc, enum rsc_role_e filter, gboolean current); #define STONITH_UP "stonith_up" #define STONITH_DONE "stonith_complete" #define ALL_STOPPED "all_stopped" #define LOAD_STOPPED "load_stopped" #endif diff --git a/tools/Makefile.am b/tools/Makefile.am index 353a396f9b..534af39ea4 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -1,151 +1,151 @@ # # Copyright (C) 2004-2009 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later 
version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \ -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl COMMONLIBS = \ $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cib/libcib.la \ $(CURSESLIBS) $(CLUSTERLIBS) headerdir = $(pkgincludedir)/crm header_HEADERS = attrd.h pcmkdir = $(datadir)/$(PACKAGE) pcmk_DATA = report.common report.collector -sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount +sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount crm_ticket EXTRA_DIST = $(sbin_SCRIPTS) halibdir = $(CRM_DAEMON_DIR) halib_SCRIPTS = hb2openais.sh halib_PROGRAMS = attrd halib_PYTHON = crm_primitive.py hb2openais-helper.py sbin_PROGRAMS = crm_simulate crmadmin cibadmin crm_node crm_attribute crm_resource crm_verify \ crm_uuid crm_shadow attrd_updater crm_diff crm_mon iso8601 testdir = $(datadir)/$(PACKAGE)/tests test_SCRIPTS = coverage.sh clidir = $(testdir)/cli cli_SCRIPTS = regression.sh cli_DATA = regression.exp cli.supp if BUILD_SERVICELOG sbin_PROGRAMS += notifyServicelogEvent endif if BUILD_OPENIPMI_SERVICELOG sbin_PROGRAMS += ipmiservicelogd endif if BUILD_HELP -man8_MANS = $(sbin_PROGRAMS:%=%.8) crm_report.8 crm_failcount.8 crm_master.8 crm_standby.8 +man8_MANS = $(sbin_PROGRAMS:%=%.8) crm_report.8 crm_failcount.8 crm_master.8 crm_standby.8 crm_ticket.8 endif ## SOURCES noinst_HEADERS = crmadmin_SOURCES = crmadmin.c crmadmin_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(COMMONLIBS) $(CLUSTERLIBS) crm_uuid_SOURCES = crm_uuid.c crm_uuid_LDADD = $(top_builddir)/lib/common/libcrmcluster.la cibadmin_SOURCES = cibadmin.c cibadmin_LDADD = $(COMMONLIBS) crm_shadow_SOURCES = cib_shadow.c crm_shadow_LDADD = $(COMMONLIBS) crm_node_SOURCES = ccm_epoche.c crm_node_LDADD = $(top_builddir)/lib/common/libcrmcluster.la \ $(COMMONLIBS) $(CLUSTERLIBS) crm_simulate_SOURCES = crm_inject.c crm_simulate_CFLAGS = -I$(top_srcdir)/pengine crm_simulate_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/pengine/libpengine.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/transition/libtransitioner.la \ $(COMMONLIBS) crm_diff_SOURCES = xml_diff.c crm_diff_LDADD = $(COMMONLIBS) crm_mon_SOURCES = crm_mon.c crm_mon_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(COMMONLIBS) $(SNMPLIBS) $(ESMTPLIBS) -llrm # Arguments could be made that this should live in crm/pengine crm_verify_SOURCES = crm_verify.c crm_verify_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/pengine/libpengine.la \ $(COMMONLIBS) crm_attribute_SOURCES = crm_attribute.c crm_attribute_LDADD = $(COMMONLIBS) crm_resource_SOURCES = crm_resource.c crm_resource_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/pengine/libpengine.la \ $(COMMONLIBS) iso8601_SOURCES = test.iso8601.c iso8601_LDADD = $(COMMONLIBS) attrd_SOURCES = attrd.c attrd_LDADD = $(top_builddir)/lib/common/libcrmcluster.la $(COMMONLIBS) #pingd_SOURCES = pingd.c #pingd_LDADD = $(COMMONLIBS) attrd_updater_SOURCES = 
attrd_updater.c attrd_updater_LDADD = $(COMMONLIBS) if BUILD_SERVICELOG notifyServicelogEvent_SOURCES = notifyServicelogEvent.c notifyServicelogEvent_CFLAGS = $(SERVICELOG_CFLAGS) notifyServicelogEvent_LDADD = $(top_builddir)/lib/common/libcrmcommon.la $(SERVICELOG_LIBS) endif if BUILD_OPENIPMI_SERVICELOG ipmiservicelogd_SOURCES = ipmiservicelogd.c ipmiservicelogd_CFLAGS = $(OPENIPMI_SERVICELOG_CFLAGS) $(SERVICELOG_CFLAGS) ipmiservicelogd_LDFLAGS = $(top_builddir)/lib/common/libcrmcommon.la $(OPENIPMI_SERVICELOG_LIBS) $(SERVICELOG_LIBS) endif %.8: % crm_attribute echo Creating $@ chmod a+x $(top_builddir)/tools/$< PATH=$$PATH:$(top_builddir)/tools $(HELP2MAN) --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/tools/$< clean-generic: rm -f *.log *.debug *.xml *~ install-exec-local: uninstall-local: .PHONY: install-exec-hook diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c index a7c529f536..b7d83b3ec1 100644 --- a/tools/crm_attribute.c +++ b/tools/crm_attribute.c @@ -1,316 +1,316 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean BE_QUIET = FALSE; char command = 'G'; char *dest_uname = NULL; char *dest_node = NULL; char *set_name = NULL; char *attr_id = NULL; char *attr_name = NULL; const char *type = NULL; const char *rsc_id = NULL; const char *attr_value = NULL; const char *attr_default = NULL; const char *set_type = NULL; static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"quiet", 0, 0, 'q', "\tPrint only the value on stdout\n"}, {"name", 1, 0, 'n', "Name of the attribute/option to operate on"}, {"-spacer-", 0, 0, '-', "\nCommands:"}, {"query", 0, 0, 'G', "\tQuery the current value of the attribute/option"}, {"update", 1, 0, 'v', "Update the value of the attribute/option"}, {"delete", 0, 0, 'D', "\tDelete the attribute/option"}, {"-spacer-", 0, 0, '-', "\nAdditional Options:"}, {"node", 1, 0, 'N', "Set an attribute for the named node (instead of a cluster option). 
See also: -l"}, {"type", 1, 0, 't', "Which part of the configuration to update/delete/query the option in."}, - {"-spacer-", 0, 0, '-', "\t\t\tValid values: crm_config, rsc_defaults, op_defaults"}, + {"-spacer-", 0, 0, '-', "\t\t\tValid values: crm_config, rsc_defaults, op_defaults, tickets"}, {"lifetime", 1, 0, 'l', "Lifetime of the node attribute."}, {"-spacer-", 0, 0, '-', "\t\t\tValid values: reboot, forever"}, {"utilization", 0, 0, 'z', "Set an utilization attribute for the node."}, {"set-name", 1, 0, 's', "(Advanced) The attribute set in which to place the value"}, {"id", 1, 0, 'i', "\t(Advanced) The ID used to identify the attribute"}, {"default", 1, 0, 'd', "(Advanced) The default value to display if none is found in the configuration"}, {"inhibit-policy-engine", 0, 0, '!', NULL, 1}, /* legacy */ {"quiet", 0, 0, 'Q', NULL, 1}, {"node-uname", 1, 0, 'U', NULL, 1}, {"node-uuid", 1, 0, 'u', NULL, 1}, {"get-value", 0, 0, 'G', NULL, 1}, {"delete-attr", 0, 0, 'D', NULL, 1}, {"attr-value", 1, 0, 'v', NULL, 1}, {"attr-name", 1, 0, 'n', NULL, 1}, {"attr-id", 1, 0, 'i', NULL, 1}, {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "Add a new attribute called 'location' with the value of 'office' for host 'myhost':", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --node myhost --name location --update office", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Query the value of the 'location' node attribute for host myhost:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --node myhost --name location --query", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Change the value of the 'location' node attribute for host myhost:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --node myhost --name location --update backoffice", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Delete the 'location' node attribute for the host myhost:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --node myhost --name location --delete", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Query the value of the cluster-delay cluster option:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --type crm_config --name cluster-delay --query", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Query the value of the cluster-delay cluster option. Only print the value:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_attribute --type crm_config --name cluster-delay --query --quiet", pcmk_option_example}, {0, 0, 0, 0} }; int main(int argc, char **argv) { cib_t * the_cib = NULL; enum cib_errors rc = cib_ok; int cib_opts = cib_sync_call; int argerr = 0; int flag; int option_index = 0; crm_log_init_quiet(NULL, LOG_ERR, FALSE, FALSE, argc, argv); crm_set_options("V?$GDQqN:U:u:s:n:v:l:t:zi:!r:d:", "command -n attribute [options]", long_options, "Manage node's attributes and cluster options." 
"\n\nAllows node attributes and cluster options to be queried, modified and deleted.\n"); if(argc < 2) { crm_help('?', LSB_EXIT_EINVAL); } while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch(flag) { case 'V': cl_log_enable_stderr(TRUE); alter_debug(DEBUG_INC); break; case '$': case '?': crm_help(flag, LSB_EXIT_OK); break; case 'D': case 'G': case 'v': command = flag; attr_value = optarg; break; case 'q': case 'Q': BE_QUIET = TRUE; break; case 'U': case 'N': dest_uname = crm_strdup(optarg); break; case 'u': dest_node = crm_strdup(optarg); break; case 's': set_name = crm_strdup(optarg); break; case 'l': case 't': type = optarg; break; case 'z': type = XML_CIB_TAG_NODES; set_type = XML_TAG_UTILIZATION; break; case 'n': attr_name = crm_strdup(optarg); break; case 'i': attr_id = crm_strdup(optarg); break; case 'r': rsc_id = optarg; break; case 'd': attr_default = optarg; break; case '!': crm_warn("Inhibiting notifications for this update"); cib_opts |= cib_inhibit_notify; break; default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if(BE_QUIET == FALSE) { cl_log_args(argc, argv); } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', LSB_EXIT_GENERIC); } the_cib = cib_new(); rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); if(rc != cib_ok) { fprintf(stderr, "Error signing on to the CIB service: %s\n", cib_error2string(rc)); return rc; } if(safe_str_eq(type, "reboot")) { type = XML_CIB_TAG_STATUS; } else if(safe_str_eq(type, "forever")) { type = XML_CIB_TAG_NODES; } if(type == NULL && dest_uname == NULL) { /* we're updating cluster options - dont populate dest_node */ type = XML_CIB_TAG_CRMCONFIG; - } else { + } else if(safe_str_neq(type, XML_CIB_TAG_TICKETS)){ determine_host(the_cib, &dest_uname, &dest_node); } if(rc != cib_ok) { crm_info("Error during setup of %s=%s update", attr_name, command=='D'?"":attr_value); } else if( (command=='v' || command=='D') - && safe_str_eq(type, XML_CIB_TAG_STATUS) + && (safe_str_eq(type, XML_CIB_TAG_STATUS) || safe_str_eq(type, XML_CIB_TAG_TICKETS)) && attrd_lazy_update(command, dest_uname, attr_name, attr_value, type, set_name, NULL)) { crm_info("Update %s=%s sent via attrd", attr_name, command=='D'?"":attr_value); } else if(command=='D') { rc = delete_attr(the_cib, cib_opts, type, dest_node, set_type, set_name, attr_id, attr_name, attr_value, TRUE); if(rc == cib_NOTEXISTS) { /* Nothing to delete... * which means its not there... 
* which is what the admin wanted */ rc = cib_ok; } else if(rc != cib_missing_data && safe_str_eq(crm_system_name, "crm_failcount")) { char *now_s = NULL; time_t now = time(NULL); now_s = crm_itoa(now); update_attr(the_cib, cib_sync_call, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, TRUE); crm_free(now_s); } } else if(command=='v') { CRM_LOG_ASSERT(type != NULL); CRM_LOG_ASSERT(attr_name != NULL); CRM_LOG_ASSERT(attr_value != NULL); rc = update_attr(the_cib, cib_opts, type, dest_node, set_type, set_name, attr_id, attr_name, attr_value, TRUE); } else /* query */ { char *read_value = NULL; rc = read_attr(the_cib, type, dest_node, set_type, set_name, attr_id, attr_name, &read_value, TRUE); if(rc == cib_NOTEXISTS && attr_default) { read_value = crm_strdup(attr_default); rc = cib_ok; } crm_info("Read %s=%s %s%s", attr_name, crm_str(read_value), set_name?"in ":"", set_name?set_name:""); if(rc == cib_missing_data) { rc = cib_ok; } else if(BE_QUIET == FALSE) { fprintf(stdout, "%s%s %s%s %s%s value=%s\n", type?"scope=":"", type?type:"", attr_id?"id=":"", attr_id?attr_id:"", attr_name?"name=":"", attr_name?attr_name:"", read_value?read_value:"(null)"); } else if(read_value != NULL) { fprintf(stdout, "%s\n", read_value); } } if(rc == cib_missing_data) { printf("Please choose from one of the matches above and supply the 'id' with --attr-id\n"); } else if(rc != cib_ok) { fprintf(stderr, "Error performing operation: %s\n", cib_error2string(rc)); } the_cib->cmds->signoff(the_cib); cib_delete(the_cib); crm_xml_cleanup(); return rc; } diff --git a/tools/crm_mon.c b/tools/crm_mon.c index 85b07e4928..4f730f4336 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -1,2014 +1,2015 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include <../lib/pengine/unpack.h> /* GMainLoop *mainloop = NULL; */ void wait_for_refresh(int offset, const char *prefix, int msec); void clean_up(int rc); void crm_diff_update(const char *event, xmlNode *msg); gboolean mon_refresh_display(gpointer user_data); int cib_connect(gboolean full); char *xml_file = NULL; char *as_html_file = NULL; char *pid_file = NULL; char *snmp_target = NULL; char *snmp_community = NULL; gboolean as_console = TRUE;; gboolean simple_status = FALSE; gboolean group_by_node = FALSE; gboolean inactive_resources = FALSE; gboolean web_cgi = FALSE; int reconnect_msec = 5000; gboolean daemonize = FALSE; GMainLoop *mainloop = NULL; guint timer_id = 0; GList *attr_list = NULL; const char *crm_mail_host = NULL; const char *crm_mail_prefix = NULL; const char *crm_mail_from = NULL; const char *crm_mail_to = NULL; const char *external_agent = NULL; const char *external_recipient = NULL; cib_t *cib = NULL; xmlNode *current_cib = NULL; gboolean one_shot = FALSE; gboolean has_warnings = FALSE; gboolean print_failcount = FALSE; gboolean print_operations = FALSE; gboolean print_timing = FALSE; gboolean print_nodes_attr = FALSE; gboolean print_last_updated = TRUE; gboolean print_last_change = TRUE; #define FILTER_STR {"shutdown", "terminate", "standby", "fail-count", \ "last-failure", "probe_complete", "#id", "#uname", \ "#is_dc", NULL} gboolean log_diffs = FALSE; gboolean log_updates = FALSE; long last_refresh = 0; crm_trigger_t *refresh_trigger = NULL; /* * 1.3.6.1.4.1.32723 has been assigned to the project by IANA * http://www.iana.org/assignments/enterprise-numbers */ #define PACEMAKER_PREFIX "1.3.6.1.4.1.32723" #define PACEMAKER_TRAP_PREFIX PACEMAKER_PREFIX ".1" #define snmp_crm_trap_oid PACEMAKER_TRAP_PREFIX #define snmp_crm_oid_node PACEMAKER_TRAP_PREFIX ".1" #define snmp_crm_oid_rsc PACEMAKER_TRAP_PREFIX ".2" #define snmp_crm_oid_task PACEMAKER_TRAP_PREFIX ".3" #define snmp_crm_oid_desc PACEMAKER_TRAP_PREFIX ".4" #define snmp_crm_oid_status PACEMAKER_TRAP_PREFIX ".5" #define snmp_crm_oid_rc PACEMAKER_TRAP_PREFIX ".6" #define snmp_crm_oid_trc PACEMAKER_TRAP_PREFIX ".7" #if CURSES_ENABLED # define print_dot() if(as_console) { \ printw("."); \ clrtoeol(); \ refresh(); \ } else { \ fprintf(stdout, "."); \ } #else # define print_dot() fprintf(stdout, "."); #endif #if CURSES_ENABLED # define print_as(fmt, args...) if(as_console) { \ printw(fmt, ##args); \ clrtoeol(); \ refresh(); \ } else { \ fprintf(stdout, fmt, ##args); \ } #else # define print_as(fmt, args...) 
fprintf(stdout, fmt, ##args); #endif static void blank_screen(void) { #if CURSES_ENABLED int lpc = 0; for(lpc = 0; lpc < LINES; lpc++) { move(lpc, 0); clrtoeol(); } move(0, 0); refresh(); #endif } static gboolean mon_timer_popped(gpointer data) { int rc = cib_ok; if(timer_id > 0) { g_source_remove(timer_id); } rc = cib_connect(TRUE); if(rc != cib_ok) { print_dot(); timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL); } return FALSE; } static void mon_cib_connection_destroy(gpointer user_data) { print_as("Connection to the CIB terminated\n"); if(cib) { print_as("Reconnecting..."); cib->cmds->signoff(cib); timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL); } return; } /* * Mainloop signal handler. */ static void mon_shutdown(int nsig) { clean_up(LSB_EXIT_OK); } #if ON_DARWIN # define sighandler_t sig_t #endif #if CURSES_ENABLED static sighandler_t ncurses_winch_handler; static void mon_winresize(int nsig) { static int not_done; int lines = 0, cols = 0; if(!not_done++) { if(ncurses_winch_handler) /* the original ncurses WINCH signal handler does the * magic of retrieving the new window size; * otherwise, we'd have to use ioctl or tgetent */ (*ncurses_winch_handler) (SIGWINCH); getmaxyx(stdscr, lines, cols); resizeterm(lines,cols); mainloop_set_trigger(refresh_trigger); } not_done--; } #endif int cib_connect(gboolean full) { int rc = cib_ok; static gboolean need_pass = TRUE; CRM_CHECK(cib != NULL, return cib_missing); if(getenv("CIB_passwd") != NULL) { need_pass = FALSE; } if(cib->state != cib_connected_query && cib->state != cib_connected_command) { crm_debug_4("Connecting to the CIB"); if(as_console && need_pass && cib->variant == cib_remote) { need_pass = FALSE; print_as("Password:"); } rc = cib->cmds->signon(cib, crm_system_name, cib_query); if(rc != cib_ok) { return rc; } current_cib = get_cib_copy(cib); mon_refresh_display(NULL); if(full) { if(rc == cib_ok) { rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy); if(rc == cib_NOTSUPPORTED) { print_as("Notification setup failed, won't be able to reconnect after failure"); if(as_console) { sleep(2); } rc = cib_ok; } } if(rc == cib_ok) { cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); } if(rc != cib_ok) { print_as("Notification setup failed, could not monitor CIB actions"); if(as_console) { sleep(2); } clean_up(-rc); } } } return rc; } static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"quiet", 0, 0, 'Q', "\tDisplay only essential output" }, {"-spacer-", 1, 0, '-', "\nModes:"}, {"as-html", 1, 0, 'h', "Write cluster status to the named file"}, {"web-cgi", 0, 0, 'w', "\tWeb mode with output suitable for cgi"}, {"simple-status", 0, 0, 's', "Display the cluster status once as a simple one line output (suitable for nagios)"}, {"snmp-traps", 1, 0, 'S', "Send SNMP traps to this station", !ENABLE_SNMP}, {"snmp-community", 1, 0, 'C', "Specify community for SNMP traps(default is NULL)", !ENABLE_SNMP}, {"mail-to", 1, 0, 'T', "Send Mail alerts to this user. 
See also --mail-from, --mail-host, --mail-prefix", !ENABLE_ESMTP}, {"-spacer-", 1, 0, '-', "\nDisplay Options:"}, {"group-by-node", 0, 0, 'n', "\tGroup resources by node" }, {"inactive", 0, 0, 'r', "\tDisplay inactive resources" }, {"failcounts", 0, 0, 'f', "\tDisplay resource fail counts"}, {"operations", 0, 0, 'o', "\tDisplay resource operation history" }, {"timing-details", 0, 0, 't', "\tDisplay resource operation history with timing details" }, {"show-node-attributes", 0, 0, 'A', "Display node attributes\n" }, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"interval", 1, 0, 'i', "\tUpdate frequency in seconds" }, {"one-shot", 0, 0, '1', "\tDisplay the cluster status once on the console and exit"}, {"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED}, {"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"}, {"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"}, {"mail-from", 1, 0, 'F', "\tMail alerts should come from the named user", !ENABLE_ESMTP}, {"mail-host", 1, 0, 'H', "\tMail alerts should be sent via the named host", !ENABLE_ESMTP}, {"mail-prefix", 1, 0, 'P', "Subjects for mail alerts should start with this string", !ENABLE_ESMTP}, {"external-agent", 1, 0, 'E', "A program to run when resource operations take place."}, {"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."}, {"xml-file", 1, 0, 'x', NULL, 1}, {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "Display the cluster´s status on the console with updates as they occur:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Display the cluster´s status on the console just once then exit:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Display your cluster´s status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster´s status to an HTML file:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send email alerts:", pcmk_option_paragraph|!ENABLE_ESMTP}, {"-spacer-", 1, 0, '-', " crm_mon --daemonize --mail-to user@example.com --mail-host mail.example.com", pcmk_option_example|!ENABLE_ESMTP}, {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send SNMP alerts:", pcmk_option_paragraph|!ENABLE_SNMP}, {"-spacer-", 1, 0, '-', " crm_mon --daemonize --snmp-traps snmptrapd.example.com", pcmk_option_example|!ENABLE_SNMP}, {NULL, 0, 0, 0} }; int main(int argc, char **argv) { int flag; int argerr = 0; int exit_code = 0; int option_index = 0; pid_file = crm_strdup("/tmp/ClusterMon.pid"); crm_log_init_quiet(NULL, LOG_CRIT, FALSE, FALSE, argc, argv); crm_set_options("VQ?$i:nrh:dp:s1wx:oftANS:T:F:H:P:E:e:C:", "mode [options]", long_options, "Provides a summary of cluster's current state." 
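/* A hedged example of wiring up the --external-agent hook listed above; the script path
 * and recipient are hypothetical, the options and variable names come from this file:
 *   crm_mon --daemonize --external-agent /usr/local/bin/cluster-notify.sh --external-recipient admin
 * For resource operations it observes, send_custom_trap() (further down) forks and execs
 * the agent with the details exported as CRM_notify_node, CRM_notify_rsc, CRM_notify_task,
 * CRM_notify_desc, CRM_notify_rc, CRM_notify_target_rc, CRM_notify_status and
 * CRM_notify_recipient, so the agent can be a few lines of shell. */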
"\n\nOutputs varying levels of detail in a number of different formats.\n"); #ifndef ON_DARWIN /* prevent zombies */ signal(SIGCLD, SIG_IGN); #endif if (strcmp(crm_system_name, "crm_mon.cgi")==0) { web_cgi = TRUE; one_shot = TRUE; } while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch(flag) { case 'V': cl_log_enable_stderr(TRUE); alter_debug(DEBUG_INC); break; case 'Q': print_last_updated = FALSE; print_last_change = FALSE; break; case 'i': reconnect_msec = crm_get_msec(optarg); break; case 'n': group_by_node = TRUE; break; case 'r': inactive_resources = TRUE; break; case 'd': daemonize = TRUE; break; case 't': print_timing = TRUE; print_operations = TRUE; break; case 'o': print_operations = TRUE; break; case 'f': print_failcount = TRUE; break; case 'A': print_nodes_attr = TRUE; break; case 'p': crm_free(pid_file); pid_file = crm_strdup(optarg); break; case 'x': xml_file = crm_strdup(optarg); one_shot = TRUE; break; case 'h': as_html_file = crm_strdup(optarg); break; case 'w': web_cgi = TRUE; one_shot = TRUE; break; case 's': simple_status = TRUE; one_shot = TRUE; break; case 'S': snmp_target = optarg; break; case 'T': crm_mail_to = optarg; break; case 'F': crm_mail_from = optarg; break; case 'H': crm_mail_host = optarg; break; case 'P': crm_mail_prefix = optarg; break; case 'E': external_agent = optarg; break; case 'e': external_recipient = optarg; break; case '1': one_shot = TRUE; break; case 'N': as_console = FALSE; break; case 'C': snmp_community = optarg; break; case '$': case '?': crm_help(flag, LSB_EXIT_OK); break; default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (argerr) { crm_help('?', LSB_EXIT_GENERIC); } if(one_shot) { as_console = FALSE; } else if(daemonize) { as_console = FALSE; cl_log_enable_stderr(FALSE); if(!as_html_file && !snmp_target && !crm_mail_to && !external_agent) { printf("Looks like you forgot to specify one or more of: --as-html, --mail-to, --snmp-target, --external-agent\n"); crm_help('?', LSB_EXIT_GENERIC); } crm_make_daemon(crm_system_name, TRUE, pid_file); } else if(as_console) { #if CURSES_ENABLED initscr(); cbreak(); noecho(); cl_log_enable_stderr(FALSE); #else one_shot = TRUE; as_console = FALSE; printf("Defaulting to one-shot mode\n"); printf("You need to have curses available at compile time to enable console mode\n"); #endif } crm_info("Starting %s", crm_system_name); if(xml_file != NULL) { current_cib = filename2xml(xml_file); mon_refresh_display(NULL); return exit_code; } if(current_cib == NULL) { cib = cib_new(); if(!one_shot) { print_as("Attempting connection to the cluster..."); } do { exit_code = cib_connect(!one_shot); if(one_shot) { break; } else if(exit_code != cib_ok) { print_dot(); sleep(reconnect_msec/1000); } } while(exit_code == cib_connection); if(exit_code != cib_ok) { print_as("\nConnection to cluster failed: %s\n", cib_error2string(exit_code)); if(as_console) { sleep(2); } clean_up(-exit_code); } } if(one_shot) { return exit_code; } mainloop = g_main_new(FALSE); mainloop_add_signal(SIGTERM, mon_shutdown); mainloop_add_signal(SIGINT, mon_shutdown); #if CURSES_ENABLED if(as_console) { ncurses_winch_handler = signal(SIGWINCH, mon_winresize); if(ncurses_winch_handler == SIG_DFL || ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR) ncurses_winch_handler = NULL; } #endif refresh_trigger = 
mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL); g_main_run(mainloop); g_main_destroy(mainloop); crm_info("Exiting %s", crm_system_name); clean_up(0); return 0; /* never reached */ } void wait_for_refresh(int offset, const char *prefix, int msec) { int lpc = msec / 1000; struct timespec sleept = {1 , 0}; if(as_console == FALSE) { timer_id = g_timeout_add(msec, mon_timer_popped, NULL); return; } crm_notice("%sRefresh in %ds...", prefix?prefix:"", lpc); while(lpc > 0) { #if CURSES_ENABLED move(offset, 0); /* printw("%sRefresh in \033[01;32m%ds\033[00m...", prefix?prefix:"", lpc); */ printw("%sRefresh in %ds...\n", prefix?prefix:"", lpc); clrtoeol(); refresh(); #endif lpc--; if(lpc == 0) { timer_id = g_timeout_add( 1000, mon_timer_popped, NULL); } else { if (nanosleep(&sleept, NULL) != 0) { return; } } } } #define mon_warn(fmt...) do { \ if (!has_warnings) { \ print_as("Warning:"); \ } else { \ print_as(","); \ } \ print_as(fmt); \ has_warnings = TRUE; \ } while(0) static int count_resources(pe_working_set_t *data_set, resource_t *rsc) { int count = 0; GListPtr gIter = NULL; if(rsc == NULL) { gIter = data_set->resources; } else if(rsc->children) { gIter = rsc->children; } else { return is_not_set(rsc->flags, pe_rsc_orphan); } for(; gIter != NULL; gIter = gIter->next) { count += count_resources(data_set, gIter->data); } return count; } static int print_simple_status(pe_working_set_t *data_set) { node_t *dc = NULL; GListPtr gIter = NULL; int nodes_online = 0; int nodes_standby = 0; dc = data_set->dc_node; if(dc == NULL) { mon_warn("No DC "); } for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; if(node->details->standby && node->details->online) { nodes_standby++; } else if(node->details->online) { nodes_online++; } else { mon_warn("offline node: %s", node->details->uname); } } if (!has_warnings) { print_as("Ok: %d nodes online", nodes_online); if (nodes_standby > 0) { print_as(", %d standby nodes", nodes_standby); } print_as(", %d resources configured", count_resources(data_set, NULL)); } print_as("\n"); return 0; } extern int get_failcount(node_t *node, resource_t *rsc, int *last_failure, pe_working_set_t *data_set); static void print_date(time_t time) { int lpc = 0; char date_str[26]; asctime_r(localtime(&time), date_str); for(; lpc < 26; lpc++) { if(date_str[lpc] == '\n') { date_str[lpc] = 0; } } print_as("'%s'", date_str); } static void print_rsc_summary(pe_working_set_t *data_set, node_t *node, resource_t *rsc, gboolean all) { gboolean printed = FALSE; time_t last_failure = 0; char *fail_attr = crm_concat("fail-count", rsc->id, '-'); const char *value = g_hash_table_lookup(node->details->attrs, fail_attr); int failcount = char2score(value); /* Get the true value, not the effective one from get_failcount() */ get_failcount(node, rsc, (int*)&last_failure, data_set); crm_free(fail_attr); if(all || failcount || last_failure > 0) { printed = TRUE; print_as(" %s: migration-threshold=%d", rsc->id, rsc->migration_threshold); } if(failcount > 0) { printed = TRUE; print_as(" fail-count=%d", failcount); } if(last_failure > 0) { printed = TRUE; print_as(" last-failure="); print_date(last_failure); } if(printed) { print_as("\n"); } } static void print_rsc_history(pe_working_set_t *data_set, node_t *node, xmlNode *rsc_entry) { GListPtr gIter = NULL; GListPtr op_list = NULL; gboolean print_name = TRUE; GListPtr sorted_op_list = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = 
pe_find_resource(data_set->resources, rsc_id); xmlNode *rsc_op = NULL; for(rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { if(crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_append(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for(gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode*)gIter->data; const char *value = NULL; const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); const char *interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); int rc = crm_parse_int(op_rc, "0"); if(safe_str_eq(task, CRMD_ACTION_STATUS) && safe_str_eq(interval, "0")) { task = "probe"; } if(rc == 7 && safe_str_eq(task, "probe")) { continue; } else if(safe_str_eq(task, CRMD_ACTION_NOTIFY)) { continue; } if(print_name) { print_name = FALSE; if(rsc == NULL) { print_as("Orphan resource: %s", rsc_id); } else { print_rsc_summary(data_set, node, rsc, TRUE); } } print_as(" + (%s) %s:", call, task); if(safe_str_neq(interval, "0")) { print_as(" interval=%sms", interval); } if(print_timing) { int int_value; const char *attr = "last-rc-change"; value = crm_element_value(xml_op, attr); if(value) { int_value = crm_parse_int(value, NULL); print_as(" %s=", attr); print_date(int_value); } attr = "last-run"; value = crm_element_value(xml_op, attr); if(value) { int_value = crm_parse_int(value, NULL); print_as(" %s=", attr); print_date(int_value); } attr = "exec-time"; value = crm_element_value(xml_op, attr); if(value) { int_value = crm_parse_int(value, NULL); print_as(" %s=%dms", attr, int_value); } attr = "queue-time"; value = crm_element_value(xml_op, attr); if(value) { int_value = crm_parse_int(value, NULL); print_as(" %s=%dms", attr, int_value); } } print_as(" rc=%s (%s)\n", op_rc, execra_code2string(rc)); } /* no need to free the contents */ g_list_free(sorted_op_list); } static void print_attr_msg(node_t *node, GListPtr rsc_list, const char *attrname, const char *attrvalue) { GListPtr gIter = NULL; for(gIter = rsc_list; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; const char *type = g_hash_table_lookup(rsc->meta, "type"); if(rsc->children != NULL) { print_attr_msg(node, rsc->children, attrname, attrvalue); } if(safe_str_eq(type, "ping") || safe_str_eq(type, "pingd")) { const char *name = "pingd"; const char *multiplier = NULL; char **host_list = NULL; int host_list_num = 0; int expected_score = 0; if(g_hash_table_lookup(rsc->meta, "name") != NULL) { name = g_hash_table_lookup(rsc->meta, "name"); } /* To identify the resource with the attribute name. */ if(safe_str_eq(name, attrname)) { int value = crm_parse_int(attrvalue, "0"); multiplier = g_hash_table_lookup(rsc->meta, "multiplier"); host_list = g_strsplit(g_hash_table_lookup(rsc->meta, "host_list"), " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); /* pingd multiplier is the same as the default value. */ expected_score = host_list_num * crm_parse_int(multiplier, "1"); /* pingd is abnormal score. 
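 * A worked example with hypothetical numbers (not taken from this patch): a host_list of
 * three hosts and multiplier=1000 gives expected_score = 3 * 1000 = 3000, so a reported
 * value of 2000 is shown as degraded below, while a value of 0 or less is reported as lost.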
*/ if(value <= 0) { print_as("\t: Connectivity is lost"); } else if(value < expected_score) { print_as("\t: Connectivity is degraded (Expected=%d)", expected_score); } } } } } static int compare_attribute(gconstpointer a, gconstpointer b) { int rc; rc = strcmp((const char*)a, (const char*)b); return rc; } static void create_attr_list(gpointer name, gpointer value, gpointer data) { int i; const char *filt_str[] = FILTER_STR; CRM_CHECK(name != NULL, return); /* filtering automatic attributes */ for(i = 0; filt_str[i] != NULL; i++) { if(g_str_has_prefix(name, filt_str[i])) { return; } } attr_list = g_list_insert_sorted(attr_list, name, compare_attribute); } static void print_node_attribute(gpointer name, gpointer node_data) { const char *value = NULL; node_t *node = (node_t *)node_data; value = g_hash_table_lookup(node->details->attrs, name); print_as(" + %-32s\t: %-10s", (char *)name, value); print_attr_msg(node, node->details->running_rsc, name, value); print_as("\n"); } static void print_node_summary(pe_working_set_t *data_set, gboolean operations) { xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; xmlNode *node_state = NULL; xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); if(operations) { print_as("\nOperations:\n"); } else { print_as("\nMigration summary:\n"); } for(node_state = __xml_first_child(cib_status); node_state != NULL; node_state = __xml_next(node_state)) { if(crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { node_t *node = pe_find_node_id(data_set->nodes, ID(node_state)); if(node == NULL || node->details->online == FALSE){ continue; } print_as("* Node %s: ", crm_element_value(node_state, XML_ATTR_UNAME)); print_as("\n"); lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); for(rsc_entry = __xml_first_child(lrm_rsc); rsc_entry != NULL; rsc_entry = __xml_next(rsc_entry)) { if(crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if(operations) { print_rsc_history(data_set, node, rsc_entry); } else { const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if(rsc) { print_rsc_summary(data_set, node, rsc, FALSE); } else { print_as(" %s: orphan\n", rsc_id); } } } } } } } static char * add_list_element(char *list, const char *value) { int len = 0; int last = 0; if(value == NULL) { return list; } if(list) { last = strlen(list); } len = last + 2; /* +1 space, +1 EOS */ len += strlen(value); crm_realloc(list, len); sprintf(list + last, " %s", value); return list; } static int print_status(pe_working_set_t *data_set) { static int updates = 0; GListPtr gIter = NULL; node_t *dc = NULL; char *since_epoch = NULL; char *online_nodes = NULL; char *offline_nodes = NULL; xmlNode *dc_version = NULL; xmlNode *quorum_node = NULL; xmlNode *stack = NULL; time_t a_time = time(NULL); int print_opts = pe_print_ncurses; const char *quorum_votes = "unknown"; if(as_console) { blank_screen(); } else { print_opts = pe_print_printf; } updates++; dc = data_set->dc_node; print_as("============\n"); if(a_time == (time_t)-1) { crm_perror(LOG_ERR,"set_node_tstamp(): Invalid time returned"); return 1; } since_epoch = ctime(&a_time); if(since_epoch != NULL && print_last_updated) { print_as("Last updated: %s", since_epoch); } if(print_last_change) { const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN); const char *user = crm_element_value(data_set->input, 
XML_ATTR_UPDATE_USER); const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT); const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG); print_as("Last change: %s", last_written?last_written:""); if(user) { print_as(" by %s", user); } if(client) { print_as(" via %s", client); } if(origin) { print_as(" on %s", origin); } print_as("\n"); } stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']", data_set->input, LOG_DEBUG); if(stack) { print_as("Stack: %s\n", crm_element_value(stack, XML_NVPAIR_ATTR_VALUE)); } dc_version = get_xpath_object("//nvpair[@name='dc-version']", data_set->input, LOG_DEBUG); if(dc == NULL) { print_as("Current DC: NONE\n"); } else { const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM); if(safe_str_neq(dc->details->uname, dc->details->id)) { print_as("Current DC: %s (%s)", dc->details->uname, dc->details->id); } else { print_as("Current DC: %s", dc->details->uname); } print_as(" - partition %s quorum\n", crm_is_true(quorum)?"with":"WITHOUT"); if(dc_version) { print_as("Version: %s\n", crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)); } } quorum_node = get_xpath_object("//nvpair[@name='"XML_ATTR_EXPECTED_VOTES"']", data_set->input, LOG_DEBUG); if(quorum_node) { quorum_votes = crm_element_value(quorum_node, XML_NVPAIR_ATTR_VALUE); } print_as("%d Nodes configured, %s expected votes\n", g_list_length(data_set->nodes), quorum_votes); print_as("%d Resources configured.\n", count_resources(data_set, NULL)); print_as("============\n\n"); for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; const char *node_mode = NULL; if(node->details->unclean) { if(node->details->online && node->details->unclean) { node_mode = "UNCLEAN (online)"; } else if(node->details->pending) { node_mode = "UNCLEAN (pending)"; } else { node_mode = "UNCLEAN (offline)"; } } else if(node->details->pending) { node_mode = "pending"; } else if(node->details->standby_onfail && node->details->online) { node_mode = "standby (on-fail)"; } else if(node->details->standby) { if(node->details->online) { node_mode = "standby"; } else { node_mode = "OFFLINE (standby)"; } } else if(node->details->online) { node_mode = "online"; if(group_by_node == FALSE) { online_nodes = add_list_element(online_nodes, node->details->uname); continue; } } else { node_mode = "OFFLINE"; if(group_by_node == FALSE) { offline_nodes = add_list_element(offline_nodes, node->details->uname); continue; } } if(safe_str_eq(node->details->uname, node->details->id)) { print_as("Node %s: %s\n", node->details->uname, node_mode); } else { print_as("Node %s (%s): %s\n", node->details->uname, node->details->id, node_mode); } if(group_by_node) { GListPtr gIter2 = NULL; for(gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t*)gIter2->data; rsc->fns->print(rsc, "\t", print_opts|pe_print_rsconly, stdout); } } } if(online_nodes) { print_as("Online: [%s ]\n", online_nodes); crm_free(online_nodes); } if(offline_nodes) { print_as("OFFLINE: [%s ]\n", offline_nodes); crm_free(offline_nodes); } if(group_by_node == FALSE && inactive_resources) { print_as("\nFull list of resources:\n"); } else if(inactive_resources) { print_as("\nInactive resources:\n"); } if(group_by_node == FALSE || inactive_resources) { print_as("\n"); for(gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; gboolean is_active = rsc->fns->active(rsc, 
TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); if(is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) { continue; } else if(group_by_node == FALSE) { if(partially_active || inactive_resources) { rsc->fns->print(rsc, NULL, print_opts, stdout); } } else if(is_active == FALSE && inactive_resources) { rsc->fns->print(rsc, NULL, print_opts, stdout); } } } if(print_nodes_attr) { print_as("\nNode Attributes:\n"); for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; if(node == NULL || node->details->online == FALSE){ continue; } attr_list = NULL; print_as("* Node %s:\n", node->details->uname); g_hash_table_foreach(node->details->attrs, create_attr_list, NULL); g_list_foreach(attr_list, print_node_attribute, node); } } if(print_operations || print_failcount) { print_node_summary(data_set, print_operations); } if(xml_has_children(data_set->failed)) { xmlNode *xml_op = NULL; print_as("\nFailed actions:\n"); for(xml_op = __xml_first_child(data_set->failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { int val = 0; const char *id = ID(xml_op); + const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); const char *last = crm_element_value(xml_op, "last_run"); const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); const char *rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); const char *status = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); val = crm_parse_int(status, "0"); print_as(" %s (node=%s, call=%s, rc=%s, status=%s", - id, node, call, rc, op_status2text(val)); + op_key?op_key:id, node, call, rc, op_status2text(val)); if(last) { time_t run_at = crm_parse_int(last, "0"); print_as(", last-run=%s, queued=%sms, exec=%sms\n", ctime(&run_at), crm_element_value(xml_op, "exec_time"), crm_element_value(xml_op, "queue_time")); } val = crm_parse_int(rc, "0"); print_as("): %s\n", execra_code2string(val)); } } #if CURSES_ENABLED if(as_console) { refresh(); } #endif return 0; } static int print_html_status(pe_working_set_t *data_set, const char *filename, gboolean web_cgi) { FILE *stream; GListPtr gIter = NULL; node_t *dc = NULL; static int updates = 0; char *filename_tmp = NULL; if (web_cgi) { stream=stdout; fprintf(stream, "Content-type: text/html\n\n"); } else { filename_tmp = crm_concat(filename, "tmp", '.'); stream = fopen(filename_tmp, "w"); if(stream == NULL) { crm_perror(LOG_ERR,"Cannot open %s for writing", filename_tmp); crm_free(filename_tmp); return -1; } } updates++; dc = data_set->dc_node; fprintf(stream, ""); fprintf(stream, ""); fprintf(stream, "Cluster status"); /* content="%d;url=http://webdesign.about.com" */ fprintf(stream, "", reconnect_msec/1000); fprintf(stream, ""); /*** SUMMARY ***/ fprintf(stream, "

Cluster summary

"); { char *now_str = NULL; time_t now = time(NULL); now_str = ctime(&now); now_str[24] = EOS; /* replace the newline */ fprintf(stream, "Last updated: %s
\n", now_str); } if(dc == NULL) { fprintf(stream, "Current DC: NONE
"); } else { fprintf(stream, "Current DC: %s (%s)
", dc->details->uname, dc->details->id); } fprintf(stream, "%d Nodes configured.
", g_list_length(data_set->nodes)); fprintf(stream, "%d Resources configured.
", count_resources(data_set, NULL)); /*** CONFIG ***/ fprintf(stream, "

Config Options

\n"); fprintf(stream, "\n"); fprintf(stream, "\n", is_set(data_set->flags, pe_flag_stonith_enabled)?"enabled":"disabled"); fprintf(stream, "\n", is_set(data_set->flags, pe_flag_symmetric_cluster)?"":"a-"); fprintf(stream, "\n
STONITH of failed nodes:%s
Cluster is:%ssymmetric
No Quorum Policy:"); switch (data_set->no_quorum_policy) { case no_quorum_freeze: fprintf(stream, "Freeze resources"); break; case no_quorum_stop: fprintf(stream, "Stop ALL resources"); break; case no_quorum_ignore: fprintf(stream, "Ignore"); break; case no_quorum_suicide: fprintf(stream, "Suicide"); break; } fprintf(stream, "\n
\n"); /*** NODE LIST ***/ fprintf(stream, "

Node List

\n"); fprintf(stream, "
    \n"); for(gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t*)gIter->data; fprintf(stream, "
  • "); if(node->details->standby_onfail && node->details->online) { fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"standby (on-fail)\n"); } else if(node->details->standby && node->details->online) { fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"standby\n"); } else if(node->details->standby) { fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"OFFLINE (standby)\n"); } else if(node->details->online) { fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"online\n"); } else { fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"OFFLINE\n"); } if(group_by_node) { GListPtr lpc2 = NULL; fprintf(stream, "
      \n"); for(lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { resource_t *rsc = (resource_t*)lpc2->data; fprintf(stream, "
    • "); rsc->fns->print(rsc, NULL, pe_print_html|pe_print_rsconly, stream); fprintf(stream, "
    • \n"); } fprintf(stream, "
    \n"); } fprintf(stream, "
  • \n"); } fprintf(stream, "
\n"); if(group_by_node && inactive_resources) { fprintf(stream, "

Inactive Resources

\n"); } else if(group_by_node == FALSE) { fprintf(stream, "

Resource List

\n"); } if(group_by_node == FALSE || inactive_resources) { for(gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t*)gIter->data; gboolean is_active = rsc->fns->active(rsc, TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); if(is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) { continue; } else if(group_by_node == FALSE) { if(partially_active || inactive_resources) { rsc->fns->print(rsc, NULL, pe_print_html, stream); } } else if(is_active == FALSE && inactive_resources) { rsc->fns->print(rsc, NULL, pe_print_html, stream); } } } fprintf(stream, ""); fflush(stream); fclose(stream); if (!web_cgi) { if(rename(filename_tmp, filename) != 0) { crm_perror(LOG_ERR,"Unable to rename %s->%s", filename_tmp, filename); } crm_free(filename_tmp); } return 0; } #if ENABLE_SNMP #include #include #include #include #include #include #define add_snmp_field(list, oid_string, value) do { \ oid name[MAX_OID_LEN]; \ size_t name_length = MAX_OID_LEN; \ if (snmp_parse_oid(oid_string, name, &name_length)) { \ int s_rc = snmp_add_var(list, name, name_length, 's', (value)); \ if(s_rc != 0) { \ crm_err("Could not add %s=%s rc=%d", oid_string, value, s_rc); \ } else { \ crm_debug_2("Added %s=%s", oid_string, value); \ } \ } else { \ crm_err("Could not parse OID: %s", oid_string); \ } \ } while(0) \ #define add_snmp_field_int(list, oid_string, value) do { \ oid name[MAX_OID_LEN]; \ size_t name_length = MAX_OID_LEN; \ if (snmp_parse_oid(oid_string, name, &name_length)) { \ if(NULL == snmp_pdu_add_variable( \ list, name, name_length, ASN_INTEGER, \ (u_char *) & value, sizeof(value))) { \ crm_err("Could not add %s=%d", oid_string, value); \ } else { \ crm_debug_2("Added %s=%d", oid_string, value); \ } \ } else { \ crm_err("Could not parse OID: %s", oid_string); \ } \ } while(0) \ static int snmp_input(int operation, netsnmp_session *session, int reqid, netsnmp_pdu *pdu, void *magic) { return 1; } static netsnmp_session *crm_snmp_init(const char *target, char *community) { static netsnmp_session *session = NULL; #ifdef NETSNMPV53 char target53[128]; snprintf(target53, sizeof(target53), "%s:162", target); #endif if(session) { return session; } if(target == NULL) { return NULL; } if(crm_log_level > LOG_INFO) { char *debug_tokens = crm_strdup("run:shell,snmptrap,tdomain"); debug_register_tokens(debug_tokens); snmp_set_do_debugging(1); } crm_malloc0(session, sizeof(netsnmp_session)); snmp_sess_init(session); session->version = SNMP_VERSION_2c; session->callback = snmp_input; session->callback_magic = NULL; if(community) { session->community_len = strlen(community); session->community = (unsigned char*)community; } session = snmp_add(session, #ifdef NETSNMPV53 netsnmp_tdomain_transport(target53, 0, "udp"), #else netsnmp_transport_open_client("snmptrap", target), #endif NULL, NULL); if (session == NULL) { snmp_sess_perror("Could not create snmp transport", session); } return session; } #endif static int send_snmp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc) { int ret = 1; #if ENABLE_SNMP static oid snmptrap_oid[] = { 1,3,6,1,6,3,1,1,4,1,0 }; static oid sysuptime_oid[] = { 1,3,6,1,2,1,1,3,0 }; netsnmp_pdu *trap_pdu; netsnmp_session *session = crm_snmp_init(snmp_target, snmp_community); trap_pdu = snmp_pdu_create(SNMP_MSG_TRAP2); if ( !trap_pdu ) { crm_err("Failed to create SNMP notification"); return SNMPERR_GENERR; } if(1) { /* send uptime */ char csysuptime[20]; time_t now = time(NULL); 
sprintf(csysuptime, "%ld", now); snmp_add_var(trap_pdu, sysuptime_oid, sizeof(sysuptime_oid) / sizeof(oid), 't', csysuptime); } /* Indicate what the trap is by setting snmpTrapOid.0 */ ret = snmp_add_var(trap_pdu, snmptrap_oid, sizeof(snmptrap_oid) / sizeof(oid), 'o', snmp_crm_trap_oid); if (ret != 0) { crm_err("Failed set snmpTrapOid.0=%s", snmp_crm_trap_oid); return ret; } /* Add extries to the trap */ add_snmp_field(trap_pdu, snmp_crm_oid_rsc, rsc); add_snmp_field(trap_pdu, snmp_crm_oid_node, node); add_snmp_field(trap_pdu, snmp_crm_oid_task, task); add_snmp_field(trap_pdu, snmp_crm_oid_desc, desc); add_snmp_field_int(trap_pdu, snmp_crm_oid_rc, rc); add_snmp_field_int(trap_pdu, snmp_crm_oid_trc, target_rc); add_snmp_field_int(trap_pdu, snmp_crm_oid_status, status); /* Send and cleanup */ ret = snmp_send(session, trap_pdu); if(ret == 0) { /* error */ snmp_sess_perror("Could not send SNMP trap", session); snmp_free_pdu(trap_pdu); ret = SNMPERR_GENERR; } else { ret = SNMPERR_SUCCESS; } #else crm_err("Sending SNMP traps is not supported by this installation"); #endif return ret; } #if ENABLE_ESMTP #include #include static void print_recipient_status( smtp_recipient_t recipient, const char *mailbox, void *arg) { const smtp_status_t *status; status = smtp_recipient_status (recipient); printf ("%s: %d %s", mailbox, status->code, status->text); } static void event_cb (smtp_session_t session, int event_no, void *arg, ...) { int *ok; va_list alist; va_start(alist, arg); switch(event_no) { case SMTP_EV_CONNECT: case SMTP_EV_MAILSTATUS: case SMTP_EV_RCPTSTATUS: case SMTP_EV_MESSAGEDATA: case SMTP_EV_MESSAGESENT: case SMTP_EV_DISCONNECT: break; case SMTP_EV_WEAK_CIPHER: { int bits = va_arg(alist, long); ok = va_arg(alist, int*); crm_debug("SMTP_EV_WEAK_CIPHER, bits=%d - accepted.", bits); *ok = 1; break; } case SMTP_EV_STARTTLS_OK: crm_debug("SMTP_EV_STARTTLS_OK - TLS started here."); break; case SMTP_EV_INVALID_PEER_CERTIFICATE: { long vfy_result = va_arg(alist, long); ok = va_arg(alist, int*); /* There is a table in handle_invalid_peer_certificate() of mail-file.c */ crm_err("SMTP_EV_INVALID_PEER_CERTIFICATE: %ld", vfy_result); *ok = 1; break; } case SMTP_EV_NO_PEER_CERTIFICATE: ok = va_arg(alist, int*); crm_debug("SMTP_EV_NO_PEER_CERTIFICATE - accepted."); *ok = 1; break; case SMTP_EV_WRONG_PEER_CERTIFICATE: ok = va_arg(alist, int*); crm_debug("SMTP_EV_WRONG_PEER_CERTIFICATE - accepted."); *ok = 1; break; case SMTP_EV_NO_CLIENT_CERTIFICATE: ok = va_arg(alist, int*); crm_debug("SMTP_EV_NO_CLIENT_CERTIFICATE - accepted."); *ok = 1; break; default: crm_debug("Got event: %d - ignored.\n", event_no); } va_end(alist); } #endif #define BODY_MAX 2048 #if ENABLE_ESMTP static void crm_smtp_debug (const char *buf, int buflen, int writing, void *arg) { char type = 0; int lpc = 0, last = 0, level = *(int*)arg; if (writing == SMTP_CB_HEADERS) { type = 'H'; } else if(writing) { type = 'C'; } else { type = 'S'; } for(; lpc < buflen; lpc++) { switch(buf[lpc]) { case 0: case '\n': if(last > 0) { do_crm_log(level, " %.*s", lpc-last, buf+last); } else { do_crm_log(level, "%c: %.*s", type, lpc-last, buf+last); } last = lpc + 1; break; } } } #endif static int send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc) { pid_t pid; /*setenv needs chars, these are ints*/ char *rc_s = crm_itoa(rc); char *status_s = crm_itoa(status); char *target_rc_s = crm_itoa(target_rc); crm_debug("Sending external notification to '%s' via '%s'", external_recipient, 
external_agent); setenv("CRM_notify_recipient",external_recipient,1); setenv("CRM_notify_node",node,1); setenv("CRM_notify_rsc",rsc,1); setenv("CRM_notify_task",task,1); setenv("CRM_notify_desc",desc,1); setenv("CRM_notify_rc",rc_s,1); setenv("CRM_notify_target_rc",target_rc_s,1); setenv("CRM_notify_status",status_s,1); pid=fork(); if(pid == -1) { cl_perror("notification fork() failed."); } if(pid == 0) { /* crm_debug("notification: I am the child. Executing the nofitication program."); */ execl(external_agent,external_agent,NULL); } crm_debug_2("Finished running custom notification program '%s'.",external_agent); crm_free(target_rc_s); crm_free(status_s); crm_free(rc_s); return 0; } static int send_smtp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc) { #if ENABLE_ESMTP smtp_session_t session; smtp_message_t message; auth_context_t authctx; struct sigaction sa; int len = 20; int noauth = 1; int smtp_debug = LOG_DEBUG; char crm_mail_body[BODY_MAX]; char *crm_mail_subject = NULL; if(node == NULL) { node = "-"; } if(rsc == NULL) { rsc = "-"; } if(desc == NULL) { desc = "-"; } if(crm_mail_to == NULL) { return 1; } if(crm_mail_host == NULL) { crm_mail_host = "localhost:25"; } if(crm_mail_prefix == NULL) { crm_mail_prefix = "Cluster notification"; } crm_debug("Sending '%s' mail to %s via %s", crm_mail_prefix, crm_mail_to, crm_mail_host); len += strlen(crm_mail_prefix); len += strlen(task); len += strlen(rsc); len += strlen(node); len += strlen(desc); len++; crm_malloc0(crm_mail_subject, len); snprintf(crm_mail_subject, len, "%s - %s event for %s on %s: %s\r\n", crm_mail_prefix, task, rsc, node, desc); len = 0; len += snprintf(crm_mail_body+len, BODY_MAX-len, "\r\n%s\r\n", crm_mail_prefix); len += snprintf(crm_mail_body+len, BODY_MAX-len, "====\r\n\r\n"); if(rc==target_rc) { len += snprintf(crm_mail_body+len, BODY_MAX-len, "Completed operation %s for resource %s on %s\r\n", task, rsc, node); } else { len += snprintf(crm_mail_body+len, BODY_MAX-len, "Operation %s for resource %s on %s failed: %s\r\n", task, rsc, node, desc); } len += snprintf(crm_mail_body+len, BODY_MAX-len, "\r\nDetails:\r\n"); len += snprintf(crm_mail_body+len, BODY_MAX-len, "\toperation status: (%d) %s\r\n", status, op_status2text(status)); if(status == LRM_OP_DONE) { len += snprintf(crm_mail_body+len, BODY_MAX-len, "\tscript returned: (%d) %s\r\n", rc, execra_code2string(rc)); len += snprintf(crm_mail_body+len, BODY_MAX-len, "\texpected return value: (%d) %s\r\n", target_rc, execra_code2string(target_rc)); } auth_client_init(); session = smtp_create_session(); message = smtp_add_message(session); smtp_starttls_enable (session, Starttls_ENABLED); sa.sa_handler = SIG_IGN; sigemptyset (&sa.sa_mask); sa.sa_flags = 0; sigaction (SIGPIPE, &sa, NULL); smtp_set_server (session, crm_mail_host); authctx = auth_create_context (); auth_set_mechanism_flags (authctx, AUTH_PLUGIN_PLAIN, 0); smtp_set_eventcb(session, event_cb, NULL); /* Now tell libESMTP it can use the SMTP AUTH extension. 
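 * Note that noauth is hard-coded to 1 earlier in this function, so the branch below is
 * effectively skipped and the message is submitted without SMTP AUTH.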
*/ if (!noauth) { crm_debug("Adding authentication context"); smtp_auth_set_context (session, authctx); } if(crm_mail_from == NULL) { struct utsname us; char auto_from[BODY_MAX]; CRM_ASSERT(uname(&us) == 0); snprintf(auto_from, BODY_MAX, "crm_mon@%s", us.nodename); smtp_set_reverse_path (message, auto_from); } else { /* NULL is ok */ smtp_set_reverse_path (message, crm_mail_from); } smtp_set_header (message, "To", NULL/*phrase*/, NULL/*addr*/); /* "Phrase" */ smtp_add_recipient (message, crm_mail_to); /* Set the Subject: header and override any subject line in the message headers. */ smtp_set_header (message, "Subject", crm_mail_subject); smtp_set_header_option (message, "Subject", Hdr_OVERRIDE, 1); smtp_set_message_str(message, crm_mail_body); smtp_set_monitorcb (session, crm_smtp_debug, &smtp_debug, 1); if (smtp_start_session (session)) { char buf[128]; int rc = smtp_errno(); crm_err("SMTP server problem: %s (%d)", smtp_strerror (rc, buf, sizeof buf), rc); } else { char buf[128]; int rc = smtp_errno(); const smtp_status_t *smtp_status = smtp_message_transfer_status(message); if(rc != 0) { crm_err("SMTP server problem: %s (%d)", smtp_strerror (rc, buf, sizeof buf), rc); } crm_info("Send status: %d %s", smtp_status->code, crm_str(smtp_status->text)); smtp_enumerate_recipients (message, print_recipient_status, NULL); } smtp_destroy_session(session); auth_destroy_context(authctx); auth_client_exit(); #endif return 0; } static void handle_rsc_op(xmlNode *rsc_op) { int rc = -1; int status = -1; int action = -1; int interval = 0; int target_rc = -1; int transition_num = -1; gboolean notify = TRUE; char *rsc = NULL; char *task = NULL; const char *desc = NULL; const char *node = NULL; const char *magic = NULL; const char *id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY); char *update_te_uuid = NULL; xmlNode *n = rsc_op; if(id == NULL) { /* Compatability with <= 1.1.5 */ id = ID(rsc_op); } magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC); if(magic == NULL) { /* non-change */ return; } if(FALSE == decode_transition_magic( magic, &update_te_uuid, &transition_num, &action, &status, &rc, &target_rc)) { crm_err("Invalid event %s detected for %s", magic, id); return; } if(parse_op_key(id, &rsc, &task, &interval) == FALSE) { crm_err("Invalid event detected for %s", id); goto bail; } while(n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) { n = n->parent; } node = crm_element_value(n, XML_ATTR_UNAME); if(node == NULL) { node = ID(n); } if(node == NULL) { crm_err("No node detected for event %s (%s)", magic, id); goto bail; } /* look up where we expected it to be? 
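 * In practice desc starts out as the cib_ok string and is only replaced further down, with
 * the LRM return-code text or the operation-status text, when the action did not complete
 * with its expected return code.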
*/ desc = cib_error2string(cib_ok); if(status == LRM_OP_DONE && target_rc == rc) { crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc); if(rc == EXECRA_NOT_RUNNING) { notify = FALSE; } } else if(status == LRM_OP_DONE) { desc = execra_code2string(rc); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } else { desc = op_status2text(status); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } if(notify && snmp_target) { send_snmp_trap(node, rsc, task, target_rc, rc, status, desc); } if(notify && crm_mail_to) { send_smtp_trap(node, rsc, task, target_rc, rc, status, desc); } if(notify && external_agent) { send_custom_trap(node, rsc, task, target_rc, rc, status, desc); } bail: crm_free(update_te_uuid); crm_free(rsc); crm_free(task); } void crm_diff_update(const char *event, xmlNode *msg) { int rc = -1; long now = time(NULL); const char *op = NULL; unsigned int log_level = LOG_INFO; xmlNode *diff = NULL; xmlNode *cib_last = NULL; xmlNode *update = get_message_xml(msg, F_CIB_UPDATE); print_dot(); if(msg == NULL) { crm_err("NULL update"); return; } crm_element_value_int(msg, F_CIB_RC, &rc); op = crm_element_value(msg, F_CIB_OPERATION); diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); if(rc < cib_ok) { log_level = LOG_WARNING; do_crm_log(log_level, "[%s] %s ABORTED: %s", event, op, cib_error2string(rc)); return; } if(current_cib != NULL) { cib_last = current_cib; current_cib = NULL; rc = cib_process_diff(op, cib_force_diff, NULL, NULL, diff, cib_last, &current_cib, NULL); if(rc != cib_ok) { crm_debug("Update didn't apply, requesting full copy: %s", cib_error2string(rc)); free_xml(current_cib); current_cib = NULL; } } if(current_cib == NULL) { current_cib = get_cib_copy(cib); } if(log_diffs && diff) { log_cib_diff(LOG_DEBUG, diff, op); } if(log_updates && update != NULL) { do_crm_log_xml(LOG_DEBUG, "raw_update", update); } if(diff && (crm_mail_to || snmp_target || external_agent)) { /* Process operation updates */ xmlXPathObject *xpathObj = xpath_search( diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RSC_OP); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); handle_rsc_op(rsc_op); } } if (xpathObj) { xmlXPathFreeObject(xpathObj); } } if((now - last_refresh) > (reconnect_msec/1000)) { /* Force a refresh */ mon_refresh_display(NULL); } else { mainloop_set_trigger(refresh_trigger); } free_xml(cib_last); } gboolean mon_refresh_display(gpointer user_data) { xmlNode *cib_copy = copy_xml(current_cib); pe_working_set_t data_set; last_refresh = time(NULL); if(cli_config_update(&cib_copy, NULL, FALSE) == FALSE) { if(cib) { cib->cmds->signoff(cib); } print_as("Upgrade failed: %s", cib_error2string(cib_dtd_validation)); if(as_console) { sleep(2); } clean_up(LSB_EXIT_GENERIC); return FALSE; } set_working_set_defaults(&data_set); data_set.input = cib_copy; cluster_status(&data_set); if(as_html_file || web_cgi) { if (print_html_status(&data_set, as_html_file, web_cgi) != 0) { fprintf(stderr, "Critical: Unable to output html file\n"); clean_up(LSB_EXIT_GENERIC); } } else if(daemonize) { /* do nothing */ } else if (simple_status) { print_simple_status(&data_set); if (has_warnings) { clean_up(LSB_EXIT_GENERIC); } } else { print_status(&data_set); } cleanup_calculations(&data_set); return TRUE; } /* * De-init ncurses, signoff from the CIB and deallocate memory. 
*/ void clean_up(int rc) { #if ENABLE_SNMP netsnmp_session *session = crm_snmp_init(NULL, NULL); if(session) { snmp_close(session); snmp_shutdown("snmpapp"); } #endif #if CURSES_ENABLED if(as_console) { as_console = FALSE; echo(); nocbreak(); endwin(); } #endif if (cib != NULL) { cib->cmds->signoff(cib); cib_delete(cib); cib = NULL; } crm_free(as_html_file); crm_free(xml_file); crm_free(pid_file); if(rc >= 0) { exit(rc); } return; } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index aa0dbc3fab..3658464e15 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,1678 +1,1679 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean do_force = FALSE; gboolean BE_QUIET = FALSE; const char *attr_set_type = XML_TAG_ATTR_SETS; char *host_id = NULL; const char *rsc_id = NULL; const char *host_uname = NULL; const char *prop_name = NULL; const char *prop_value = NULL; const char *rsc_type = NULL; const char *prop_id = NULL; const char *prop_set = NULL; char *move_lifetime = NULL; char rsc_cmd = 'L'; char *our_pid = NULL; IPC_Channel *crmd_channel = NULL; char *xml_file = NULL; int cib_options = cib_sync_call; int crmd_replies_needed = 0; GMainLoop *mainloop = NULL; extern void cleanup_alloc_calculations(pe_working_set_t *data_set); #define CMD_ERR(fmt, args...) do { \ crm_warn(fmt, ##args); \ fprintf(stderr, fmt, ##args); \ } while(0) #define message_timeout_ms 60*1000 static gboolean resource_ipc_timeout(gpointer data) { fprintf(stderr, "No messages received in %d seconds.. 
aborting\n", (int)message_timeout_ms/1000); crm_err("No messages received in %d seconds", (int)message_timeout_ms/1000); exit(-1); } static void resource_ipc_connection_destroy(gpointer user_data) { crm_info("Connection to CRMd was terminated"); exit(1); } static void start_mainloop(void) { mainloop = g_main_new(FALSE); crmd_replies_needed++; /* The welcome message */ fprintf(stderr, "Waiting for %d replies from the CRMd", crmd_replies_needed); crm_debug("Waiting for %d replies from the CRMd", crmd_replies_needed); g_timeout_add(message_timeout_ms, resource_ipc_timeout, NULL); g_main_run(mainloop); } static gboolean resource_ipc_callback(IPC_Channel * server, void *private_data) { int lpc = 0; xmlNode *msg = NULL; gboolean stay_connected = TRUE; while(IPC_ISRCONN(server)) { if(server->ops->is_message_pending(server) == 0) { break; } msg = xmlfromIPC(server, MAX_IPC_DELAY); if (msg == NULL) { break; } lpc++; fprintf(stderr, "."); crm_log_xml(LOG_DEBUG_2, "[inbound]", msg); crmd_replies_needed--; if(crmd_replies_needed == 0) { fprintf(stderr, " OK\n"); crm_debug("Got all the replies we expected"); crm_xml_cleanup(); exit(0); } free_xml(msg); msg = NULL; if(server->ch_status != IPC_CONNECT) { break; } } crm_debug_2("Processed %d messages (%d)", lpc, server->ch_status); if (server->ch_status != IPC_CONNECT) { stay_connected = FALSE; } return stay_connected; } static int do_find_resource(const char *rsc, resource_t *the_rsc, pe_working_set_t *data_set) { int found = 0; GListPtr lpc = NULL; if(the_rsc == NULL) { the_rsc = pe_find_resource(data_set->resources, rsc); } if(the_rsc == NULL) { return cib_NOTEXISTS; } if(the_rsc->variant > pe_clone) { GListPtr gIter = the_rsc->children; for(; gIter != NULL; gIter = gIter->next) { found += do_find_resource(rsc, gIter->data, data_set); } return found; } for(lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { node_t *node = (node_t*)lpc->data; crm_debug_3("resource %s is running on: %s", rsc, node->details->uname); if(BE_QUIET) { fprintf(stdout, "%s\n", node->details->uname); } else { const char *state = ""; if(the_rsc->variant == pe_native && the_rsc->role == RSC_ROLE_MASTER) { state = "Master"; } fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state); } found++; } if(BE_QUIET == FALSE && found == 0) { fprintf(stderr, "resource %s is NOT running\n", rsc); } return 0; } #define cons_string(x) x?x:"NA" static void print_cts_constraints(pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; xmlNode *lifetime = NULL; xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); for(xml_obj = __xml_first_child(cib_constraints); xml_obj != NULL; xml_obj = __xml_next(xml_obj)) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); if(id == NULL) { continue; } lifetime = first_named_child(xml_obj, "lifetime"); if(test_ruleset(lifetime, NULL, data_set->now) == FALSE) { continue; } if(safe_str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj))) { printf("Constraint %s %s %s %s %s %s %s\n", crm_element_name(xml_obj), cons_string(crm_element_value(xml_obj, XML_ATTR_ID)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)), cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE))); } else if(safe_str_eq(XML_CONS_TAG_RSC_LOCATION, crm_element_name(xml_obj))) { /* 
unpack_rsc_location(xml_obj, data_set); */ } } } static void print_cts_rsc(resource_t *rsc) { GListPtr lpc = NULL; const char *host = NULL; gboolean needs_quorum = TRUE; const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(safe_str_eq(rclass, "stonith")) { xmlNode *op = NULL; needs_quorum = FALSE; for(op = __xml_first_child(rsc->ops_xml); op != NULL; op = __xml_next(op)) { if(crm_str_eq((const char *)op->name, "op", TRUE)) { const char *name = crm_element_value(op, "name"); if(safe_str_neq(name, CRMD_ACTION_START)) { const char *value = crm_element_value(op, "requires"); if(safe_str_eq(value, "nothing")) { needs_quorum = FALSE; } break; } } } } if(rsc->running_on != NULL && g_list_length(rsc->running_on) == 1) { node_t *tmp = rsc->running_on->data; host = tmp->details->uname; } printf("Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx\n", crm_element_name(rsc->xml), rsc->id, rsc->clone_name?rsc->clone_name:rsc->id, rsc->parent?rsc->parent->id:"NA", rprov?rprov:"NA", rclass, rtype, host?host:"NA", needs_quorum, rsc->flags, rsc->flags); for(lpc = rsc->children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t*)lpc->data; print_cts_rsc(child); } } static void print_raw_rsc(resource_t *rsc) { GListPtr lpc = NULL; GListPtr children = rsc->children; if(children == NULL) { printf("%s\n", rsc->id); } for(lpc = children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t*)lpc->data; print_raw_rsc(child); } } static int do_find_resource_list(pe_working_set_t *data_set, gboolean raw) { int found = 0; GListPtr lpc = NULL; for(lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t*)lpc->data; if(is_set(rsc->flags, pe_rsc_orphan) && rsc->fns->active(rsc, TRUE) == FALSE) { continue; } rsc->fns->print( rsc, NULL, pe_print_printf|pe_print_rsconly, stdout); found++; } if(found == 0) { printf("NO resources configured\n"); return cib_NOTEXISTS; } return 0; } static resource_t *find_rsc_or_clone(const char *rsc, pe_working_set_t *data_set) { resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if(the_rsc == NULL) { char *as_clone = crm_concat(rsc, "0", ':'); the_rsc = pe_find_resource(data_set->resources, as_clone); crm_free(as_clone); } return the_rsc; } static int dump_resource(const char *rsc, pe_working_set_t *data_set) { char *rsc_xml = NULL; resource_t *the_rsc = find_rsc_or_clone(rsc, data_set); if(the_rsc == NULL) { return cib_NOTEXISTS; } the_rsc->fns->print(the_rsc, NULL, pe_print_printf, stdout); rsc_xml = dump_xml_formatted(the_rsc->xml); fprintf(stdout, "raw xml:\n%s\n", rsc_xml); crm_free(rsc_xml); return 0; } static int dump_resource_attr( const char *rsc, const char *attr, pe_working_set_t *data_set) { int rc = cib_NOTEXISTS; node_t *current = NULL; GHashTable *params = NULL; resource_t *the_rsc = find_rsc_or_clone(rsc, data_set); const char *value = NULL; if(the_rsc == NULL) { return cib_NOTEXISTS; } if(g_list_length(the_rsc->running_on) == 1) { current = the_rsc->running_on->data; } else if(g_list_length(the_rsc->running_on) > 1) { CMD_ERR("%s is active on more than one node," " returning the default value for %s\n", the_rsc->id, crm_str(value)); } params = g_hash_table_new_full( crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); if(safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { get_rsc_attributes(params, the_rsc, current, data_set); } else 
if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { get_meta_attributes(params, the_rsc, current, data_set); } else { unpack_instance_attributes(data_set->input, the_rsc->xml, XML_TAG_UTILIZATION, NULL, params, NULL, FALSE, data_set->now); } crm_debug("Looking up %s in %s", attr, the_rsc->id); value = g_hash_table_lookup(params, attr); if(value != NULL) { fprintf(stdout, "%s\n", value); rc = 0; } g_hash_table_destroy(params); return rc; } static int find_resource_attr( cib_t *the_cib, const char *attr, const char *rsc, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; static int xpath_max = 1024; enum cib_errors rc = cib_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; CRM_ASSERT(value != NULL); *value = NULL; crm_malloc0(xpath_string, xpath_max); offset += snprintf(xpath_string + offset, xpath_max - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, xpath_max - offset, "//*[@id=\"%s\"]", rsc); if(set_type) { offset += snprintf(xpath_string + offset, xpath_max - offset, "/%s", set_type); if(set_name) { offset += snprintf(xpath_string + offset, xpath_max - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, xpath_max - offset, "//nvpair["); if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max - offset, "@id=\"%s\"", attr_id); } if(attr_name) { if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max - offset, " and "); } offset += snprintf(xpath_string + offset, xpath_max - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, xpath_max - offset, "]"); rc = the_cib->cmds->query( the_cib, xpath_string, &xml_search, cib_sync_call|cib_scope_local|cib_xpath); if(rc != cib_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if(xml_has_children(xml_search)) { xmlNode *child = NULL; rc = cib_missing_data; printf("Multiple attributes match name=%s\n", attr_name); for(child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { printf(" Value: %s \t(id=%s)\n", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else { const char *tmp = crm_element_value(xml_search, attr); if(tmp) { *value = crm_strdup(tmp); } } bail: crm_free(xpath_string); free_xml(xml_search); return rc; } static int set_resource_attr(const char *rsc_id, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, cib_t *cib, pe_working_set_t *data_set) { int rc = cib_ok; char *local_attr_id = NULL; char *local_attr_set = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; gboolean use_attributes_tag = FALSE; resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); if(rsc == NULL) { return cib_NOTEXISTS; } if(safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_ok) { printf("WARNING: There is already a meta attribute called %s (id=%s)\n", attr_name, local_attr_id); } } rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if(rc != cib_NOTEXISTS) { crm_free(local_attr_id); return rc; } else { const char *value = NULL; xmlNode *cib_top = NULL; const char *tag = crm_element_name(rsc->xml); rc = cib->cmds->query(cib, "/cib", &cib_top, 
cib_sync_call|cib_scope_local|cib_xpath|cib_no_children); value = crm_element_value(cib_top, "ignore_dtd"); if(value != NULL) { use_attributes_tag = TRUE; } else { value = crm_element_value(cib_top, XML_ATTR_VALIDATION); if(value && strstr(value, "-0.6")) { use_attributes_tag = TRUE; } } free_xml(cib_top); if(attr_set == NULL) { local_attr_set = crm_concat(rsc_id, attr_set_type, '-'); attr_set = local_attr_set; } if(attr_id == NULL) { local_attr_id = crm_concat(attr_set, attr_name, '-'); attr_id = local_attr_id; } if(use_attributes_tag && safe_str_eq(tag, XML_CIB_TAG_MASTER)) { tag = "master_slave"; /* use the old name */ } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, rsc_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); if(use_attributes_tag) { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTRS); } } xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if(xml_top == NULL) { xml_top = xml_obj; } crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); free_xml(xml_top); crm_free(local_attr_id); crm_free(local_attr_set); return rc; } static int delete_resource_attr( const char *rsc_id, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; int rc = cib_ok; char *local_attr_id = NULL; resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); if(rsc == NULL) { return cib_NOTEXISTS; } rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_NOTEXISTS) { return cib_ok; } else if(rc != cib_ok) { return rc; } if(attr_id == NULL) { attr_id = local_attr_id; } xml_obj = create_xml_node(NULL, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_log_xml_debug(xml_obj, "Delete"); rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); if(rc == cib_ok) { printf("Deleted %s option: id=%s%s%s%s%s\n", rsc_id, local_attr_id, attr_set?" set=":"", attr_set?attr_set:"", attr_name?" 
name=":"", attr_name?attr_name:""); } free_xml(xml_obj); crm_free(local_attr_id); return rc; } static int dump_resource_prop( const char *rsc, const char *attr, pe_working_set_t *data_set) { const char *value = NULL; resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if(the_rsc == NULL) { return cib_NOTEXISTS; } value = crm_element_value(the_rsc->xml, attr); if(value != NULL) { fprintf(stdout, "%s\n", value); return 0; } return cib_NOTEXISTS; } static int send_lrm_rsc_op(IPC_Channel *crmd_channel, const char *op, const char *host_uname, const char *rsc_id, gboolean only_failed, pe_working_set_t *data_set) { char *key = NULL; int rc = cib_send_failed; xmlNode *cmd = NULL; xmlNode *xml_rsc = NULL; const char *value = NULL; xmlNode *params = NULL; xmlNode *msg_data = NULL; resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if(rsc == NULL) { CMD_ERR("Resource %s not found\n", rsc_id); return cib_NOTEXISTS; } else if(rsc->variant != pe_native) { CMD_ERR("We can only process primitive resources, not %s\n", rsc_id); return cib_invalid_argument; } else if(host_uname == NULL) { CMD_ERR("Please supply a hostname with -H\n"); return cib_invalid_argument; } key = crm_concat("0:0:crm-resource", our_pid, '-'); msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); crm_free(key); xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); if(rsc->clone_name) { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id); } else { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->long_name); } value = crm_element_value(rsc->xml, XML_ATTR_TYPE); crm_xml_add(xml_rsc, XML_ATTR_TYPE, value); if(value == NULL) { CMD_ERR("%s has no type! Aborting...\n", rsc_id); return cib_NOTEXISTS; } value = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, value); if(value == NULL) { CMD_ERR("%s has no class! 
Aborting...\n", rsc_id); return cib_NOTEXISTS; } value = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, value); params = create_xml_node(msg_data, XML_TAG_ATTRS); crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); key = crm_meta_name(XML_LRM_ATTR_INTERVAL); crm_xml_add(params, key, "60000"); /* 1 minute */ crm_free(key); cmd = create_request(op, msg_data, host_uname, CRM_SYSTEM_CRMD, crm_system_name, our_pid); /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ free_xml(msg_data); if(send_ipc_message(crmd_channel, cmd)) { rc = 0; } else { CMD_ERR("Could not send %s op to the crmd", op); rc = cib_connection; } free_xml(cmd); return rc; } static int delete_lrm_rsc(IPC_Channel *crmd_channel, const char *host_uname, resource_t *rsc, pe_working_set_t *data_set) { int rc = cib_ok; if(rsc == NULL) { return cib_NOTEXISTS; } else if(rsc->children) { GListPtr lpc = NULL; for(lpc = rsc->children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t*)lpc->data; delete_lrm_rsc(crmd_channel, host_uname, child, data_set); } return cib_ok; } else if(host_uname == NULL) { GListPtr lpc = NULL; for(lpc = data_set->nodes; lpc != NULL; lpc = lpc->next) { node_t *node = (node_t*)lpc->data; if(node->details->online) { delete_lrm_rsc(crmd_channel, node->details->uname, rsc, data_set); } } return cib_ok; } printf("Cleaning up %s on %s\n", rsc->id, host_uname); rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id, TRUE, data_set); if(rc == cib_ok) { char *attr_name = NULL; const char *id = rsc->id; crmd_replies_needed++; if(rsc->clone_name) { id = rsc->clone_name; } attr_name = crm_concat("fail-count", id, '-'); attrd_lazy_update('D', host_uname, attr_name, NULL, XML_CIB_TAG_STATUS, NULL, NULL); crm_free(attr_name); } return rc; } static int fail_lrm_rsc(IPC_Channel *crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { crm_warn("Failing: %s", rsc_id); #if HAVE_STRUCT_LRM_OPS_FAIL_RSC crmd_replies_needed++; #endif return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set); } static int refresh_lrm(IPC_Channel *crmd_channel, const char *host_uname) { xmlNode *cmd = NULL; int rc = cib_send_failed; cmd = create_request(CRM_OP_LRM_REFRESH, NULL, host_uname, CRM_SYSTEM_CRMD, crm_system_name, our_pid); if(send_ipc_message(crmd_channel, cmd)) { rc = 0; } free_xml(cmd); return rc; } static int move_resource( const char *rsc_id, const char *existing_node, const char *preferred_node, cib_t * cib_conn) { char *later_s = NULL; enum cib_errors rc = cib_ok; char *id = NULL; xmlNode *rule = NULL; xmlNode *expr = NULL; xmlNode *constraints = NULL; xmlNode *fragment = NULL; xmlNode *can_run = NULL; xmlNode *dont_run = NULL; fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); constraints = fragment; id = crm_concat("cli-prefer", rsc_id, '-'); can_run = create_xml_node(NULL, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(can_run, XML_ATTR_ID, id); crm_free(id); id = crm_concat("cli-standby", rsc_id, '-'); dont_run = create_xml_node(NULL, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(dont_run, XML_ATTR_ID, id); crm_free(id); if(move_lifetime) { char *life = crm_strdup(move_lifetime); char *life_mutable = life; ha_time_t *now = NULL; ha_time_t *later = NULL; ha_time_t *duration = parse_time_duration(&life_mutable); if(duration == NULL) { CMD_ERR("Invalid duration specified: %s\n", move_lifetime); CMD_ERR("Please refer to" " http://en.wikipedia.org/wiki/ISO_8601#Duration" " for examples of 
valid durations\n"); crm_free(life); return cib_invalid_argument; } now = new_ha_date(TRUE); later = add_time(now, duration); log_date(LOG_INFO, "now ", now, ha_log_date|ha_log_time); log_date(LOG_INFO, "later ", later, ha_log_date|ha_log_time); log_date(LOG_INFO, "duration", duration, ha_log_date|ha_log_time|ha_log_local); later_s = date_to_string(later, ha_log_date|ha_log_time); printf("Migration will take effect until: %s\n", later_s); free_ha_date(duration); free_ha_date(later); free_ha_date(now); crm_free(life); } if(existing_node == NULL) { crm_log_xml_notice(can_run, "Deleting"); rc = cib_conn->cmds->delete( cib_conn, XML_CIB_TAG_CONSTRAINTS, dont_run, cib_options); if(rc == cib_NOTEXISTS) { rc = cib_ok; } else if(rc != cib_ok) { goto bail; } } else { if(BE_QUIET == FALSE) { fprintf(stderr, "WARNING: Creating rsc_location constraint '%s'" " with a score of -INFINITY for resource %s" " on %s.\n", ID(dont_run), rsc_id, existing_node); CMD_ERR("\tThis will prevent %s from running" " on %s until the constraint is removed using" " the 'crm_resource -U' command or manually" " with cibadmin\n", rsc_id, existing_node); CMD_ERR("\tThis will be the case even if %s is" " the last node in the cluster\n", existing_node); CMD_ERR("\tThis message can be disabled with -Q\n"); } crm_xml_add(dont_run, "rsc", rsc_id); rule = create_xml_node(dont_run, XML_TAG_RULE); expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_concat("cli-standby-rule", rsc_id, '-'); crm_xml_add(rule, XML_ATTR_ID, id); crm_free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, MINUS_INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_concat("cli-standby-expr", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, existing_node); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); if(later_s) { expr = create_xml_node(rule, "date_expression"); id = crm_concat("cli-standby-lifetime-end",rsc_id,'-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } add_node_copy(constraints, dont_run); } if(preferred_node == NULL) { crm_log_xml_notice(can_run, "Deleting"); rc = cib_conn->cmds->delete( cib_conn, XML_CIB_TAG_CONSTRAINTS, can_run, cib_options); if(rc == cib_NOTEXISTS) { rc = cib_ok; } else if(rc != cib_ok) { goto bail; } } else { crm_xml_add(can_run, "rsc", rsc_id); rule = create_xml_node(can_run, XML_TAG_RULE); expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_concat("cli-prefer-rule", rsc_id, '-'); crm_xml_add(rule, XML_ATTR_ID, id); crm_free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_concat("cli-prefer-expr", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, preferred_node); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); if(later_s) { expr = create_xml_node(rule, "date_expression"); id = crm_concat("cli-prefer-lifetime-end", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } add_node_copy(constraints, can_run); } if(preferred_node != NULL || existing_node != NULL) { crm_log_xml_notice(fragment, "CLI Update"); rc = cib_conn->cmds->update( cib_conn, 
XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); } bail: free_xml(fragment); free_xml(dont_run); free_xml(can_run); crm_free(later_s); return rc; } static int list_resource_operations( const char *rsc_id, const char *host_uname, gboolean active, pe_working_set_t *data_set) { resource_t *rsc = NULL; int opts = pe_print_printf|pe_print_rsconly|pe_print_suppres_nl; GListPtr ops = find_operations(rsc_id, host_uname, active, data_set); GListPtr lpc = NULL; for(lpc = ops; lpc != NULL; lpc = lpc->next) { xmlNode *xml_op = (xmlNode*)lpc->data; const char *op_rsc = crm_element_value(xml_op, "resource"); const char *last = crm_element_value(xml_op, "last_run"); const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); + const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); int status = crm_parse_int(status_s, "0"); rsc = pe_find_resource(data_set->resources, op_rsc); rsc->fns->print(rsc, "", opts, stdout); fprintf(stdout, ": %s (node=%s, call=%s, rc=%s", - ID(xml_op), + op_key?op_key:ID(xml_op), crm_element_value(xml_op, XML_ATTR_UNAME), crm_element_value(xml_op, XML_LRM_ATTR_CALLID), crm_element_value(xml_op, XML_LRM_ATTR_RC)); if(last) { time_t run_at = crm_parse_int(last, "0"); fprintf(stdout, ", last-run=%s, exec=%sms\n", ctime(&run_at), crm_element_value(xml_op, "exec_time")); } fprintf(stdout, "): %s\n", op_status2text(status)); } return cib_ok; } #include "../pengine/pengine.h" static void show_location(resource_t *rsc, const char *prefix) { GListPtr lpc = NULL; GListPtr list = rsc->rsc_location; int offset = 0; if(prefix) { offset = strlen(prefix) - 2; } for(lpc = list; lpc != NULL; lpc = lpc->next) { rsc_to_node_t *cons = (rsc_to_node_t*)lpc->data; GListPtr lpc2 = NULL; for(lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { node_t *node = (node_t*)lpc2->data; char *score = score2char(node->weight); fprintf(stdout, "%s: Node %-*s (score=%s, id=%s)\n", prefix?prefix:" ", 71-offset, node->details->uname, score, cons->id); crm_free(score); } } } static void show_colocation(resource_t *rsc, gboolean dependants, gboolean recursive, int offset) { char *prefix = NULL; GListPtr lpc = NULL; GListPtr list = rsc->rsc_cons; crm_malloc0(prefix, (offset*4) + 1); memset(prefix, ' ', offset*4); if(dependants) { list = rsc->rsc_cons_lhs; } if(is_set(rsc->flags, pe_rsc_allocating)) { /* Break colocation loops */ printf("loop %s\n", rsc->id); crm_free(prefix); return; } set_bit(rsc->flags, pe_rsc_allocating); for(lpc = list; lpc != NULL; lpc = lpc->next) { rsc_colocation_t *cons = (rsc_colocation_t*)lpc->data; char *score = NULL; resource_t *peer = cons->rsc_rh; if(dependants) { peer = cons->rsc_lh; } if(is_set(peer->flags, pe_rsc_allocating)) { if(dependants == FALSE) { fprintf(stdout, "%s%-*s (id=%s - loop)\n", prefix, 80-(4*offset), peer->id, cons->id); } continue; } if(dependants && recursive) { show_colocation(peer, dependants, recursive, offset+1); } score = score2char(cons->score); if(cons->role_rh > RSC_ROLE_STARTED) { fprintf(stdout, "%s%-*s (score=%s, %s role=%s, id=%s)\n", prefix, 80-(4*offset), peer->id, score, dependants?"needs":"with", role2text(cons->role_rh), cons->id); } else { fprintf(stdout, "%s%-*s (score=%s, id=%s)\n", prefix, 80-(4*offset), peer->id, score, cons->id); } show_location(peer, prefix); crm_free(score); if(!dependants && recursive) { show_colocation(peer, dependants, recursive, offset+1); } } crm_free(prefix); } static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\t\tThis text"}, {"version", 0, 0, 
'$', "\t\tVersion information" }, {"verbose", 0, 0, 'V', "\t\tIncrease debug output"}, {"quiet", 0, 0, 'Q', "\t\tPrint only the value on stdout\n"}, {"resource", 1, 0, 'r', "\tResource ID" }, {"-spacer-",1, 0, '-', "\nQueries:"}, {"list", 0, 0, 'L', "\t\tList all resources"}, {"list-raw", 0, 0, 'l', "\tList the IDs of all instantiated resources (no groups/clones/...)"}, {"list-cts", 0, 0, 'c', NULL, 1}, {"list-operations", 0, 0, 'O', "\tList active resource operations. Optionally filtered by resource (-r) and/or node (-N)"}, {"list-all-operations", 0, 0, 'o', "List all resource operations. Optionally filtered by resource (-r) and/or node (-N)\n"}, {"query-xml", 0, 0, 'q', "\tQuery the definition of a resource"}, {"locate", 0, 0, 'W', "\t\tDisplay the current location(s) of a resource"}, {"stack", 0, 0, 'A', "\t\tDisplay the prerequisites and dependents of a resource"}, {"constraints",0, 0, 'a', "\tDisplay the (co)location constraints that apply to a resource"}, {"-spacer-", 1, 0, '-', "\nCommands:"}, {"set-parameter", 1, 0, 'p', "Set the named parameter for a resource. See also -m, --meta"}, {"get-parameter", 1, 0, 'g', "Display the named parameter for a resource. See also -m, --meta"}, {"delete-parameter",1, 0, 'd', "Delete the named parameter for a resource. See also -m, --meta"}, {"get-property", 1, 0, 'G', "Display the 'class', 'type' or 'provider' of a resource", 1}, {"set-property", 1, 0, 'S', "(Advanced) Set the class, type or provider of a resource", 1}, {"move", 0, 0, 'M', "\t\tMove a resource from its current location, optionally specifying a destination (-N) and/or a period for which it should take effect (-u)" "\n\t\t\t\tIf -N is not specified, the cluster will force the resource to move by creating a rule for the current location and a score of -INFINITY" "\n\t\t\t\tNOTE: This will prevent the resource from running on this node until the constraint is removed with -U"}, {"un-move", 0, 0, 'U', "\tRemove all constraints created by a move command"}, {"-spacer-", 1, 0, '-', "\nAdvanced Commands:"}, {"delete", 0, 0, 'D', "\t\tDelete a resource from the CIB"}, {"fail", 0, 0, 'F', "\t\tTell the cluster this resource has failed"}, {"refresh", 0, 0, 'R', "\t\t(Advanced) Refresh the CIB from the LRM"}, {"cleanup", 0, 0, 'C', "\t\t(Advanced) Delete a resource from the LRM"}, {"reprobe", 0, 0, 'P', "\t\t(Advanced) Re-check for resources started outside of the CRM\n"}, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"node", 1, 0, 'N', "\tHost uname"}, {"resource-type", 1, 0, 't', "Resource type (primitive, clone, group, ...)"}, {"parameter-value", 1, 0, 'v', "Value to use with -p, -g or -d"}, {"lifetime", 1, 0, 'u', "\tLifespan of migration constraints\n"}, {"meta", 0, 0, 'm', "\t\tModify a resource's configuration option rather than one which is passed to the resource agent script. For use with -p, -g, -d"}, {"utilization", 0, 0, 'z', "\tModify a resource's utilization attribute. For use with -p, -g, -d"}, {"set-name", 1, 0, 's', "\t(Advanced) ID of the instance_attributes object to change"}, {"nvpair", 1, 0, 'i', "\t(Advanced) ID of the nvpair object to change/delete"}, {"force", 0, 0, 'f', "\n" /* Is this actually true anymore? 
"\t\tForce the resource to move by creating a rule for the current location and a score of -INFINITY" "\n\t\tThis should be used if the resource's stickiness and constraint scores total more than INFINITY (Currently 100,000)" "\n\t\tNOTE: This will prevent the resource from running on this node until the constraint is removed with -U or the --lifetime duration expires\n"*/ }, {"xml-file", 1, 0, 'x', NULL, 1},\ /* legacy options */ {"host-uname", 1, 0, 'H', NULL, 1}, {"migrate", 0, 0, 'M', NULL, 1}, {"un-migrate", 0, 0, 'U', NULL, 1}, {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "List the configured resources:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --list", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Display the current location of 'myResource':", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --locate", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Move 'myResource' to another machine:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Move 'myResource' to a specific machine:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --un-move", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Tell the cluster that 'myResource' failed:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --fail", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Stop a 'myResource' (and anything that depends on it):", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "The cluster will not attempt to start or stop the resource under any circumstances."}, {"-spacer-", 1, 0, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."}, {"-spacer-", 1, 0, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example}, {0, 0, 0, 0} }; int main(int argc, char **argv) { pe_working_set_t data_set; xmlNode *cib_xml_copy = NULL; cib_t * cib_conn = NULL; enum cib_errors rc = cib_ok; gboolean need_cib = TRUE; int option_index = 0; int argerr = 0; int flag; crm_log_init(NULL, LOG_ERR, FALSE, FALSE, argc, argv); crm_set_options("V?$LRQDCPp:WMUr:H:h:v:t:p:g:d:i:s:G:S:fx:lmzu:FOocqN:aA", "(query|command) [options]", long_options, "Perform tasks related to cluster resources.\n Allows resources to be queried (definition and location), modified, and moved around the cluster.\n"); if(argc < 2) { crm_help('?', LSB_EXIT_EINVAL); } while 
(1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch(flag) { case 'V': cl_log_enable_stderr(TRUE); alter_debug(DEBUG_INC); break; case '$': case '?': crm_help(flag, LSB_EXIT_OK); break; case 'x': xml_file = crm_strdup(optarg); break; case 'Q': BE_QUIET = TRUE; break; case 'm': attr_set_type = XML_TAG_META_SETS; break; case 'z': attr_set_type = XML_TAG_UTILIZATION; break; case 'u': move_lifetime = crm_strdup(optarg); break; case 'f': do_force = TRUE; break; case 'i': prop_id = optarg; break; case 's': prop_set = optarg; break; case 'r': rsc_id = optarg; break; case 'v': prop_value = optarg; break; case 't': rsc_type = optarg; break; case 'R': case 'P': need_cib = FALSE; rsc_cmd = flag; break; case 'L': case 'c': case 'l': case 'q': case 'D': case 'F': case 'C': case 'W': case 'M': case 'U': case 'O': case 'o': case 'A': case 'a': rsc_cmd = flag; break; case 'p': case 'g': case 'd': case 'S': case 'G': prop_name = optarg; rsc_cmd = flag; break; case 'h': case 'H': case 'N': crm_debug_2("Option %c => %s", flag, optarg); host_uname = optarg; break; default: CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc && argv[optind] != NULL) { CMD_ERR("non-option ARGV-elements: "); while (optind < argc && argv[optind] != NULL) { CMD_ERR("%s ", argv[optind++]); ++argerr; } CMD_ERR("\n"); } if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', LSB_EXIT_GENERIC); } crm_malloc0(our_pid, 11); if(our_pid != NULL) { snprintf(our_pid, 10, "%d", getpid()); our_pid[10] = '\0'; } if(do_force) { crm_debug("Forcing..."); cib_options |= cib_quorum_override; } set_working_set_defaults(&data_set); if(need_cib) { resource_t *rsc = NULL; if(xml_file != NULL) { cib_xml_copy = filename2xml(xml_file); } else { cib_conn = cib_new(); rc = cib_conn->cmds->signon( cib_conn, crm_system_name, cib_command); if(rc != cib_ok) { CMD_ERR("Error signing on to the CIB service: %s\n", cib_error2string(rc)); return rc; } cib_xml_copy = get_cib_copy(cib_conn); } if(cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) { rc = cib_STALE; goto bail; } data_set.input = cib_xml_copy; data_set.now = new_ha_date(TRUE); cluster_status(&data_set); if(rsc_id) { rsc = find_rsc_or_clone(rsc_id, &data_set); } if(rsc == NULL) { rc = cib_NOTEXISTS; } } if(rsc_cmd == 'R' || rsc_cmd == 'C' || rsc_cmd == 'F' || rsc_cmd == 'P') { GCHSource *src = NULL; src = init_client_ipc_comms(CRM_SYSTEM_CRMD, resource_ipc_callback, NULL, &crmd_channel); if(src == NULL) { CMD_ERR("Error signing on to the CRMd service\n"); rc = cib_connection; goto bail; } send_hello_message( crmd_channel, our_pid, crm_system_name, "0", "1"); set_IPC_Channel_dnotify(src, resource_ipc_connection_destroy); } if(rsc_cmd == 'L') { rc = cib_ok; do_find_resource_list(&data_set, FALSE); } else if(rsc_cmd == 'l') { int found = 0; GListPtr lpc = NULL; rc = cib_ok; for(lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t*)lpc->data; found++; print_raw_rsc(rsc); } if(found == 0) { printf("NO resources configured\n"); rc = cib_NOTEXISTS; goto bail; } } else if(rsc_cmd == 'A' || rsc_cmd == 'a') { GListPtr lpc = NULL; resource_t *rsc = pe_find_resource(data_set.resources, rsc_id); xmlNode * cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input); if(rsc == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } unpack_constraints(cib_constraints, &data_set); for(lpc = data_set.resources; lpc != NULL; lpc = 
lpc->next) { resource_t *r = (resource_t*)lpc->data; clear_bit(r->flags, pe_rsc_allocating); } show_colocation(rsc, TRUE, rsc_cmd=='A', 1); fprintf(stdout, "* %s\n", rsc->id); show_location(rsc, NULL); for(lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t*)lpc->data; clear_bit(r->flags, pe_rsc_allocating); } show_colocation(rsc, FALSE, rsc_cmd=='A', 1); } else if(rsc_cmd == 'c') { int found = 0; GListPtr lpc = NULL; rc = cib_ok; for(lpc = data_set.resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t*)lpc->data; print_cts_rsc(rsc); found++; } print_cts_constraints(&data_set); } else if(rsc_cmd == 'C') { resource_t *rsc = pe_find_resource(data_set.resources, rsc_id); rc = delete_lrm_rsc(crmd_channel, host_uname, rsc, &data_set); if(rc == cib_ok) { start_mainloop(); } } else if(rsc_cmd == 'F') { rc = fail_lrm_rsc(crmd_channel, host_uname, rsc_id, &data_set); if(rc == cib_ok) { start_mainloop(); } } else if(rsc_cmd == 'O') { rc = list_resource_operations(rsc_id, host_uname, TRUE, &data_set); } else if(rsc_cmd == 'o') { rc = list_resource_operations(rsc_id, host_uname, FALSE, &data_set); } else if(rc == cib_NOTEXISTS) { CMD_ERR("Resource %s not found: %s\n", crm_str(rsc_id), cib_error2string(rc)); } else if(rsc_cmd == 'W') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = do_find_resource(rsc_id, NULL, &data_set); } else if(rsc_cmd == 'q') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = dump_resource(rsc_id, &data_set); } else if(rsc_cmd == 'U') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = move_resource(rsc_id, NULL, NULL, cib_conn); } else if(rsc_cmd == 'M') { node_t *dest = NULL; node_t *current = NULL; const char *current_uname = NULL; resource_t *rsc = pe_find_resource(data_set.resources, rsc_id); if(rsc != NULL && rsc->running_on != NULL) { current = rsc->running_on->data; if(current != NULL) { current_uname = current->details->uname; } } if(host_uname != NULL) { dest = pe_find_node(data_set.nodes, host_uname); } if(rsc == NULL) { CMD_ERR("Resource %s not moved:" " not found\n", rsc_id); } else if(rsc->variant == pe_native && g_list_length(rsc->running_on) > 1) { CMD_ERR("Resource %s not moved:" " active on multiple nodes\n", rsc_id); } else if(host_uname != NULL && dest == NULL) { CMD_ERR("Error performing operation: " "%s is not a known node\n", host_uname); rc = cib_NOTEXISTS; } else if(host_uname != NULL && safe_str_eq(current_uname, host_uname)) { CMD_ERR("Error performing operation: " "%s is already active on %s\n", rsc_id, host_uname); } else if(current_uname != NULL && (do_force || host_uname == NULL)) { rc = move_resource(rsc_id, current_uname, host_uname, cib_conn); } else if(host_uname != NULL) { rc = move_resource( rsc_id, NULL, host_uname, cib_conn); } else { CMD_ERR("Resource %s not moved: " "not-active and no preferred location" " specified.\n", rsc_id); rc = cib_missing; } } else if(rsc_cmd == 'G') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = dump_resource_prop(rsc_id, prop_name, &data_set); } else if(rsc_cmd == 'S') { xmlNode *msg_data = NULL; if(prop_value == NULL || strlen(prop_value) == 0) { CMD_ERR("You need to supply a value with the -v option\n"); rc = CIBRES_MISSING_FIELD; goto bail; } else if(cib_conn == NULL) { rc = cib_connection; goto bail; } if(rsc_id == NULL) { 
CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } CRM_LOG_ASSERT(rsc_type != NULL); CRM_LOG_ASSERT(prop_name != NULL); CRM_LOG_ASSERT(prop_value != NULL); msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); crm_xml_add(msg_data, prop_name, prop_value); rc = cib_conn->cmds->modify( cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else if(rsc_cmd == 'g') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = dump_resource_attr(rsc_id, prop_name, &data_set); } else if(rsc_cmd == 'p') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } if(prop_value == NULL || strlen(prop_value) == 0) { CMD_ERR("You need to supply a value with the -v option\n"); rc = CIBRES_MISSING_FIELD; goto bail; } rc = set_resource_attr(rsc_id, prop_set, prop_id, prop_name, prop_value, cib_conn, &data_set); } else if(rsc_cmd == 'd') { if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } rc = delete_resource_attr(rsc_id, prop_set, prop_id, prop_name, cib_conn, &data_set); } else if(rsc_cmd == 'P') { xmlNode *cmd = NULL; cmd = create_request(CRM_OP_REPROBE, NULL, host_uname, CRM_SYSTEM_CRMD, crm_system_name, our_pid); if(send_ipc_message(crmd_channel, cmd)) { start_mainloop(); } free_xml(cmd); } else if(rsc_cmd == 'R') { rc = refresh_lrm(crmd_channel, host_uname); if(rc == cib_ok) { start_mainloop(); } } else if(rsc_cmd == 'D') { xmlNode *msg_data = NULL; if(rsc_id == NULL) { CMD_ERR("Must supply a resource id with -r\n"); rc = cib_NOTEXISTS; goto bail; } if(rsc_type == NULL) { CMD_ERR("You need to specify a resource type with -t"); rc = cib_NOTEXISTS; goto bail; } else if(cib_conn == NULL) { rc = cib_connection; goto bail; } msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); rc = cib_conn->cmds->delete( cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else { CMD_ERR("Unknown command: %c\n", rsc_cmd); } bail: if(cib_conn != NULL) { cleanup_alloc_calculations(&data_set); cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } crm_xml_cleanup(); if(rc == cib_no_quorum) { CMD_ERR("Error performing operation: %s\n", cib_error2string(rc)); CMD_ERR("Try using -f\n"); } else if(rc != cib_ok) { CMD_ERR("Error performing operation: %s\n", cib_error2string(rc)); } return rc; } diff --git a/tools/crm_ticket b/tools/crm_ticket new file mode 100755 index 0000000000..5804a08164 --- /dev/null +++ b/tools/crm_ticket @@ -0,0 +1,79 @@ +#!/bin/bash + +options="" +ticket="" +granted="" +last_granted="" + +TEMP=`getopt -o DGQVThU:v:i:t: --long help,version,ticket:,attr-value:,delete-attr,get-value,attr-id:,quiet,time \ + -n 'crm_ticket' -- "$@"` + +if [ $? != 0 ] ; then echo "crm_ticket - A convenience wrapper for crm_attribute"; echo ""; crm_attribute -?; exit 1 ; fi + +# Note the quotes around `$TEMP': they are essential! 
+eval set -- "$TEMP" + +while true ; do + case "$1" in + -v|--attr-value) options="$options $1 $2"; granted=$2; shift; shift;; + -i|--attr-id) options="$options $1 $2"; shift; shift;; + -Q|--quiet|-D|--delete-attr|-G|--get-value|-V) options="$options $1"; shift;; + -t|--ticket-id) ticket=$2; shift; shift;; + -T|--time) last_granted="time"; shift;; + --version) crm_attribute --version; exit 0;; + -h|--help) + echo "crm_ticket - A convenience wrapper for crm_attribute"; + echo "Grant or revoke the specified ticket for the cluster"; + echo ""; + echo "Usage: crm_ticket -t ticket_name command [options]"; + echo "Options:" + echo " -h, --help This text" + echo " --version Version information" + echo " -V, --verbose Increase debug output" + echo " -q, --quiet Print only the value on stdout" + echo "" + echo " -t, --ticket-id=value The ticket to update" + echo "" + echo "Commands:" + echo " -G, --query Query if the specified ticket is granted or not" + echo " -v, --update=value Grant/Revoke the specified ticket" + echo " -D, --delete Delete the granting/revoking record" + echo " -T, --time Query the time of last granted the specified ticket" + echo "" + echo "Additional Options:" + echo " -i, --id=value (Advanced) The ID used to identify the attribute" + exit 0;; + --) shift ; break ;; + *) echo "Unknown option: $1. See --help for details." exit 1;; + esac +done + +if [ X$last_granted != X ]; then + options="$options -n last-granted-$ticket -G -d -1" + crm_attribute $options -t tickets + rc=$? + exit $rc +else + options="$options -n granted-ticket-$ticket" +fi + +case "$granted" in + true|yes|1) + crm_attribute $options -t tickets >/dev/null 2>&1 + rc=$? + if [ $rc = 0 ]; then + options="$options -n last-granted-$ticket -v `date +%s`" + crm_attribute $options -t tickets >/dev/null 2>&1 + rc=$? + exit $rc + else + echo "Failed to grant ticket $ticket" + exit $rc + fi + ;; + *) + crm_attribute $options -t tickets -d false + rc=$? + exit $rc + ;; +esac diff --git a/tools/regression.exp b/tools/regression.exp index f266ffae43..d277c9de03 100755 --- a/tools/regression.exp +++ b/tools/regression.exp @@ -1,808 +1,969 @@ Setting up shadow instance A new shadow instance was created. To begin using it paste the following into your shell: CIB_shadow=tools-regression ; export CIB_shadow The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed. 
* Passed: cibadmin - Require --force for CIB erasure * Passed: cibadmin - Allow CIB erasure with --force * Passed: cibadmin - Query CIB * Passed: crm_attribute - Set cluster option * Passed: cibadmin - Query new cluster option * Passed: cibadmin - Query cluster options * Passed: cibadmin - Delete nvpair Call failed: The object already exists * Passed: cibadmin - Create operaton should fail with: -21, The object already exists * Passed: cibadmin - Modify cluster options section * Passed: cibadmin - Query updated cluster option * Passed: crm_attribute - Set duplicate cluster option Please choose from one of the matches above and suppy the 'id' with --attr-id * Passed: crm_attribute - Setting multiply defined cluster option should fail with -216, Could not set cluster option * Passed: crm_attribute - Set cluster option with -s Deleted crm_config option: id=(null) name=cluster-delay * Passed: crm_attribute - Delete cluster option with -i * Passed: cibadmin - Create node entry * Passed: cibadmin - Create node status entry * Passed: crm_attribute - Create node attribute * Passed: cibadmin - Query new node attribute Digest: * Passed: cibadmin - Digest calculation Call failed: Update was older than existing configuration * Passed: cibadmin - Replace operation should fail with: -45, Update was older than existing configuration Error performing operation: The object/attribute does not exist scope=status name=standby value=off * Passed: crm_standby - Default standby value * Passed: crm_standby - Set standby status scope=nodes name=standby value=true * Passed: crm_standby - Query standby value Deleted nodes attribute: id=nodes-clusterNode-UUID-standby name=standby * Passed: crm_standby - Delete standby value * Passed: cibadmin - Create a resource * Passed: crm_resource - Create a resource meta attribute false * Passed: crm_resource - Query a resource meta attribute Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed * Passed: crm_resource - Remove a resource meta attribute * Passed: crm_resource - Create a resource attribute dummy (ocf::pacemaker:Dummy) Stopped * Passed: crm_resource - List the configured resources * Passed: crm_resource - Set a resource's fail-count Resource dummy not moved: not-active and no preferred location specified. 
Error performing operation: cib object missing * Passed: crm_resource - Require a destination when migrating a resource that is stopped Error performing operation: i.dont.exist is not a known node Error performing operation: The object/attribute does not exist * Passed: crm_resource - Don't support migration to non-existant locations * Passed: crm_resource - Migrate a resource * Passed: crm_resource - Un-migrate a resource +scope=tickets name=granted-ticket-ticketA value=false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +* Passed: crm_ticket - Default granted-ticket value + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +* Passed: crm_ticket - Set granted-ticket value +scope=tickets name=granted-ticket-ticketA value=false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +* Passed: crm_ticket - Query granted-ticket value +Deleted tickets option: id=status-tickets-granted-ticket-ticketA name=granted-ticket-ticketA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +* Passed: crm_ticket - Delete granted-ticket value diff --git a/tools/regression.sh b/tools/regression.sh index 83beb5606f..3fc9fb12a3 100755 --- a/tools/regression.sh +++ b/tools/regression.sh @@ -1,196 +1,208 @@ #!/bin/bash : ${shadow=tools-regression} test_home=`dirname $0` num_errors=0 num_passed=0 GREP_OPTIONS= function assert() { rc=$1; shift target=$1; shift app=$1; shift msg=$1; shift exit_code=$1; shift cibadmin -Q if [ $rc -ne $target ]; then num_errors=`expr $num_errors + 1` printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$msg" if [ ! -z $exit_code ]; then echo "Aborting tests" exit $exit_code fi exit 1 else printf "* Passed: %-14s - %s\n" $app "$msg" num_passed=`expr $num_passed + 1` fi } function usage() { echo "Usage: ./regression.sh [-s(ave)] [-x] [-v(erbose)]" exit $1 } done=0 do_save=0 VALGRIND_CMD= while test "$done" = "0"; do case "$1" in -V|--verbose) verbose=1; shift;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind -q --show-reachable=no --leak-check=full --trace-children=no --time-stamp=yes --num-callers=20 --suppressions=$test_home/cli.supp" shift;; -x) set -x; shift;; -s) do_save=1; shift;; -p) PATH="$2:$PATH"; export PATH; shift 1;; -?) usage 0;; -*) echo "unknown option: $1"; usage 1;; *) done=1;; esac done if [ "x$VALGRIND_CMD" = "x" -a -x $test_home/crm_simulate ]; then echo "Using local binaries from: $test_home" PATH="$test_home:$PATH" fi function test_tools() { export CIB_shadow_dir=$test_home $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow export CIB_shadow=$shadow $VALGRIND_CMD cibadmin -Q $VALGRIND_CMD cibadmin -E assert $? 1 cibadmin "Require --force for CIB erasure" $VALGRIND_CMD cibadmin -E --force assert $? 0 cibadmin "Allow CIB erasure with --force" $VALGRIND_CMD cibadmin -Q > /tmp/$$.existing.xml assert $? 0 cibadmin "Query CIB" $VALGRIND_CMD crm_attribute -n cluster-delay -v 60s assert $? 0 crm_attribute "Set cluster option" $VALGRIND_CMD cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay assert $? 0 cibadmin "Query new cluster option" $VALGRIND_CMD cibadmin -Q -o crm_config > /tmp/$$.opt.xml assert $? 0 cibadmin "Query cluster options" $VALGRIND_CMD cibadmin -D -o crm_config --xml-text '' assert $? 0 cibadmin "Delete nvpair" $VALGRIND_CMD cibadmin -C -o crm_config --xml-file /tmp/$$.opt.xml assert $? 
21 cibadmin "Create operaton should fail with: -21, The object already exists" $VALGRIND_CMD cibadmin -M -o crm_config --xml-file /tmp/$$.opt.xml assert $? 0 cibadmin "Modify cluster options section" $VALGRIND_CMD cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay assert $? 0 cibadmin "Query updated cluster option" $VALGRIND_CMD crm_attribute -n cluster-delay -v 40s -s duplicate assert $? 0 crm_attribute "Set duplicate cluster option" $VALGRIND_CMD crm_attribute -n cluster-delay -v 30s assert $? 216 crm_attribute "Setting multiply defined cluster option should fail with -216, Could not set cluster option" $VALGRIND_CMD crm_attribute -n cluster-delay -v 30s -s duplicate assert $? 0 crm_attribute "Set cluster option with -s" $VALGRIND_CMD crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay assert $? 0 crm_attribute "Delete cluster option with -i" $VALGRIND_CMD cibadmin -C -o nodes --xml-text '' assert $? 0 cibadmin "Create node entry" $VALGRIND_CMD cibadmin -C -o status --xml-text '' assert $? 0 cibadmin "Create node status entry" $VALGRIND_CMD crm_attribute -n ram -v 1024M -U clusterNode-UNAME -t nodes assert $? 0 crm_attribute "Create node attribute" $VALGRIND_CMD cibadmin -Q -o nodes | grep clusterNode-UUID-ram assert $? 0 cibadmin "Query new node attribute" $VALGRIND_CMD cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null assert $? 0 cibadmin "Digest calculation" # This update will fail because it has version numbers $VALGRIND_CMD cibadmin -R --xml-file /tmp/$$.existing.xml assert $? 45 cibadmin "Replace operation should fail with: -45, Update was older than existing configuration" crm_standby -N clusterNode-UNAME -G assert $? 0 crm_standby "Default standby value" crm_standby -N clusterNode-UNAME -v true assert $? 0 crm_standby "Set standby status" crm_standby -N clusterNode-UNAME -G assert $? 0 crm_standby "Query standby value" crm_standby -N clusterNode-UNAME -D assert $? 0 crm_standby "Delete standby value" $VALGRIND_CMD cibadmin -C -o resources --xml-text '' assert $? 0 cibadmin "Create a resource" $VALGRIND_CMD crm_resource -r dummy --meta -p is-managed -v false assert $? 0 crm_resource "Create a resource meta attribute" $VALGRIND_CMD crm_resource -r dummy --meta -g is-managed assert $? 0 crm_resource "Query a resource meta attribute" $VALGRIND_CMD crm_resource -r dummy --meta -d is-managed assert $? 0 crm_resource "Remove a resource meta attribute" $VALGRIND_CMD crm_resource -r dummy -p delay -v 10s assert $? 0 crm_resource "Create a resource attribute" $VALGRIND_CMD crm_resource -L assert $? 0 crm_resource "List the configured resources" crm_failcount -r dummy -v 10 -N clusterNode-UNAME assert $? 0 crm_resource "Set a resource's fail-count" $VALGRIND_CMD crm_resource -r dummy -M assert $? 244 crm_resource "Require a destination when migrating a resource that is stopped" $VALGRIND_CMD crm_resource -r dummy -M -N i.dont.exist assert $? 234 crm_resource "Don't support migration to non-existant locations" $VALGRIND_CMD crm_resource -r dummy -M -N clusterNode-UNAME assert $? 0 crm_resource "Migrate a resource" $VALGRIND_CMD crm_resource -r dummy -U assert $? 0 crm_resource "Un-migrate a resource" + + crm_ticket -t ticketA -G + assert $? 0 crm_ticket "Default granted-ticket value" + + crm_ticket -t ticketA -v false + assert $? 0 crm_ticket "Set granted-ticket value" + + crm_ticket -t ticketA -G + assert $? 0 crm_ticket "Query granted-ticket value" + + crm_ticket -t ticketA -D + assert $? 
0 crm_ticket "Delete granted-ticket value" } test_tools 2>&1 | sed s/cib-last-written.*\>/\>/ > $test_home/regression.out rc=$? if [ $do_save = 1 ]; then cp $test_home/regression.out $test_home/regression.exp fi grep -e "^*" $test_home/regression.out diff -u $test_home/regression.exp $test_home/regression.out diff_rc=$? if [ $rc != 0 ]; then echo Tests failed exit 1 elif [ $diff_rc != 0 ]; then echo Tests passed but diff failed exit 2 else echo Tests passed exit 0 fi diff --git a/xml/constraints-1.1.rng b/xml/constraints-1.1.rng index 44ab338959..034028d388 100644 --- a/xml/constraints-1.1.rng +++ b/xml/constraints-1.1.rng @@ -1,202 +1,233 @@ + + + + + + + + + + + + + + + + + + + + + + stop + demote + fence + freeze + + + + + + start promote demote stop Stopped Started Master Slave Optional Mandatory Serialize