diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 7941440458..209f34cba1 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2151 +1,2151 @@
/*
* Copyright (C) 2004 Andrew Beekhof
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
pe_working_set_t *pe_dataset = NULL;
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
pe_working_set_t * data_set);
static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key,
gboolean include_disabled);
/* Report whether fencing may be executed against 'node' (cluster-wide
 * fencing configuration permitting), given the current quorum state. */
bool pe_can_fence(pe_working_set_t * data_set, node_t *node)
{
    /* Fencing disabled, or no fencing devices configured: never */
    if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
        return FALSE;
    }
    if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) {
        return FALSE;
    }

    /* With quorum (or when quorum loss is ignored): always */
    if (is_set(data_set->flags, pe_flag_have_quorum)) {
        return TRUE;
    }
    if (data_set->no_quorum_policy == no_quorum_ignore) {
        return TRUE;
    }

    /* Without quorum, we can only fence nodes in our own membership */
    if (node == NULL) {
        return FALSE;
    }
    if (node->details->online) {
        crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
        return TRUE;
    }

    crm_trace("Cannot fence %s", node->details->uname);
    return FALSE;
}
/* Allocate a shallow copy of a node: weight/flags are per-copy while the
 * 'details' structure remains shared with the original. */
node_t *
node_copy(const node_t *this_node)
{
    node_t *copy = NULL;

    CRM_CHECK(this_node != NULL, return NULL);

    copy = calloc(1, sizeof(node_t));
    CRM_ASSERT(copy != NULL);

    crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, copy);

    copy->rsc_discover_mode = this_node->rsc_discover_mode;
    copy->weight = this_node->weight;
    copy->fixed = this_node->fixed;
    copy->details = this_node->details;     /* shared, not duplicated */
    return copy;
}
/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
{
    GHashTable *result = hash;
    node_t *other_node = NULL;
    GListPtr gIter = list;

    GHashTableIter iter;
    node_t *node = NULL;

    /* Pass 1: ban every node in 'hash' that is absent from 'list'; when a
     * node appears in both and merge_scores is set, fold the list entry's
     * score into the hash entry */
    g_hash_table_iter_init(&iter, hash);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {

        other_node = pe_find_node_id(list, node->details->id);
        if (other_node == NULL) {
            node->weight = -INFINITY;
        } else if (merge_scores) {
            node->weight = merge_weights(node->weight, other_node->weight);
        }
    }

    /* Pass 2: add every node in 'list' that is absent from 'hash' to the
     * hash with a -INFINITY score (so it is banned too) */
    for (; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        other_node = pe_hash_table_lookup(result, node->details->id);

        if (other_node == NULL) {
            node_t *new_node = node_copy(node);

            new_node->weight = -INFINITY;
            g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
        }
    }
}
/* Build a hash table of node copies keyed by node id from a list of nodes.
 * Values are freed on destruction; keys point into the values' details. */
GHashTable *
node_hash_from_list(GListPtr list)
{
    GListPtr item = NULL;
    GHashTable *table =
        g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);

    for (item = list; item != NULL; item = item->next) {
        node_t *copy = node_copy((node_t *) item->data);

        g_hash_table_insert(table, (gpointer) copy->details->id, copy);
    }
    return table;
}
/* Duplicate a list of nodes (shallow node copies).
 *
 * \param list1   list of node_t* to copy
 * \param reset   if TRUE, reset each copy's weight to 0
 * \param filter  if TRUE, omit nodes with a negative weight
 *
 * \return newly allocated list (caller frees list and nodes)
 */
GListPtr
node_list_dup(GListPtr list1, gboolean reset, gboolean filter)
{
    GListPtr result = NULL;
    GListPtr gIter = list1;

    for (; gIter != NULL; gIter = gIter->next) {
        node_t *new_node = NULL;
        node_t *this_node = (node_t *) gIter->data;

        if (filter && this_node->weight < 0) {
            continue;
        }

        new_node = node_copy(this_node);
        if (reset) {
            new_node->weight = 0;
        }
        /* node_copy() asserts on allocation failure, so new_node cannot be
         * NULL here; the previous code checked it only after it had already
         * been dereferenced, so the check was dead and has been removed */
        result = g_list_prepend(result, new_node);
    }
    return result;
}
/* GCompareFunc: order nodes alphabetically by uname */
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
    const char *name_a = ((const node_t *) a)->details->uname;
    const char *name_b = ((const node_t *) b)->details->uname;

    return strcmp(name_a, name_b);
}
/*
 * Dump allocation scores for a resource (or raw node scores when rsc is
 * NULL), either to stdout in sorted order (level == 0, for regression
 * tests) or to the trace log attributed to file/function/line.
 * Recurses into the resource's children; orphans are skipped.
 */
void
dump_node_scores_worker(int level, const char *file, const char *function, int line,
                        resource_t * rsc, const char *comment, GHashTable * nodes)
{
    GHashTable *hash = nodes;
    GHashTableIter iter;
    node_t *node = NULL;

    if (rsc) {
        hash = rsc->allowed_nodes;
    }
    if (rsc && is_set(rsc->flags, pe_rsc_orphan)) {
        /* Don't show the allocation scores for orphans */
        return;
    }

    if (level == 0) {
        char score[128];
        int len = sizeof(score);
        /* For now we want this in sorted order to keep the regression tests happy */
        GListPtr gIter = NULL;
        GListPtr list = g_hash_table_get_values(hash);

        list = g_list_sort(list, sort_node_uname);
        gIter = list;
        for (; gIter != NULL; gIter = gIter->next) {
            node_t *node = (node_t *) gIter->data;

            /* This function is called a whole lot, use stack allocated score */
            score2char_stack(node->weight, score, len);

            if (rsc) {
                printf("%s: %s allocation score on %s: %s\n",
                       comment, rsc->id, node->details->uname, score);
            } else {
                printf("%s: %s = %s\n", comment, node->details->uname, score);
            }
        }
        g_list_free(list);

    } else if (hash) {
        char score[128];
        int len = sizeof(score);

        g_hash_table_iter_init(&iter, hash);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            /* This function is called a whole lot, use stack allocated score */
            score2char_stack(node->weight, score, len);

            if (rsc) {
                do_crm_log_alias(LOG_TRACE, file, function, line,
                                 "%s: %s allocation score on %s: %s", comment, rsc->id,
                                 node->details->uname, score);
            } else {
                /* Fixed: this previously passed "line + 1", attributing the
                 * message to the wrong source line (inconsistent with the
                 * rsc branch above) */
                do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s = %s", comment,
                                 node->details->uname, score);
            }
        }
    }

    if (rsc && rsc->children) {
        GListPtr gIter = NULL;

        gIter = rsc->children;
        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child = (resource_t *) gIter->data;

            dump_node_scores_worker(level, file, function, line, child, comment, nodes);
        }
    }
}
/* GHFunc: append " key=value" to the string pointed at by user_data,
 * reallocating it as needed (the previous buffer is freed). */
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
    char **dump_text = user_data;
    size_t needed = strlen(*dump_text) + strlen(" ") + strlen(key)
                    + strlen("=") + strlen(value) + 1;
    char *joined = calloc(1, needed);

    snprintf(joined, needed, "%s %s=%s", *dump_text, (char *)key, (char *)value);
    free(*dump_text);
    *dump_text = joined;
}
/* Print (level 0) or trace-log a node's utilization capacity as a single
 * "comment: node capacity: k1=v1 k2=v2 ..." line. */
void
dump_node_capacity(int level, const char *comment, node_t * node)
{
    size_t needed = strlen(comment) + strlen(": ") + strlen(node->details->uname)
                    + strlen(" capacity:") + 1;
    char *dump_text = calloc(1, needed);

    snprintf(dump_text, needed, "%s: %s capacity:", comment, node->details->uname);
    g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);

    if (level == 0) {
        fprintf(stdout, "%s\n", dump_text);
    } else {
        crm_trace("%s", dump_text);
    }
    free(dump_text);
}
/* Print (level 0) or trace-log a resource's utilization on a node as a
 * single "comment: rsc utilization on node: k1=v1 ..." line. */
void
dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node)
{
    size_t needed = strlen(comment) + strlen(": ") + strlen(rsc->id)
                    + strlen(" utilization on ")
                    + strlen(node->details->uname) + strlen(":") + 1;
    char *dump_text = calloc(1, needed);

    snprintf(dump_text, needed, "%s: %s utilization on %s:", comment, rsc->id,
             node->details->uname);
    g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);

    if (level == 0) {
        fprintf(stdout, "%s\n", dump_text);
    } else {
        crm_trace("%s", dump_text);
    }
    free(dump_text);
}
/* GCompareFunc: order resources by descending sort_index; NULLs sort last */
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
    const resource_t *r1 = (const resource_t *)a;
    const resource_t *r2 = (const resource_t *)b;

    if (a == NULL) {
        return (b == NULL) ? 0 : 1;
    }
    if (b == NULL) {
        return -1;
    }

    if (r1->sort_index > r2->sort_index) {
        return -1;
    }
    if (r1->sort_index < r2->sort_index) {
        return 1;
    }
    return 0;
}
/* GCompareFunc: order resources by descending priority; NULLs sort last */
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
    const resource_t *r1 = (const resource_t *)a;
    const resource_t *r2 = (const resource_t *)b;

    if (a == NULL) {
        return (b == NULL) ? 0 : 1;
    }
    if (b == NULL) {
        return -1;
    }

    if (r1->priority > r2->priority) {
        return -1;
    }
    if (r1->priority < r2->priority) {
        return 1;
    }
    return 0;
}
/*
 * Create (or find and update) the action for a resource/task/node triple.
 *
 * When save_action is TRUE, an existing matching action is reused if one is
 * found, and newly created actions are recorded in data_set->actions (and
 * rsc->actions when rsc is given).  The action's optional and runnable
 * flags are (re)computed from node state, quorum policy and the resource's
 * "needs" requirement.
 *
 * Takes ownership of 'key': it is either stored as the action uuid (copied)
 * and then freed, or freed on the error paths.
 */
action_t *
custom_action(resource_t * rsc, char *key, const char *task,
              node_t * on_node, gboolean optional, gboolean save_action,
              pe_working_set_t * data_set)
{
    action_t *action = NULL;
    GListPtr possible_matches = NULL;

    CRM_CHECK(key != NULL, return NULL);
    CRM_CHECK(task != NULL, free(key); return NULL);

    /* Look for an existing action to reuse */
    if (save_action && rsc != NULL) {
        possible_matches = find_actions(rsc->actions, key, on_node);
    } else if (save_action) {
#if 0
        action = g_hash_table_lookup(data_set->singletons, key);
#else
        /* More expensive but takes 'node' into account */
        possible_matches = find_actions(data_set->actions, key, on_node);
#endif
    }

    if (data_set->singletons == NULL) {
        data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
    }

    if (possible_matches != NULL) {
        if (g_list_length(possible_matches) > 1) {
            pe_warn("Action %s for %s on %s exists %d times",
                    task, rsc ? rsc->id : "",
                    on_node ? on_node->details->uname : "", g_list_length(possible_matches));
        }

        action = g_list_nth_data(possible_matches, 0);
        pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s",
                     action->id, task, rsc ? rsc->id : "",
                     on_node ? on_node->details->uname : "");
        g_list_free(possible_matches);
    }

    if (action == NULL) {
        /* No existing action: create and initialize a new one */
        if (save_action) {
            pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d",
                         optional ? "" : " mandatory", data_set->action_id, key,
                         rsc ? rsc->id : "", on_node ? on_node->details->uname : "", optional);
        }

        action = calloc(1, sizeof(action_t));
        if (save_action) {
            action->id = data_set->action_id++;
        } else {
            action->id = 0;
        }
        action->rsc = rsc;
        CRM_ASSERT(task != NULL);
        action->task = strdup(task);
        if (on_node) {
            action->node = node_copy(on_node);
        }
        action->uuid = strdup(key);

        pe_set_action_bit(action, pe_action_runnable);
        if (optional) {
            pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
            pe_set_action_bit(action, pe_action_optional);
        } else {
            pe_clear_action_bit(action, pe_action_optional);
            pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
        }

        /*
           Implied by calloc()...
           action->actions_before = NULL;
           action->actions_after = NULL;

           action->pseudo = FALSE;
           action->dumped = FALSE;
           action->processed = FALSE;
           action->seen_count = 0;
         */

        action->extra = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free);
        action->meta = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free);

        if (save_action) {
            data_set->actions = g_list_prepend(data_set->actions, action);
            /* resource-less actions are tracked as singletons */
            if (rsc == NULL) {
                g_hash_table_insert(data_set->singletons, action->uuid, action);
            }
        }

        if (rsc != NULL) {
            /* Unpack the matching <op> configuration into the action */
            action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);

            unpack_operation(action, action->op_entry, rsc->container, data_set);

            if (save_action) {
                rsc->actions = g_list_prepend(rsc->actions, action);
            }
        }

        if (save_action) {
            pe_rsc_trace(rsc, "Action %d created", action->id);
        }
    }

    /* A mandatory request always clears optional, even on a reused action */
    if (optional == FALSE) {
        pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
        pe_clear_action_bit(action, pe_action_optional);
    }

    if (rsc != NULL) {
        enum action_tasks a_task = text2task(action->task);
        int warn_level = LOG_TRACE;

        if (save_action) {
            warn_level = LOG_WARNING;
        }

        /* Once a node is known, evaluate the op's instance attributes with
         * that node's attributes in scope (done at most once per action) */
        if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
            && action->node != NULL && action->op_entry != NULL) {
            pe_set_action_bit(action, pe_action_have_node_attrs);
            unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS,
                                       action->node->details->attrs,
                                       action->extra, NULL, FALSE, data_set->now);
        }

        /* (Re)compute runnable/optional from node state and quorum policy */
        if (is_set(action->flags, pe_action_pseudo)) {
            /* leave untouched */

        } else if (action->node == NULL) {
            pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
            pe_clear_action_bit(action, pe_action_runnable);

        } else if (is_not_set(rsc->flags, pe_rsc_managed)
                   && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) {
            /* non-recurring actions on unmanaged resources become optional */
            crm_debug("Action %s (unmanaged)", action->uuid);
            pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
            pe_set_action_bit(action, pe_action_optional);
            /* action->runnable = FALSE; */

        } else if (action->node->details->online == FALSE) {
            pe_clear_action_bit(action, pe_action_runnable);
            do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
                       action->uuid, action->node->details->uname);
            if (is_set(action->rsc->flags, pe_rsc_managed)
                && save_action && a_task == stop_rsc) {
                pe_fence_node(data_set, action->node, "because node is unclean");
            }

        } else if (action->node->details->pending) {
            pe_clear_action_bit(action, pe_action_runnable);
            do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
                       action->uuid, action->node->details->uname);

        } else if (action->needs == rsc_req_nothing) {
            pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
            pe_set_action_bit(action, pe_action_runnable);
#if 0
            /*
             * No point checking this
             * - if we dont have quorum we can't stonith anyway
             */
        } else if (action->needs == rsc_req_stonith) {
            crm_trace("Action %s requires only stonith", action->uuid);
            action->runnable = TRUE;
#endif
        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                   && data_set->no_quorum_policy == no_quorum_stop) {
            pe_clear_action_bit(action, pe_action_runnable);
            crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);

        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                   && data_set->no_quorum_policy == no_quorum_freeze) {
            /* freeze: only actions on already-active resources may proceed */
            pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
            if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
                pe_clear_action_bit(action, pe_action_runnable);
                pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
                             action->node->details->uname, action->uuid);
            }

        } else {
            pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
            pe_set_action_bit(action, pe_action_runnable);
        }

        /* Track per-resource start/stop intent for later stages */
        if (save_action) {
            switch (a_task) {
                case stop_rsc:
                    set_bit(rsc->flags, pe_rsc_stopping);
                    break;
                case start_rsc:
                    clear_bit(rsc->flags, pe_rsc_starting);
                    if (is_set(action->flags, pe_action_runnable)) {
                        set_bit(rsc->flags, pe_rsc_starting);
                    }
                    break;
                default:
                    break;
            }
        }
    }

    free(key);
    return action;
}
/*
 * Determine the effective on-fail policy string for an action.
 *
 * Returns the action's configured on-fail value, except:
 *  - on-fail=standby is rejected (config error, NULL returned) for stops
 *  - a demote action with no explicit on-fail inherits the on-fail of an
 *    enabled, recurring, Master-role monitor, if one is configured
 */
static const char *
unpack_operation_on_fail(action_t * action)
{

    const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);

    if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) {
        crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id);
        return NULL;
    } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
        /* demote on_fail defaults to master monitor value if present */
        xmlNode *operation = NULL;
        const char *name = NULL;
        const char *role = NULL;
        const char *on_fail = NULL;
        const char *interval = NULL;
        const char *enabled = NULL;

        CRM_CHECK(action->rsc != NULL, return NULL);

        /* scan the resource's <op> entries for a qualifying monitor */
        for (operation = __xml_first_child(action->rsc->ops_xml);
             operation && !value; operation = __xml_next_element(operation)) {

            if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
                continue;
            }
            name = crm_element_value(operation, "name");
            role = crm_element_value(operation, "role");
            on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
            enabled = crm_element_value(operation, "enabled");
            interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            if (!on_fail) {
                continue;
            } else if (enabled && !crm_is_true(enabled)) {
                continue;
            } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
                continue;
            } else if (crm_get_interval(interval) <= 0) {
                continue;
            }

            value = on_fail;
        }
    }

    return value;
}
/*
 * Return the monitor <op> with the smallest non-negative interval that is
 * configured for 'rsc' (skipping disabled ops unless include_disabled),
 * or NULL if there is none.
 */
static xmlNode *
find_min_interval_mon(resource_t * rsc, gboolean include_disabled)
{
    int number = 0;
    int min_interval = -1;
    const char *name = NULL;
    const char *value = NULL;
    const char *interval = NULL;
    xmlNode *op = NULL;
    xmlNode *operation = NULL;

    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
         operation = __xml_next_element(operation)) {

        if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
            name = crm_element_value(operation, "name");
            interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            value = crm_element_value(operation, "enabled");
            if (!include_disabled && value && crm_is_true(value) == FALSE) {
                continue;
            }

            /* only monitor operations qualify */
            if (safe_str_neq(name, RSC_STATUS)) {
                continue;
            }

            number = crm_get_interval(interval);
            if (number < 0) {
                continue;
            }

            if (min_interval < 0 || number < min_interval) {
                min_interval = number;
                op = operation;
            }
        }
    }

    return op;
}
/*
 * Unpack the XML definition of one resource operation into an action.
 *
 * Merges op_defaults, the op element's own XML properties, and its meta and
 * instance attribute sets into action->meta, then derives: the normalized
 * interval, the action's requirement ("requires"), its failure handling
 * (on-fail and role_after_failure), start-delay (explicit or computed from
 * an interval-origin), and timeout.
 *
 * \param action     action to populate (action->rsc must be set)
 * \param xml_obj    matching <op> entry from the resource config, or NULL
 * \param container  containing resource (affects on-fail default), or NULL
 * \param data_set   cluster working set
 */
void
unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
                 pe_working_set_t * data_set)
{
    int value_i = 0;
    unsigned long long interval = 0;
    unsigned long long start_delay = 0;
    char *value_ms = NULL;
    const char *value = NULL;
    const char *field = NULL;

    CRM_CHECK(action->rsc != NULL, return);

    /* Start from the configured operation defaults */
    unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL,
                               action->meta, NULL, FALSE, data_set->now);

    /* Then overlay the op element's own XML properties ... */
    if (xml_obj) {
        xmlAttrPtr xIter = NULL;

        for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
            const char *prop_name = (const char *)xIter->name;
            const char *prop_value = crm_element_value(xml_obj, prop_name);

            g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
        }
    }

    /* ... and its meta-attribute and instance-attribute sets */
    unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS,
                               NULL, action->meta, NULL, FALSE, data_set->now);

    unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS,
                               NULL, action->meta, NULL, FALSE, data_set->now);

    g_hash_table_remove(action->meta, "id");

    /* Normalize the interval to milliseconds, or drop it if not positive */
    field = XML_LRM_ATTR_INTERVAL;
    value = g_hash_table_lookup(action->meta, field);
    if (value != NULL) {
        interval = crm_get_interval(value);
        if (interval > 0) {
            value_ms = crm_itoa(interval);
            g_hash_table_replace(action->meta, strdup(field), value_ms);

        } else {
            g_hash_table_remove(action->meta, field);
        }
    }

    /* Begin compatibility code ("requires" set on start action not resource) */
    value = g_hash_table_lookup(action->meta, "requires");

    if (safe_str_neq(action->task, RSC_START)
        && safe_str_neq(action->task, RSC_PROMOTE)) {
        action->needs = rsc_req_nothing;
        value = "nothing (not start/promote)";

    } else if (safe_str_eq(value, "nothing")) {
        action->needs = rsc_req_nothing;

    } else if (safe_str_eq(value, "quorum")) {
        action->needs = rsc_req_quorum;

    } else if (safe_str_eq(value, "unfencing")) {
        action->needs = rsc_req_stonith;
        set_bit(action->rsc->flags, pe_rsc_needs_unfencing);
        if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
            crm_notice("%s requires unfencing but fencing is disabled", action->rsc->id);
        }

    } else if (safe_str_eq(value, "fencing")) {
        /* Fixed: this branch was previously also guarded by
         * is_set(data_set->flags, pe_flag_stonith_enabled), which made the
         * notice below unreachable and silently fell through to the
         * resource-level defaults whenever fencing was disabled */
        action->needs = rsc_req_stonith;
        if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
            crm_notice("%s requires fencing but fencing is disabled", action->rsc->id);
        }
        /* End compatibility code */

    } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
        action->needs = rsc_req_stonith;
        value = "fencing (resource)";

    } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
        action->needs = rsc_req_quorum;
        value = "quorum (resource)";

    } else {
        action->needs = rsc_req_nothing;
        value = "nothing (resource)";
    }

    pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value);

    /* Failure handling (on-fail) */
    value = unpack_operation_on_fail(action);

    if (value == NULL) {

    } else if (safe_str_eq(value, "block")) {
        action->on_fail = action_fail_block;
        g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));

    } else if (safe_str_eq(value, "fence")) {
        action->on_fail = action_fail_fence;
        value = "node fencing";

        if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
            crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense");
            action->on_fail = action_fail_stop;
            action->fail_role = RSC_ROLE_STOPPED;
            value = "stop resource";
        }

    } else if (safe_str_eq(value, "standby")) {
        action->on_fail = action_fail_standby;
        value = "node standby";

    } else if (safe_str_eq(value, "ignore")
               || safe_str_eq(value, "nothing")) {
        action->on_fail = action_fail_ignore;
        value = "ignore";

    } else if (safe_str_eq(value, "migrate")) {
        action->on_fail = action_fail_migrate;
        value = "force migration";

    } else if (safe_str_eq(value, "stop")) {
        action->on_fail = action_fail_stop;
        action->fail_role = RSC_ROLE_STOPPED;
        value = "stop resource";

    } else if (safe_str_eq(value, "restart")) {
        action->on_fail = action_fail_recover;
        value = "restart (and possibly migrate)";

    } else if (safe_str_eq(value, "restart-container")) {
        if (container) {
            action->on_fail = action_fail_restart_container;
            value = "restart container (and possibly migrate)";
        } else {
            value = NULL;
        }

    } else {
        pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
        value = NULL;
    }

    /* defaults */
    if (value == NULL && container) {
        action->on_fail = action_fail_restart_container;
        value = "restart container (and possibly migrate) (default)";

        /* for baremetal remote nodes, ensure that any failure that results in
         * dropping an active connection to a remote node results in fencing of
         * the remote node.
         *
         * There are only two action failures that don't result in fencing.
         * 1. probes - probe failures are expected.
         * 2. start - a start failure indicates that an active connection does not already
         * exist. The user can set op on-fail=fence if they really want to fence start
         * failures. */
    } else if (value == NULL &&
               is_rsc_baremetal_remote_node(action->rsc, data_set) &&
               !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) &&
               (safe_str_neq(action->task, CRMD_ACTION_START))) {

        if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
            value = "fence baremetal remote node (default)";
        } else {
            value = "recover baremetal remote node connection (default)";
        }
        if (action->rsc->remote_reconnect_interval) {
            action->fail_role = RSC_ROLE_STOPPED;
        }
        action->on_fail = action_fail_reset_remote;

    } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
        if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
            action->on_fail = action_fail_fence;
            value = "resource fence (default)";

        } else {
            action->on_fail = action_fail_block;
            value = "resource block (default)";
        }

    } else if (value == NULL) {
        action->on_fail = action_fail_recover;
        value = "restart (and possibly migrate) (default)";
    }

    pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);

    /* Role after failure: explicit setting, else task-dependent default */
    value = NULL;
    if (xml_obj != NULL) {
        value = g_hash_table_lookup(action->meta, "role_after_failure");
    }
    if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
        action->fail_role = text2role(value);
    }
    /* defaults */
    if (action->fail_role == RSC_ROLE_UNKNOWN) {
        if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) {
            action->fail_role = RSC_ROLE_SLAVE;
        } else {
            action->fail_role = RSC_ROLE_STARTED;
        }
    }
    pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
                 role2text(action->fail_role));

    /* Start delay: explicit value, or derived from interval-origin */
    field = XML_OP_ATTR_START_DELAY;
    value = g_hash_table_lookup(action->meta, field);
    if (value != NULL) {
        value_i = crm_get_msec(value);
        if (value_i < 0) {
            value_i = 0;
        }
        start_delay = value_i;
        value_ms = crm_itoa(value_i);
        g_hash_table_replace(action->meta, strdup(field), value_ms);

    } else if (interval > 0 && g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN)) {
        crm_time_t *origin = NULL;

        value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
        origin = crm_time_new(value);

        if (origin == NULL) {
            crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s",
                           ID(xml_obj), value);

        } else {
            crm_time_t *delay = NULL;
            int rc = crm_time_compare(origin, data_set->now);
            long long delay_s = 0;
            int interval_s = (interval / 1000);

            crm_trace("Origin: %s, interval: %d", value, interval_s);

            /* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */
            while (rc > 0) {
                crm_time_add_seconds(origin, -interval_s);
                rc = crm_time_compare(origin, data_set->now);
            }

            /* Now find the first "multiple" that occurs after 'now' */
            while (rc < 0) {
                crm_time_add_seconds(origin, interval_s);
                rc = crm_time_compare(origin, data_set->now);
            }

            delay = crm_time_calculate_duration(origin, data_set->now);

            crm_time_log(LOG_TRACE, "origin", origin,
                         crm_time_log_date | crm_time_log_timeofday |
                         crm_time_log_with_timezone);
            crm_time_log(LOG_TRACE, "now", data_set->now,
                         crm_time_log_date | crm_time_log_timeofday |
                         crm_time_log_with_timezone);
            crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration);

            delay_s = crm_time_get_seconds(delay);
            CRM_CHECK(delay_s >= 0, delay_s = 0);
            start_delay = delay_s * 1000;

            crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj));
            g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
                                 crm_itoa(start_delay));

            crm_time_free(origin);
            crm_time_free(delay);
        }
    }

    /* Timeout: explicit value, min-interval monitor fallback for probes,
     * else the cluster default */
    field = XML_ATTR_TIMEOUT;
    value = g_hash_table_lookup(action->meta, field);
    if (value == NULL && xml_obj == NULL && safe_str_eq(action->task, RSC_STATUS) && interval == 0) {
        xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);

        if (min_interval_mon) {
            value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
            pe_rsc_trace(action->rsc,
                         "\t%s uses the timeout value '%s' from the minimum interval monitor",
                         action->uuid, value);
        }
    }
    if (value == NULL) {
        value = pe_pref(data_set->config_hash, "default-action-timeout");
    }
    value_i = crm_get_msec(value);
    if (value_i < 0) {
        value_i = 0;
    }
    value_ms = crm_itoa(value_i);
    g_hash_table_replace(action->meta, strdup(field), value_ms);
}
/*
 * Find the <op> definition in the resource's ops_xml matching 'key'
 * (an operation key of the form "<rsc>_<task>_<interval>").
 *
 * Disabled ops are skipped unless include_disabled is set.  Keys are also
 * matched against the resource's clone name, and migrate/notify keys fall
 * back (once) to a generic "migrate"/"notify" key with interval 0.
 */
static xmlNode *
find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled)
{
    unsigned long long number = 0;
    gboolean do_retry = TRUE;
    char *local_key = NULL;
    const char *name = NULL;
    const char *value = NULL;
    const char *interval = NULL;
    char *match_key = NULL;
    xmlNode *op = NULL;
    xmlNode *operation = NULL;

  retry:
    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
         operation = __xml_next_element(operation)) {

        if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
            name = crm_element_value(operation, "name");
            interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            value = crm_element_value(operation, "enabled");
            if (!include_disabled && value && crm_is_true(value) == FALSE) {
                continue;
            }

            number = crm_get_interval(interval);
            /* compare against the key built from the resource id ... */
            match_key = generate_op_key(rsc->id, name, number);
            if (safe_str_eq(key, match_key)) {
                op = operation;
            }
            free(match_key);

            /* ... and, for clone instances, the clone name as well */
            if (rsc->clone_name) {
                match_key = generate_op_key(rsc->clone_name, name, number);
                if (safe_str_eq(key, match_key)) {
                    op = operation;
                }
                free(match_key);
            }

            if (op != NULL) {
                free(local_key);
                return op;
            }
        }
    }

    free(local_key);
    if (do_retry == FALSE) {
        return NULL;
    }

    /* Nothing matched: retry once with a generic fallback key */
    do_retry = FALSE;
    if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
        local_key = generate_op_key(rsc->id, "migrate", 0);
        key = local_key;
        goto retry;

    } else if (strstr(key, "_notify_")) {
        local_key = generate_op_key(rsc->id, "notify", 0);
        key = local_key;
        goto retry;
    }

    return NULL;
}
/* Public wrapper: find the matching enabled <op> entry for 'key' */
xmlNode *
find_rsc_op_entry(resource_t * rsc, const char *key)
{
    return find_rsc_op_entry_helper(rsc, key, FALSE);
}
/* Trace-log a node's name, weight and fixed flag, plus (when details is
 * TRUE) its attributes and the resources currently running on it. */
void
print_node(const char *pre_text, node_t * node, gboolean details)
{
    if (node == NULL) {
        crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
        return;
    }

    CRM_ASSERT(node->details);
    crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
              pre_text == NULL ? "" : pre_text,
              pre_text == NULL ? "" : ": ",
              node->details->online ? "" : "Unavailable/Unclean ",
              node->details->uname, node->weight, node->fixed ? "True" : "False");

    if (details) {
        char *pe_mutable = strdup("\t\t");
        GListPtr gIter = node->details->running_rsc;

        crm_trace("\t\t===Node Attributes");
        g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
        free(pe_mutable);

        crm_trace("\t\t=== Resources");
        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *rsc = (resource_t *) gIter->data;

            print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE);
        }
    }
}
/*
 * Used by the HashTable for-loop: trace-log one key/value pair, with an
 * optional prefix supplied via user_data.
 */
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
    const char *prefix = (user_data == NULL) ? "" : (char *)user_data;
    const char *sep = (user_data == NULL) ? "" : ": ";

    crm_trace("%s%s %s ==> %s", prefix, sep, (char *)key, (char *)value);
}
/* Log a resource (including pending actions) via its class print method;
 * a NULL resource logs just the prefix.
 *
 * Fixed: this function still contained raw diff markers ('-'/'+' lines)
 * from an unresolved patch; resolved in favor of the post-image, which
 * adds pe_print_pending to the print options. */
void
print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details)
{
    long options = pe_print_log | pe_print_pending;

    if (rsc == NULL) {
        do_crm_log(log_level - 1, "%s%s: ",
                   pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
        return;
    }
    if (details) {
        options |= pe_print_details;
    }
    rsc->fns->print(rsc, pre_text, options, &log_level);
}
/* Free an action and everything it owns: its ordering wrapper lists,
 * attribute tables, strings, and its private node copy. */
void
pe_free_action(action_t * action)
{
    if (action == NULL) {
        return;
    }
    g_list_free_full(action->actions_before, free);     /* action_wrapper_t* */
    g_list_free_full(action->actions_after, free);      /* action_wrapper_t* */
    if (action->extra) {
        g_hash_table_destroy(action->extra);
    }
    if (action->meta) {
        g_hash_table_destroy(action->meta);
    }
    free(action->cancel_task);
    free(action->task);
    free(action->uuid);
    free(action->node);
    free(action);
}
/* Collect all recurring (non-zero interval, non-cancel) actions from
 * 'input', optionally excluding those placed on 'not_on_node'. */
GListPtr
find_recurring_actions(GListPtr input, node_t * not_on_node)
{
    GListPtr result = NULL;
    GListPtr gIter = NULL;

    CRM_CHECK(input != NULL, return NULL);

    for (gIter = input; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;
        const char *interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);

        if (interval == NULL || safe_str_eq(interval, "0")) {
            continue;           /* not recurring */
        }
        if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) {
            continue;           /* cancellations don't count */
        }

        if (not_on_node == NULL) {
            crm_trace("(null) Found: %s", action->uuid);
            result = g_list_prepend(result, action);
        } else if (action->node != NULL
                   && action->node->details != not_on_node->details) {
            crm_trace("Found: %s", action->uuid);
            result = g_list_prepend(result, action);
        }
    }

    return result;
}
/*
 * Map a task name to its action_tasks value, folding the "-ed" pseudo
 * actions (stopped/started/demoted/promoted) back into the corresponding
 * atomic action for primitives, or whenever non-atomic tasks are not
 * allowed.
 */
enum action_tasks
get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
    enum action_tasks task = text2task(name);

    if (rsc == NULL) {
        return task;

    } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
        switch (task) {
            case stopped_rsc:
            case started_rsc:
            case action_demoted:
            case action_promoted:
                crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
                /* relies on each "-ed" value directly following its atomic
                 * counterpart in the action_tasks enum */
                return task - 1;
                break;
            default:
                break;
        }
    }
    return task;
}
/* Return the first action in 'input' matching the given uuid and/or task
 * (either may be NULL to match anything, but not both), optionally
 * restricted to actions placed on 'on_node'. */
action_t *
find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node)
{
    GListPtr gIter = NULL;

    CRM_CHECK(uuid || task, return NULL);

    for (gIter = input; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;

        if (uuid != NULL && safe_str_neq(uuid, action->uuid)) {
            continue;
        }
        if (task != NULL && safe_str_neq(task, action->task)) {
            continue;
        }
        if (on_node == NULL) {
            return action;
        }
        if (action->node != NULL && on_node->details == action->node->details) {
            return action;
        }
    }

    return NULL;
}
/*
 * Find all actions in 'input' whose uuid matches 'key'.
 *
 * When on_node is given, only actions on that node match; as a side effect,
 * a matching action with no node yet is assigned a copy of on_node and
 * included in the result.
 */
GListPtr
find_actions(GListPtr input, const char *key, const node_t *on_node)
{
    GListPtr gIter = input;
    GListPtr result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    for (; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;

        crm_trace("Matching %s against %s", key, action->uuid);
        if (safe_str_neq(key, action->uuid)) {
            continue;

        } else if (on_node == NULL) {
            result = g_list_prepend(result, action);

        } else if (action->node == NULL) {
            /* skip */
            crm_trace("While looking for %s action on %s, "
                      "found an unallocated one. Assigning"
                      " it to the requested node...", key, on_node->details->uname);

            action->node = node_copy(on_node);
            result = g_list_prepend(result, action);

        } else if (on_node->details == action->node->details) {
            result = g_list_prepend(result, action);
        }
    }

    return result;
}
/*
 * Find all actions in 'input' with the given uuid placed on exactly the
 * given node; unlike find_actions(), unallocated actions never match and
 * are never assigned a node.
 */
GListPtr
find_actions_exact(GListPtr input, const char *key, node_t * on_node)
{
    GListPtr gIter = input;
    GListPtr result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    for (; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;

        crm_trace("Matching %s against %s", key, action->uuid);
        if (safe_str_neq(key, action->uuid)) {
            crm_trace("Key mismatch: %s vs. %s", key, action->uuid);
            continue;

        } else if (on_node == NULL || action->node == NULL) {
            crm_trace("on_node=%p, action->node=%p", on_node, action->node);
            continue;

        } else if (safe_str_eq(on_node->details->id, action->node->details->id)) {
            result = g_list_prepend(result, action);
        }
        /* NOTE(review): this trace also fires when the node ids matched
         * above -- looks like it was intended to be an else branch; confirm */
        crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id);
    }

    return result;
}
/* Merge 'score' (labelled 'tag' in trace output) into the weight 'node' has
 * in the allowed-node table of 'rsc', recursing through all of the
 * resource's children first.  A node not yet present in the table is copied
 * in before the merge; merge_weights() handles +/-INFINITY saturation.
 */
static void
resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag)
{
    node_t *match = NULL;

    if (rsc->children) {
        GListPtr gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child_rsc = (resource_t *) gIter->data;

            resource_node_score(child_rsc, node, score, tag);
        }
    }

    pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
    match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
    if (match == NULL) {
        /* node_copy() preserves node->weight.  Do NOT merge the score here
         * as well: the unconditional merge below would then apply it twice
         * for newly added nodes, inflating their weight.
         */
        match = node_copy(node);
        g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
    }
    match->weight = merge_weights(match->weight, score);
}
/* Apply a location preference for 'rsc'.
 *
 * With an explicit 'node', only that node is scored.  With node == NULL and
 * a working set, the score is applied to every cluster node.  With neither,
 * it is applied to the nodes already present in rsc->allowed_nodes.
 * 'tag' only labels the change in trace output.
 */
void
resource_location(resource_t * rsc, node_t * node, int score, const char *tag,
                  pe_working_set_t * data_set)
{
    if (node != NULL) {
        resource_node_score(rsc, node, score, tag);

    } else if (data_set != NULL) {
        GListPtr gIter = data_set->nodes;

        for (; gIter != NULL; gIter = gIter->next) {
            /* NOTE: shadows the (NULL) 'node' parameter within this loop */
            node_t *node = (node_t *) gIter->data;

            resource_node_score(rsc, node, score, tag);
        }

    } else {
        GHashTableIter iter;
        /* NOTE: shadows the (NULL) 'node' parameter within this branch */
        node_t *node = NULL;

        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            resource_node_score(rsc, node, score, tag);
        }
    }

    /* 'node' here is still the caller's argument: a cluster-wide -INFINITY
     * means the resource can run nowhere, so drop any existing allocation.
     */
    if (node == NULL && score == -INFINITY) {
        if (rsc->allocated_to) {
            crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
            free(rsc->allocated_to);
            rsc->allocated_to = NULL;
        }
    }
}
/* Helper for sort_op_by_callid(): trace the comparison verdict, release the
 * decoded transition UUIDs, and return 'an_int' from the enclosing function.
 */
#define sort_return(an_int, why) do {                                   \
        free(a_uuid);                                                   \
        free(b_uuid);                                                   \
        crm_trace("%s (%d) %c %s (%d) : %s",                            \
                  a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',   \
                  b_xml_id, b_call_id, why);                            \
        return an_int;                                                  \
    } while(0)

/* GCompareFunc for sorting lrm_rsc_op history entries (xmlNode *) into
 * execution order.  The primary key is the LRM call id; entries with equal
 * call ids fall back to last-rc-change, and pending operations (call id -1)
 * are ordered by decoding their transition magic.  Returns <0, 0 or >0.
 */
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
    int a_call_id = -1;
    int b_call_id = -1;
    char *a_uuid = NULL;
    char *b_uuid = NULL;
    const xmlNode *xml_a = a;
    const xmlNode *xml_b = b;
    const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID);
    const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID);

    if (safe_str_eq(a_xml_id, b_xml_id)) {
        /* We have duplicate lrm_rsc_op entries in the status
         * section which is unlikely to be a good thing
         * - we can handle it easily enough, but we need to get
         * to the bottom of why it's happening.
         */
        pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
        sort_return(0, "duplicate");
    }

    crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
    crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);

    if (a_call_id == -1 && b_call_id == -1) {
        /* both are pending ops so it doesn't matter since
         * stops are never pending
         */
        sort_return(0, "pending");

    } else if (a_call_id >= 0 && a_call_id < b_call_id) {
        sort_return(-1, "call id");

    } else if (b_call_id >= 0 && a_call_id > b_call_id) {
        sort_return(1, "call id");

    } else if (b_call_id >= 0 && a_call_id == b_call_id) {
        /*
         * The op and last_failed_op are the same
         * Order on last-rc-change
         */
        int last_a = -1;
        int last_b = -1;

        crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
        crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);

        crm_trace("rc-change: %d vs %d", last_a, last_b);
        if (last_a >= 0 && last_a < last_b) {
            sort_return(-1, "rc-change");

        } else if (last_b >= 0 && last_a > last_b) {
            sort_return(1, "rc-change");
        }
        sort_return(0, "rc-change");

    } else {
        /* One of the inputs is a pending operation
         * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
         */
        int a_id = -1;
        int b_id = -1;
        int dummy = -1;

        const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC);
        const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC);

        CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
        if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) {
            sort_return(0, "bad magic a");
        }
        if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) {
            sort_return(0, "bad magic b");
        }
        /* try to determine the relative age of the operation...
         * some pending operations (ie. a start) may have been superseded
         * by a subsequent stop
         *
         * [a|b]_id == -1 means its a shutdown operation and _always_ comes last
         */
        if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) {
            /*
             * some of the logic in here may be redundant...
             *
             * if the UUID from the TE doesn't match then one better
             * be a pending operation.
             * pending operations dont survive between elections and joins
             * because we query the LRM directly
             */

            if (b_call_id == -1) {
                sort_return(-1, "transition + call");

            } else if (a_call_id == -1) {
                sort_return(1, "transition + call");
            }

        } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
            sort_return(-1, "transition");

        } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
            sort_return(1, "transition");
        }
    }

    /* we should never end up here */
    CRM_CHECK(FALSE, sort_return(0, "default"));
}
/* Return the scheduler's notion of "now" as seconds since the epoch.
 * With a working set, its cached 'now' is used (and created on first call);
 * without one, fall back to the wall clock.
 */
time_t
get_effective_time(pe_working_set_t * data_set)
{
    if (data_set == NULL) {
        crm_trace("Defaulting to 'now'");
        return time(NULL);
    }

    if (data_set->now == NULL) {
        crm_trace("Recording a new 'now'");
        data_set->now = crm_time_new(NULL);
    }
    return crm_time_get_seconds_since_epoch(data_set->now);
}
/* Accumulator used when scanning a node's attribute table for the
 * fail-count / last-failure entries belonging to one resource. */
struct fail_search {
    resource_t *rsc;            /* resource being queried (its uber-parent
                                 * during anonymous-clone prefix scans) */
    pe_working_set_t * data_set;
    int count;                  /* accumulated fail count (score arithmetic) */
    long long last;             /* last-failure timestamp seen */
    char *key;                  /* resource-name prefix to match, or NULL */
};
/* GHFunc for a node's attribute table: when the attribute name contains
 * search->key (a clone-stripped resource name) and the resource it names
 * shares search->rsc as uber-parent, fold its fail-count or last-failure
 * value into the search accumulator.
 */
static void
get_failcount_by_prefix(gpointer key_p, gpointer value, gpointer user_data)
{
    struct fail_search *search = user_data;
    const char *attr_id = key_p;
    const char *match = strstr(attr_id, search->key);
    resource_t *parent = NULL;

    if (match == NULL) {
        return;
    }

    /* we are only incrementing the failcounts here if the rsc
     * that matches our prefix has the same uber parent as the rsc we're
     * calculating the failcounts for. This prevents false positive matches
     * where unrelated resources may have similar prefixes in their names.
     *
     * search->rsc is already set to be the uber parent. */
    parent = uber_parent(pe_find_resource(search->data_set->resources, match));
    if (parent == NULL || parent != search->rsc) {
        return;
    }

    /* strstr(s, prefix) == s  <=>  s starts with prefix */
    if (strstr(attr_id, "last-failure-") == attr_id) {
        search->last = crm_int_helper(value, NULL);

    } else if (strstr(attr_id, "fail-count-") == attr_id) {
        search->count += char2score(value);
    }
}
/* Convenience wrapper for get_failcount_full(): honour failure-timeout
 * expiry (effective == TRUE) and consider the full history rather than a
 * single operation (xml_op == NULL).
 */
int
get_failcount(node_t * node, resource_t * rsc, time_t *last_failure, pe_working_set_t * data_set)
{
    return get_failcount_full(node, rsc, last_failure, TRUE, NULL, data_set);
}
/* Return TRUE when the LRM history entry 'lrm_op_xml' is a failure of the
 * configured operation 'conf_op_xml' for resource 'rsc_id'.  The operation
 * name and interval must agree, and the history entry must either be the
 * resource's "last_failure" record or a regular record whose rc differs
 * from the expected rc.
 */
static gboolean
is_matched_failure(const char * rsc_id, xmlNode * conf_op_xml, xmlNode * lrm_op_xml)
{
    gboolean matched = FALSE;
    const char *conf_op_name = NULL;
    int conf_op_interval = 0;
    const char *lrm_op_task = NULL;
    int lrm_op_interval = 0;
    const char *lrm_op_id = NULL;
    char *last_failure_key = NULL;

    if (rsc_id == NULL || conf_op_xml == NULL || lrm_op_xml == NULL) {
        return FALSE;
    }

    conf_op_name = crm_element_value(conf_op_xml, "name");
    conf_op_interval = crm_get_msec(crm_element_value(conf_op_xml, "interval"));
    lrm_op_task = crm_element_value(lrm_op_xml, XML_LRM_ATTR_TASK);
    crm_element_value_int(lrm_op_xml, XML_LRM_ATTR_INTERVAL, &lrm_op_interval);

    /* The records are only comparable if name and interval both match */
    if (safe_str_eq(conf_op_name, lrm_op_task) == FALSE
        || conf_op_interval != lrm_op_interval) {
        return FALSE;
    }

    lrm_op_id = ID(lrm_op_xml);
    last_failure_key = generate_op_key(rsc_id, "last_failure", 0);

    if (safe_str_eq(last_failure_key, lrm_op_id)) {
        /* Explicit last-failure record: a failure by definition */
        matched = TRUE;

    } else {
        char *expected_op_key = generate_op_key(rsc_id, conf_op_name, conf_op_interval);

        if (safe_str_eq(expected_op_key, lrm_op_id)) {
            int rc = 0;
            int target_rc = get_target_rc(lrm_op_xml);

            crm_element_value_int(lrm_op_xml, XML_LRM_ATTR_RC, &rc);
            /* An rc other than the expected one marks the op as failed */
            if (rc != target_rc) {
                matched = TRUE;
            }
        }
        free(expected_op_key);
    }

    free(last_failure_key);
    return matched;
}
/* Return TRUE when 'rsc' has a failure that must block recovery, i.e. a
 * failed operation that was configured with on-fail="block".  When a
 * specific history entry 'xml_op' is given only that operation is checked;
 * otherwise the node's whole LRM history for the resource is scanned.
 */
static gboolean
block_failure(node_t * node, resource_t * rsc, xmlNode * xml_op, pe_working_set_t * data_set)
{
    char *xml_name = clone_strip(rsc->id);
    char *xpath = crm_strdup_printf("//primitive[@id='%s']//op[@on-fail='block']", xml_name);

    xmlXPathObject *xpathObj = xpath_search(rsc->xml, xpath);
    gboolean should_block = FALSE;

    free(xpath);

    if (xpathObj) {
        int max = numXpathResults(xpathObj);
        int lpc = 0;

        /* Iterate over every configured on-fail="block" operation */
        for (lpc = 0; lpc < max; lpc++) {
            xmlNode *pref = getXpathResult(xpathObj, lpc);

            if (xml_op) {
                should_block = is_matched_failure(xml_name, pref, xml_op);
                if (should_block) {
                    break;
                }

            } else {
                const char *conf_op_name = NULL;
                int conf_op_interval = 0;
                char *lrm_op_xpath = NULL;
                xmlXPathObject *lrm_op_xpathObj = NULL;

                conf_op_name = crm_element_value(pref, "name");
                conf_op_interval = crm_get_msec(crm_element_value(pref, "interval"));

                /* Collect every recorded run of this operation on this node */
                lrm_op_xpath = crm_strdup_printf("//node_state[@uname='%s']"
                                                 "//lrm_resource[@id='%s']"
                                                 "/lrm_rsc_op[@operation='%s'][@interval='%d']",
                                                 node->details->uname, xml_name,
                                                 conf_op_name, conf_op_interval);
                lrm_op_xpathObj = xpath_search(data_set->input, lrm_op_xpath);

                free(lrm_op_xpath);

                if (lrm_op_xpathObj) {
                    int max2 = numXpathResults(lrm_op_xpathObj);
                    int lpc2 = 0;

                    for (lpc2 = 0; lpc2 < max2; lpc2++) {
                        xmlNode *lrm_op_xml = getXpathResult(lrm_op_xpathObj, lpc2);

                        should_block = is_matched_failure(xml_name, pref, lrm_op_xml);
                        if (should_block) {
                            break;
                        }
                    }
                }
                freeXpathObject(lrm_op_xpathObj);

                if (should_block) {
                    break;
                }
            }
        }
    }

    free(xml_name);
    freeXpathObject(xpathObj);

    return should_block;
}
/* Compute the accumulated fail count of 'rsc' on 'node'.
 *
 * last_failure: optional out-parameter for the recorded failure timestamp.
 * effective:    if TRUE, a count older than rsc->failure_timeout reports 0.
 * xml_op:       optional specific history entry for the on-fail=block check.
 * Returns the fail count in score arithmetic (char2score may yield INFINITY).
 * NOTE: may zero rsc->failure_timeout as a side effect when it conflicts
 * with a configured on-fail=block.
 */
int
get_failcount_full(node_t * node, resource_t * rsc, time_t *last_failure,
                   bool effective, xmlNode * xml_op, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *value = NULL;
    struct fail_search search = { rsc, data_set, 0, 0, NULL };

    /* Optimize the "normal" case */
    key = crm_concat("fail-count", rsc->clone_name ? rsc->clone_name : rsc->id, '-');
    value = g_hash_table_lookup(node->details->attrs, key);
    search.count = char2score(value);
    crm_trace("%s = %s", key, value);
    free(key);

    if (value) {
        key = crm_concat("last-failure", rsc->clone_name ? rsc->clone_name : rsc->id, '-');
        value = g_hash_table_lookup(node->details->attrs, key);
        search.last = crm_int_helper(value, NULL);
        free(key);

        /* This block is still relevant once we omit anonymous instance numbers
         * because stopped clones won't have clone_name set
         */
    } else if (is_not_set(rsc->flags, pe_rsc_unique)) {
        /* Anonymous clone: sum the counts of every matching instance */
        search.rsc = uber_parent(rsc);
        search.key = clone_strip(rsc->id);

        g_hash_table_foreach(node->details->attrs, get_failcount_by_prefix, &search);
        free(search.key);
        search.key = NULL;
    }

    if (search.count != 0 && search.last != 0 && last_failure) {
        *last_failure = search.last;
    }

    if(search.count && rsc->failure_timeout) {
        /* Never time-out if blocking failures are configured */
        if (block_failure(node, rsc, xml_op, data_set)) {
            pe_warn("Setting %s.failure-timeout=%d conflicts with on-fail=block: ignoring timeout", rsc->id, rsc->failure_timeout);
            rsc->failure_timeout = 0;
#if 0
            /* A good idea? */
        } else if (rsc->container == NULL && is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
            /* In this case, stop.on-fail defaults to block in unpack_operation() */
            rsc->failure_timeout = 0;
#endif
        }
    }

    if (effective && search.count != 0 && search.last != 0 && rsc->failure_timeout) {
        if (search.last > 0) {
            time_t now = get_effective_time(data_set);

            /* An expired failure no longer counts against the resource */
            if (now > (search.last + rsc->failure_timeout)) {
                crm_debug("Failcount for %s on %s has expired (limit was %ds)",
                          search.rsc->id, node->details->uname, rsc->failure_timeout);
                search.count = 0;
            }
        }
    }

    if (search.count != 0) {
        char *score = score2char(search.count);

        crm_info("%s has failed %s times on %s", search.rsc->id, score, node->details->uname);
        free(score);
    }

    return search.count;
}
/* If it's a resource container, get its failcount plus all the failcounts of
 * the resources within it.  The optional 'last_failure' out-parameter
 * receives the newest failure time seen across the container and its
 * fillers. */
int
get_failcount_all(node_t * node, resource_t * rsc, time_t *last_failure, pe_working_set_t * data_set)
{
    int failcount_all = 0;

    failcount_all = get_failcount(node, rsc, last_failure, data_set);

    if (rsc->fillers) {
        GListPtr gIter = NULL;

        for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
            resource_t *filler = (resource_t *) gIter->data;
            time_t filler_last_failure = 0;

            failcount_all += get_failcount(node, filler, &filler_last_failure, data_set);

            /* Track the most recent failure time across all fillers */
            if (last_failure && filler_last_failure > *last_failure) {
                *last_failure = filler_last_failure;
            }
        }

        if (failcount_all != 0) {
            char *score = score2char(failcount_all);

            crm_info("Container %s and the resources within it have failed %s times on %s",
                     rsc->id, score, node->details->uname);
            free(score);
        }
    }

    return failcount_all;
}
/* Look up the resource's target-role meta attribute.
 * On success, store it in *role and return TRUE.  Return FALSE when the
 * attribute is absent, is an explicit default ("started"/"default"), cannot
 * be parsed, or names a master/slave role on a non-master resource.
 */
gboolean
get_target_role(resource_t * rsc, enum rsc_role_e * role)
{
    enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
    const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);

    CRM_CHECK(role != NULL, return FALSE);

    if (value == NULL || safe_str_eq("started", value)
        || safe_str_eq("default", value)) {
        return FALSE;
    }

    local_role = text2role(value);
    if (local_role == RSC_ROLE_UNKNOWN) {
        crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
        return FALSE;

    } else if (local_role > RSC_ROLE_STARTED) {
        /* Roles above "Started" only make sense for master/slave resources */
        if (uber_parent(rsc)->variant == pe_master) {
            if (local_role > RSC_ROLE_SLAVE) {
                /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
                return FALSE;
            }

        } else {
            crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense",
                           rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
            return FALSE;
        }
    }

    *role = local_role;
    return TRUE;
}
/* Record that lh_action must run before rh_action, with the given ordering
 * flags.  Duplicate constraints (same successor, flags already covered) are
 * dropped so update_action_states() stays cheap.  Both actions get a wrapper
 * entry: rh on lh's actions_after, lh on rh's actions_before.
 * Returns TRUE when a new ordering was recorded.
 */
gboolean
order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order)
{
    GListPtr iter = NULL;
    action_wrapper_t *wrapper = NULL;

    if (order == pe_order_none || lh_action == NULL || rh_action == NULL) {
        return FALSE;
    }

    crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);

    /* Ensure we never create a dependency on ourselves... its happened */
    CRM_ASSERT(lh_action != rh_action);

    /* Filter dups, otherwise update_action_states() has too much work to do */
    for (iter = lh_action->actions_after; iter != NULL; iter = iter->next) {
        action_wrapper_t *after = (action_wrapper_t *) iter->data;

        if (after->action == rh_action && (after->type & order)) {
            return FALSE;
        }
    }

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = rh_action;
    wrapper->type = order;
    lh_action->actions_after = g_list_prepend(lh_action->actions_after, wrapper);

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = lh_action;
    wrapper->type = order;
    rh_action->actions_before = g_list_prepend(rh_action->actions_before, wrapper);

    return TRUE;
}
/* Look up — or create on first use — the singleton pseudo-action 'name'.
 * Newly created pseudo-actions are marked pseudo and runnable.
 */
action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
    action_t *op = data_set->singletons
                   ? g_hash_table_lookup(data_set->singletons, name)
                   : NULL;

    if (op == NULL) {
        op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
        set_bit(op->flags, pe_action_pseudo);
        set_bit(op->flags, pe_action_runnable);
    }
    return op;
}
/* GDestroyNotify for ticket_t entries: release the ticket's state table,
 * its id string, and the ticket itself.
 */
void
destroy_ticket(gpointer data)
{
    ticket_t *ticket = data;

    /* g_hash_table_destroy() does not accept NULL, hence the guard */
    if (ticket->state) {
        g_hash_table_destroy(ticket->state);
    }
    free(ticket->id);
    free(ticket);
}
/* Return the ticket_t for 'ticket_id' from the working set, creating both
 * the tickets table and the entry on first use.  Returns NULL for a NULL or
 * empty id and on allocation failure.  Ownership of the returned ticket
 * stays with data_set->tickets (freed via destroy_ticket()).
 */
ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
    ticket_t *ticket = NULL;

    if (ticket_id == NULL || strlen(ticket_id) == 0) {
        return NULL;
    }

    if (data_set->tickets == NULL) {
        data_set->tickets =
            g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
    }

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {

        ticket = calloc(1, sizeof(ticket_t));
        if (ticket == NULL) {
            crm_err("Cannot allocate ticket '%s'", ticket_id);
            return NULL;
        }

        /* Fixed log-message typo: was "Creaing" */
        crm_trace("Creating ticket entry for %s", ticket_id);

        ticket->id = strdup(ticket_id);
        ticket->granted = FALSE;
        ticket->last_granted = -1;
        ticket->standby = FALSE;
        ticket->state = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                              g_hash_destroy_str, g_hash_destroy_str);

        g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
    }

    return ticket;
}
/* Remove attributes from 'param_set' based on their presence in the
 * space-delimited 'param_string':
 *   need_present == TRUE  -> drop attributes NOT listed in param_string
 *   need_present == FALSE -> drop attributes that ARE listed
 * Names are matched as " name " tokens, so param_string is assumed to be
 * space-padded at both ends by its producer.
 */
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
    xmlAttrPtr xIter = NULL;

    /* A NULL param_string would otherwise be dereferenced by strstr() */
    if (param_set == NULL || param_string == NULL) {
        return;
    }

    xIter = param_set->properties;
    while (xIter) {
        const char *prop_name = (const char *)xIter->name;
        char *match = NULL;     /* reset each iteration: a stale match from a
                                 * prior property must not leak into this one
                                 * if the allocation below fails */
        int len = strlen(prop_name) + 3;        /* " name " + terminator */
        char *name = malloc(len);

        /* Advance before any removal invalidates the current property */
        xIter = xIter->next;

        if (name) {
            sprintf(name, " %s ", prop_name);
            name[len - 1] = 0;
            match = strstr(param_string, name);
        }

        if (need_present && match == NULL) {
            crm_trace("%s not found in %s", prop_name, param_string);
            xml_remove_prop(param_set, prop_name);

        } else if (need_present == FALSE && match) {
            crm_trace("%s found in %s", prop_name, param_string);
            xml_remove_prop(param_set, prop_name);
        }
        free(name);
    }
}
/* Compute (and cache per node, keyed by operation id) the parameter-digest
 * comparison for history entry 'xml_op' of 'rsc' on 'node'.  The result's
 * 'rc' classifies the recorded vs. current parameters as match, restart
 * required, changed, or unknown.  The returned object is owned by the
 * node's digest cache; callers must not free it.
 */
op_digest_cache_t *
rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
                      pe_working_set_t * data_set)
{
    op_digest_cache_t *data = NULL;

    GHashTable *local_rsc_params = NULL;
    action_t *action = NULL;
    char *key = NULL;

    int interval = 0;
    const char *op_id = ID(xml_op);
    const char *interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
    const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
    const char *digest_all;
    const char *digest_restart;
    const char *secure_list;
    const char *restart_list;
    const char *op_version;

    /* Return the cached result when this operation was already analysed */
    data = g_hash_table_lookup(node->details->digest_cache, op_id);
    if (data) {
        return data;
    }

    data = calloc(1, sizeof(op_digest_cache_t));

    digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
    digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
    secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
    restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
    op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);

    /* key is freed in custom_action */
    interval = crm_parse_int(interval_s, "0");
    key = generate_op_key(rsc->id, task, interval);
    action = custom_action(rsc, key, task, node, TRUE, FALSE, data_set);
    key = NULL;

    local_rsc_params = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                             g_hash_destroy_str, g_hash_destroy_str);
    get_rsc_attributes(local_rsc_params, rsc, node, data_set);

    /* Merge resource attributes, action extras/meta and resource parameters
     * into one XML node whose digest represents "current" configuration */
    data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
    g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
    g_hash_table_foreach(action->extra, hash2field, data->params_all);
    g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
    g_hash_table_foreach(action->meta, hash2metafield, data->params_all);

    filter_action_parameters(data->params_all, op_version);
    data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);

    if (secure_list && is_set(data_set->flags, pe_flag_sanitized)) {
        /* Digest with secure parameters removed, for sanitized output */
        data->params_secure = copy_xml(data->params_all);
        if (secure_list) {
            filter_parameters(data->params_secure, secure_list, FALSE);
        }
        data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
    }

    if (digest_restart) {
        /* Digest of only the parameters whose change forces a restart */
        data->params_restart = copy_xml(data->params_all);
        if (restart_list) {
            filter_parameters(data->params_restart, restart_list, TRUE);
        }
        data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
    }

    data->rc = RSC_DIGEST_MATCH;
    if (digest_restart && strcmp(data->digest_restart_calc, digest_restart) != 0) {
        data->rc = RSC_DIGEST_RESTART;

    } else if (digest_all == NULL) {
        /* it is unknown what the previous op digest was */
        data->rc = RSC_DIGEST_UNKNOWN;

    } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
        data->rc = RSC_DIGEST_ALL;
    }

    g_hash_table_insert(node->details->digest_cache, strdup(op_id), data);
    g_hash_table_destroy(local_rsc_params);
    pe_free_action(action);

    return data;
}
/* Return the identifier to show users: unique resources report rsc->id,
 * while anonymous clone instances report the id from their XML (so the
 * ":N" instance suffix is omitted).
 */
const char *rsc_printable_id(resource_t *rsc)
{
    return is_set(rsc->flags, pe_rsc_unique) ? rsc->id : ID(rsc->xml);
}
/* Clear 'flag' on rsc and on every descendant resource. */
void
clear_bit_recursive(resource_t * rsc, unsigned long long flag)
{
    GListPtr child = NULL;

    clear_bit(rsc->flags, flag);
    for (child = rsc->children; child != NULL; child = child->next) {
        clear_bit_recursive((resource_t *) child->data, flag);
    }
}
/* Set 'flag' on rsc and on every descendant resource. */
void
set_bit_recursive(resource_t * rsc, unsigned long long flag)
{
    GListPtr child = NULL;

    set_bit(rsc->flags, flag);
    for (child = rsc->children; child != NULL; child = child->next) {
        set_bit_recursive((resource_t *) child->data, flag);
    }
}
/* Return the singleton fence action for 'node', using stonith action 'op'
 * (or the cluster-wide default when op is NULL).  The action is created on
 * first use; passing optional == FALSE makes even an existing action
 * mandatory.
 */
action_t *
pe_fence_op(node_t * node, const char *op, bool optional, pe_working_set_t * data_set)
{
    char *key = NULL;
    action_t *stonith_op = NULL;

    if(op == NULL) {
        op = data_set->stonith_action;
    }

    key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);

    if(data_set->singletons) {
        stonith_op = g_hash_table_lookup(data_set->singletons, key);
    }

    if(stonith_op == NULL) {
        /* custom_action() takes ownership of 'key' */
        stonith_op = custom_action(NULL, key, CRM_OP_FENCE, node, optional, TRUE, data_set);

        add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
        add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
        add_hash_param(stonith_op->meta, "stonith_action", op);

    } else {
        /* Existing action found: the freshly built key is not needed */
        free(key);
    }

    if(optional == FALSE) {
        crm_trace("%s is no longer optional", stonith_op->uuid);
        pe_clear_action_bit(stonith_op, pe_action_optional);
    }

    return stonith_op;
}
/* Schedule a mandatory unfencing ("on") operation where required.
 * Does nothing when unfencing is disabled cluster-wide, or when 'rsc' is
 * given but is not a fence device.  With a healthy 'node', that node is
 * unfenced (optionally ordered before 'dependency'); otherwise, with only a
 * resource, recurse once per healthy node in its allowed-node table.
 */
void
trigger_unfencing(
    resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set)
{
    if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
        /* No resources require it */
        return;

    } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
        /* Wasn't a stonith device */
        return;

    } else if(node
              && node->details->online
              && node->details->unclean == FALSE
              && node->details->shutdown == FALSE) {
        action_t *unfence = pe_fence_op(node, "on", FALSE, data_set);

        crm_notice("Unfencing %s: %s", node->details->uname, reason);
        if(dependency) {
            order_actions(unfence, dependency, pe_order_optional);
        }

    } else if(rsc) {
        GHashTableIter iter;

        /* No usable node supplied: recurse for each healthy allowed node */
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
                trigger_unfencing(rsc, node, reason, dependency, data_set);
            }
        }
    }
}
/* Associate 'obj_ref' with 'tag_name' in the 'tags' table, creating the tag
 * entry on first use.  A reference that is already present is left alone.
 * Returns FALSE only on bad arguments or allocation failure.
 */
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
    tag_t *tag = NULL;
    GListPtr ref_iter = NULL;

    CRM_CHECK(tags && tag_name && obj_ref, return FALSE);

    tag = g_hash_table_lookup(tags, tag_name);
    if (tag == NULL) {
        tag = calloc(1, sizeof(tag_t));
        if (tag == NULL) {
            return FALSE;
        }
        tag->id = strdup(tag_name);
        tag->refs = NULL;
        g_hash_table_insert(tags, strdup(tag_name), tag);
    }

    for (ref_iter = tag->refs; ref_iter != NULL; ref_iter = ref_iter->next) {
        if (crm_str_eq((const char *) ref_iter->data, obj_ref, TRUE)) {
            /* Already referenced: nothing to add */
            return TRUE;
        }
    }

    tag->refs = g_list_append(tag->refs, strdup(obj_ref));
    crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
    return TRUE;
}
diff --git a/pengine/test10/696.summary b/pengine/test10/696.summary
index e19a5a5cf4..af16c933f5 100644
--- a/pengine/test10/696.summary
+++ b/pengine/test10/696.summary
@@ -1,61 +1,61 @@
Current cluster status:
Online: [ hadev1 hadev2 hadev3 ]
- DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2
- rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev3
- rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2
- rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3
+ DcIPaddr (ocf::heartbeat:IPaddr): Starting hadev2
+ rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev3 (Monitoring)
+ rsc_hadev2 (ocf::heartbeat:IPaddr): Starting hadev2
+ rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3 (Monitoring)
Clone Set: DoFencing [child_DoFencing] (unique)
- child_DoFencing:0 (stonith:ssh): Started hadev2
- child_DoFencing:1 (stonith:ssh): Started hadev3
+ child_DoFencing:0 (stonith:ssh): Started hadev2 (Monitoring)
+ child_DoFencing:1 (stonith:ssh): Started hadev3 (Monitoring)
child_DoFencing:2 (stonith:ssh): Stopped
Transition Summary:
* Move rsc_hadev1 (Started hadev3 -> hadev1)
* Start child_DoFencing:2 (hadev1)
Executing cluster transition:
* Resource action: DcIPaddr monitor on hadev3
* Resource action: DcIPaddr monitor on hadev1
* Resource action: rsc_hadev1 monitor on hadev2
* Resource action: rsc_hadev1 monitor on hadev1
* Resource action: rsc_hadev2 monitor on hadev3
* Resource action: rsc_hadev2 monitor on hadev1
* Resource action: rsc_hadev3 monitor=5000 on hadev3
* Resource action: rsc_hadev3 monitor on hadev2
* Resource action: rsc_hadev3 monitor on hadev1
* Resource action: child_DoFencing:0 monitor=5000 on hadev2
* Resource action: child_DoFencing:0 monitor on hadev3
* Resource action: child_DoFencing:0 monitor on hadev1
* Resource action: child_DoFencing:1 monitor=5000 on hadev3
* Resource action: child_DoFencing:1 monitor on hadev2
* Resource action: child_DoFencing:1 monitor on hadev1
* Resource action: child_DoFencing:2 monitor on hadev3
* Resource action: child_DoFencing:2 monitor on hadev2
* Resource action: child_DoFencing:2 monitor on hadev1
* Pseudo action: DoFencing_start_0
* Resource action: DcIPaddr start on hadev2
* Resource action: rsc_hadev1 stop on hadev3
* Resource action: rsc_hadev2 start on hadev2
* Resource action: child_DoFencing:2 start on hadev1
* Pseudo action: DoFencing_running_0
* Pseudo action: all_stopped
* Resource action: DcIPaddr monitor=5000 on hadev2
* Resource action: rsc_hadev1 start on hadev1
* Resource action: rsc_hadev2 monitor=5000 on hadev2
* Resource action: child_DoFencing:2 monitor=5000 on hadev1
* Resource action: rsc_hadev1 monitor=5000 on hadev1
Revised cluster status:
Online: [ hadev1 hadev2 hadev3 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev1
rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started hadev2
child_DoFencing:1 (stonith:ssh): Started hadev3
child_DoFencing:2 (stonith:ssh): Started hadev1
diff --git a/pengine/test10/726.summary b/pengine/test10/726.summary
index 962dbf213c..920aefee27 100644
--- a/pengine/test10/726.summary
+++ b/pengine/test10/726.summary
@@ -1,88 +1,88 @@
Current cluster status:
Online: [ ibm1 sgi2 test02 test03 ]
- DcIPaddr (ocf::heartbeat:IPaddr): Started test03
+ DcIPaddr (ocf::heartbeat:IPaddr): Started test03 (Monitoring)
rsc_sgi2 (ocf::heartbeat:IPaddr): Stopped
- rsc_ibm1 (ocf::heartbeat:IPaddr): Started test03
+ rsc_ibm1 (ocf::heartbeat:IPaddr): Started test03 (Monitoring)
rsc_test02 (ocf::heartbeat:IPaddr): Stopped
- rsc_test03 (ocf::heartbeat:IPaddr): Started test03
+ rsc_test03 (ocf::heartbeat:IPaddr): Started test03 (Monitoring)
Clone Set: DoFencing [child_DoFencing] (unique)
- child_DoFencing:0 (stonith:ssh): Started test02
- child_DoFencing:1 (stonith:ssh): Started test03
+ child_DoFencing:0 (stonith:ssh): Starting test02
+ child_DoFencing:1 (stonith:ssh): Starting test03
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
Transition Summary:
* Start rsc_sgi2 (sgi2)
* Move rsc_ibm1 (Started test03 -> ibm1)
* Start rsc_test02 (test02)
* Start child_DoFencing:2 (ibm1)
* Start child_DoFencing:3 (sgi2)
Executing cluster transition:
* Resource action: DcIPaddr monitor=5000 on test03
* Resource action: DcIPaddr monitor on test02
* Resource action: DcIPaddr monitor on sgi2
* Resource action: DcIPaddr monitor on ibm1
* Resource action: rsc_sgi2 monitor on test03
* Resource action: rsc_sgi2 monitor on test02
* Resource action: rsc_sgi2 monitor on sgi2
* Resource action: rsc_sgi2 monitor on ibm1
* Resource action: rsc_ibm1 monitor on test02
* Resource action: rsc_ibm1 monitor on sgi2
* Resource action: rsc_ibm1 monitor on ibm1
* Resource action: rsc_test02 monitor on test03
* Resource action: rsc_test02 monitor on test02
* Resource action: rsc_test02 monitor on sgi2
* Resource action: rsc_test02 monitor on ibm1
* Resource action: rsc_test03 monitor=5000 on test03
* Resource action: rsc_test03 monitor on test02
* Resource action: rsc_test03 monitor on sgi2
* Resource action: rsc_test03 monitor on ibm1
* Resource action: child_DoFencing:0 monitor on sgi2
* Resource action: child_DoFencing:0 monitor on ibm1
* Resource action: child_DoFencing:1 monitor on test02
* Resource action: child_DoFencing:1 monitor on sgi2
* Resource action: child_DoFencing:1 monitor on ibm1
* Resource action: child_DoFencing:2 monitor on test03
* Resource action: child_DoFencing:2 monitor on test02
* Resource action: child_DoFencing:2 monitor on sgi2
* Resource action: child_DoFencing:2 monitor on ibm1
* Resource action: child_DoFencing:3 monitor on test03
* Resource action: child_DoFencing:3 monitor on test02
* Resource action: child_DoFencing:3 monitor on sgi2
* Resource action: child_DoFencing:3 monitor on ibm1
* Pseudo action: DoFencing_start_0
* Resource action: rsc_sgi2 start on sgi2
* Resource action: rsc_ibm1 stop on test03
* Resource action: rsc_test02 start on test02
* Resource action: child_DoFencing:0 start on test02
* Resource action: child_DoFencing:1 start on test03
* Resource action: child_DoFencing:2 start on ibm1
* Resource action: child_DoFencing:3 start on sgi2
* Pseudo action: DoFencing_running_0
* Pseudo action: all_stopped
* Resource action: rsc_sgi2 monitor=5000 on sgi2
* Resource action: rsc_ibm1 start on ibm1
* Resource action: rsc_test02 monitor=5000 on test02
* Resource action: child_DoFencing:0 monitor=5000 on test02
* Resource action: child_DoFencing:1 monitor=5000 on test03
* Resource action: child_DoFencing:2 monitor=5000 on ibm1
* Resource action: child_DoFencing:3 monitor=5000 on sgi2
* Resource action: rsc_ibm1 monitor=5000 on ibm1
Revised cluster status:
Online: [ ibm1 sgi2 test02 test03 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started test03
rsc_sgi2 (ocf::heartbeat:IPaddr): Started sgi2
rsc_ibm1 (ocf::heartbeat:IPaddr): Started ibm1
rsc_test02 (ocf::heartbeat:IPaddr): Started test02
rsc_test03 (ocf::heartbeat:IPaddr): Started test03
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started test02
child_DoFencing:1 (stonith:ssh): Started test03
child_DoFencing:2 (stonith:ssh): Started ibm1
child_DoFencing:3 (stonith:ssh): Started sgi2
diff --git a/pengine/test10/735.summary b/pengine/test10/735.summary
index 71b7b71749..69538de590 100644
--- a/pengine/test10/735.summary
+++ b/pengine/test10/735.summary
@@ -1,51 +1,51 @@
Current cluster status:
Online: [ hadev2 hadev3 ]
OFFLINE: [ hadev1 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2
- rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev2
+ rsc_hadev1 (ocf::heartbeat:IPaddr): Starting hadev2
rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2
- rsc_hadev3 (ocf::heartbeat:IPaddr): Stopped
+ rsc_hadev3 (ocf::heartbeat:IPaddr): Starting
Clone Set: DoFencing [child_DoFencing] (unique)
- child_DoFencing:0 (stonith:ssh): Stopped
+ child_DoFencing:0 (stonith:ssh): Starting
child_DoFencing:1 (stonith:ssh): Stopped
child_DoFencing:2 (stonith:ssh): Stopped
Transition Summary:
* Move rsc_hadev1 (Started hadev2 -> hadev3)
* Start rsc_hadev3 (hadev3)
* Start child_DoFencing:0 (hadev2)
* Start child_DoFencing:1 (hadev3)
Executing cluster transition:
* Resource action: DcIPaddr monitor on hadev3
* Resource action: rsc_hadev1 stop on hadev2
* Resource action: rsc_hadev1 start on hadev3
* Resource action: rsc_hadev2 monitor on hadev3
* Resource action: rsc_hadev3 start on hadev3
* Resource action: child_DoFencing:0 monitor on hadev3
* Resource action: child_DoFencing:2 monitor on hadev3
* Pseudo action: DoFencing_start_0
* Pseudo action: all_stopped
* Resource action: rsc_hadev1 monitor=5000 on hadev3
* Resource action: rsc_hadev3 monitor=5000 on hadev3
* Resource action: child_DoFencing:0 start on hadev2
* Resource action: child_DoFencing:1 start on hadev3
* Pseudo action: DoFencing_running_0
* Resource action: child_DoFencing:0 monitor=5000 on hadev2
* Resource action: child_DoFencing:1 monitor=5000 on hadev3
Revised cluster status:
Online: [ hadev2 hadev3 ]
OFFLINE: [ hadev1 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev1 (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2
rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started hadev2
child_DoFencing:1 (stonith:ssh): Started hadev3
child_DoFencing:2 (stonith:ssh): Stopped
diff --git a/pengine/test10/764.summary b/pengine/test10/764.summary
index da85bbaca4..2d43eae782 100644
--- a/pengine/test10/764.summary
+++ b/pengine/test10/764.summary
@@ -1,56 +1,56 @@
Current cluster status:
Online: [ posic041 posic043 ]
OFFLINE: [ posic042 posic044 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started posic043
rsc_posic041 (ocf::heartbeat:IPaddr): Started posic041
rsc_posic042 (ocf::heartbeat:IPaddr): Started posic041
rsc_posic043 (ocf::heartbeat:IPaddr): Started posic043
- rsc_posic044 (ocf::heartbeat:IPaddr): Started posic041
+ rsc_posic044 (ocf::heartbeat:IPaddr): Starting posic041
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started posic043
child_DoFencing:1 (stonith:ssh): Started posic041
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
Transition Summary:
* Stop DcIPaddr (Started posic043)
* Stop rsc_posic041 (Started posic041)
* Stop rsc_posic042 (Started posic041)
* Stop rsc_posic043 (Started posic043)
* Stop rsc_posic044 (Started posic041)
Executing cluster transition:
* Resource action: DcIPaddr monitor on posic041
* Resource action: rsc_posic041 monitor on posic043
* Resource action: rsc_posic042 monitor on posic043
* Resource action: rsc_posic043 monitor on posic041
* Resource action: rsc_posic044 monitor on posic043
* Resource action: child_DoFencing:0 monitor=5000 on posic043
* Resource action: child_DoFencing:1 monitor=5000 on posic041
* Resource action: child_DoFencing:1 monitor on posic043
* Resource action: child_DoFencing:2 monitor on posic041
* Resource action: child_DoFencing:3 monitor on posic041
* Resource action: DcIPaddr stop on posic043
* Resource action: rsc_posic041 stop on posic041
* Resource action: rsc_posic042 stop on posic041
* Resource action: rsc_posic043 stop on posic043
* Resource action: rsc_posic044 stop on posic041
* Pseudo action: all_stopped
Revised cluster status:
Online: [ posic041 posic043 ]
OFFLINE: [ posic042 posic044 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
rsc_posic041 (ocf::heartbeat:IPaddr): Stopped
rsc_posic042 (ocf::heartbeat:IPaddr): Stopped
rsc_posic043 (ocf::heartbeat:IPaddr): Stopped
rsc_posic044 (ocf::heartbeat:IPaddr): Started posic041
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started posic043
child_DoFencing:1 (stonith:ssh): Started posic041
child_DoFencing:2 (stonith:ssh): Stopped
child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/pengine/test10/797.summary b/pengine/test10/797.summary
index f47fe79b05..6e78255393 100644
--- a/pengine/test10/797.summary
+++ b/pengine/test10/797.summary
@@ -1,72 +1,72 @@
Current cluster status:
Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline)
Online: [ c001n01 c001n02 c001n03 ]
DcIPaddr (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02
rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03
rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01
Clone Set: DoFencing [child_DoFencing] (unique)
- child_DoFencing:0 (stonith:ssh): Started [ c001n01 c001n03 ]
+ child_DoFencing:0 (stonith:ssh): Started (Monitoring)[ c001n01 c001n03 ]
child_DoFencing:1 (stonith:ssh): Started c001n02
child_DoFencing:2 (stonith:ssh): Started c001n03
child_DoFencing:3 (stonith:ssh): Stopped
Transition Summary:
* Stop DcIPaddr (Started c001n03)
* Stop rsc_c001n08 (Started c001n02)
* Stop rsc_c001n02 (Started c001n02)
* Stop rsc_c001n03 (Started c001n03)
* Stop rsc_c001n01 (Started c001n01)
* Restart child_DoFencing:0 (Started c001n01)
* Stop child_DoFencing:1 (c001n02)
Executing cluster transition:
* Resource action: DcIPaddr monitor on c001n02
* Resource action: DcIPaddr monitor on c001n01
* Resource action: DcIPaddr stop on c001n03
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n01
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n01
* Pseudo action: DoFencing_stop_0
* Resource action: DcIPaddr delete on c001n03
* Resource action: rsc_c001n08 stop on c001n02
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: rsc_c001n03 stop on c001n03
* Resource action: rsc_c001n01 stop on c001n01
* Resource action: child_DoFencing:0 stop on c001n03
* Resource action: child_DoFencing:0 stop on c001n01
* Resource action: child_DoFencing:1 stop on c001n02
* Pseudo action: DoFencing_stopped_0
* Pseudo action: DoFencing_start_0
* Cluster action: do_shutdown on c001n02
* Pseudo action: all_stopped
* Resource action: child_DoFencing:0 start on c001n01
* Resource action: child_DoFencing:0 monitor=5000 on c001n01
* Pseudo action: DoFencing_running_0
Revised cluster status:
Node c001n08 (6427cb5a-c7a5-4bdf-9892-a04ce56f4e6b): UNCLEAN (offline)
Online: [ c001n01 c001n02 c001n03 ]
DcIPaddr (ocf::heartbeat:IPaddr): Stopped
rsc_c001n08 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped
rsc_c001n01 (ocf::heartbeat:IPaddr): Stopped
Clone Set: DoFencing [child_DoFencing] (unique)
child_DoFencing:0 (stonith:ssh): Started c001n01
child_DoFencing:1 (stonith:ssh): Stopped
child_DoFencing:2 (stonith:ssh): Started c001n03
child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary
index 5e62a23663..4d0b10e5c7 100644
--- a/pengine/test10/bug-5186-partial-migrate.summary
+++ b/pengine/test10/bug-5186-partial-migrate.summary
@@ -1,90 +1,90 @@
Current cluster status:
Node bl460g1n7 (3232261593): UNCLEAN (offline)
Online: [ bl460g1n6 bl460g1n8 ]
prmDummy (ocf::pacemaker:Dummy): Started bl460g1n7 (UNCLEAN)
- prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n7 (UNCLEAN)
+ prmVM2 (ocf::heartbeat:VirtualDomain): Migrating bl460g1n7 (UNCLEAN)
Resource Group: grpStonith6
prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8
prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8
Resource Group: grpStonith7
prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6
Resource Group: grpStonith8
prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN)
prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN)
Clone Set: clnDiskd1 [prmDiskd1]
prmDiskd1 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Clone Set: clnDiskd2 [prmDiskd2]
prmDiskd2 (ocf::pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Clone Set: clnPing [prmPing]
prmPing (ocf::pacemaker:ping): Started bl460g1n7 (UNCLEAN)
Started: [ bl460g1n6 bl460g1n8 ]
Transition Summary:
* Move prmDummy (Started bl460g1n7 -> bl460g1n6)
* Move prmVM2 (Started bl460g1n7 -> bl460g1n8)
* Move prmStonith8-1 (Started bl460g1n7 -> bl460g1n6)
* Move prmStonith8-2 (Started bl460g1n7 -> bl460g1n6)
* Stop prmDiskd1:0 (bl460g1n7)
* Stop prmDiskd2:0 (bl460g1n7)
* Stop prmPing:0 (bl460g1n7)
Executing cluster transition:
* Resource action: prmVM2 stop on bl460g1n6
* Pseudo action: grpStonith8_stop_0
* Pseudo action: prmStonith8-2_stop_0
* Fencing bl460g1n7 (reboot)
* Pseudo action: stonith_complete
* Pseudo action: prmDummy_stop_0
* Pseudo action: prmVM2_stop_0
* Pseudo action: prmStonith8-1_stop_0
* Pseudo action: clnDiskd1_stop_0
* Pseudo action: clnDiskd2_stop_0
* Pseudo action: clnPing_stop_0
* Resource action: prmDummy start on bl460g1n6
* Resource action: prmVM2 start on bl460g1n8
* Pseudo action: grpStonith8_stopped_0
* Pseudo action: grpStonith8_start_0
* Resource action: prmStonith8-1 start on bl460g1n6
* Resource action: prmStonith8-2 start on bl460g1n6
* Pseudo action: prmDiskd1_stop_0
* Pseudo action: clnDiskd1_stopped_0
* Pseudo action: prmDiskd2_stop_0
* Pseudo action: clnDiskd2_stopped_0
* Pseudo action: prmPing_stop_0
* Pseudo action: clnPing_stopped_0
* Pseudo action: all_stopped
* Resource action: prmVM2 monitor=10000 on bl460g1n8
* Pseudo action: grpStonith8_running_0
* Resource action: prmStonith8-1 monitor=10000 on bl460g1n6
* Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6
Revised cluster status:
Online: [ bl460g1n6 bl460g1n8 ]
OFFLINE: [ bl460g1n7 ]
prmDummy (ocf::pacemaker:Dummy): Started bl460g1n6
prmVM2 (ocf::heartbeat:VirtualDomain): Started bl460g1n8
Resource Group: grpStonith6
prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8
prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8
Resource Group: grpStonith7
prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6
Resource Group: grpStonith8
prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6
prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6
Clone Set: clnDiskd1 [prmDiskd1]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
Clone Set: clnDiskd2 [prmDiskd2]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
Clone Set: clnPing [prmPing]
Started: [ bl460g1n6 bl460g1n8 ]
Stopped: [ bl460g1n7 ]
diff --git a/pengine/test10/mon-rsc-3.summary b/pengine/test10/mon-rsc-3.summary
index 4cb7be43d3..d8f1846cbc 100644
--- a/pengine/test10/mon-rsc-3.summary
+++ b/pengine/test10/mon-rsc-3.summary
@@ -1,18 +1,18 @@
Current cluster status:
Online: [ node1 node2 ]
- rsc1 (heartbeat:apache): Started node1
+ rsc1 (heartbeat:apache): Starting node1
Transition Summary:
Executing cluster transition:
* Resource action: rsc1 monitor on node2
* Resource action: rsc1 start on node1
* Resource action: rsc1 monitor=5000 on node1
Revised cluster status:
Online: [ node1 node2 ]
rsc1 (heartbeat:apache): Started node1
diff --git a/pengine/test10/mon-rsc-4.summary b/pengine/test10/mon-rsc-4.summary
index a532bec624..92b0f68946 100644
--- a/pengine/test10/mon-rsc-4.summary
+++ b/pengine/test10/mon-rsc-4.summary
@@ -1,23 +1,23 @@
Current cluster status:
Node node2 (uuid2): standby
Online: [ node1 ]
- rsc1 (heartbeat:apache): Started node2
+ rsc1 (heartbeat:apache): Starting node2
Transition Summary:
* Move rsc1 (Started node2 -> node1)
Executing cluster transition:
* Resource action: rsc1 monitor on node1
* Resource action: rsc1 stop on node2
* Resource action: rsc1 start on node1
* Pseudo action: all_stopped
* Resource action: rsc1 monitor=5000 on node1
Revised cluster status:
Node node2 (uuid2): standby
Online: [ node1 ]
rsc1 (heartbeat:apache): Started [ node1 node2 ]
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 5646903fac..41763fd370 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,4102 +1,4102 @@
/*
* Copyright (C) 2004-2015 Andrew Beekhof
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include <../lib/pengine/unpack.h>
#include <../pengine/pengine.h>
#include
extern void cleanup_alloc_calculations(pe_working_set_t * data_set);
void clean_up(int rc);
void crm_diff_update(const char *event, xmlNode * msg);
gboolean mon_refresh_display(gpointer user_data);
int cib_connect(gboolean full);
void mon_st_callback(stonith_t * st, stonith_event_t * e);
static char *get_node_display_name(node_t *node);
/*
* Definitions indicating which items to print
*/
#define mon_show_times (0x0001U)
#define mon_show_stack (0x0002U)
#define mon_show_dc (0x0004U)
#define mon_show_count (0x0008U)
#define mon_show_nodes (0x0010U)
#define mon_show_resources (0x0020U)
#define mon_show_attributes (0x0040U)
#define mon_show_failcounts (0x0080U)
#define mon_show_operations (0x0100U)
#define mon_show_tickets (0x0200U)
#define mon_show_bans (0x0400U)
#define mon_show_headers (mon_show_times | mon_show_stack | mon_show_dc | mon_show_count)
#define mon_show_default (mon_show_headers | mon_show_nodes | mon_show_resources)
#define mon_show_all (mon_show_default | mon_show_attributes | mon_show_failcounts \
| mon_show_operations | mon_show_tickets | mon_show_bans)
unsigned int show = mon_show_default;
/*
* Definitions indicating how to output
*/
enum mon_output_format_e {
mon_output_none,
mon_output_monitor,
mon_output_plain,
mon_output_console,
mon_output_xml,
mon_output_html,
mon_output_cgi
} output_format = mon_output_console;
char *output_filename = NULL; /* if sending output to a file, its name */
/* other globals */
char *xml_file = NULL;
char *pid_file = NULL;
char *snmp_target = NULL;
char *snmp_community = NULL;
gboolean group_by_node = FALSE;
gboolean inactive_resources = FALSE;
int reconnect_msec = 5000;
gboolean daemonize = FALSE;
GMainLoop *mainloop = NULL;
guint timer_id = 0;
GList *attr_list = NULL;
const char *crm_mail_host = NULL;
const char *crm_mail_prefix = NULL;
const char *crm_mail_from = NULL;
const char *crm_mail_to = NULL;
const char *external_agent = NULL;
const char *external_recipient = NULL;
cib_t *cib = NULL;
stonith_t *st = NULL;
xmlNode *current_cib = NULL;
gboolean one_shot = FALSE;
gboolean has_warnings = FALSE;
gboolean print_timing = FALSE;
gboolean watch_fencing = FALSE;
gboolean print_brief = FALSE;
-gboolean print_pending = FALSE;
+gboolean print_pending = TRUE;
gboolean print_clone_detail = FALSE;
/* FIXME allow, detect, and correctly interpret glob pattern or regex? */
const char *print_neg_location_prefix = "";
/* Never display node attributes whose name starts with one of these prefixes */
#define FILTER_STR { "shutdown", "terminate", "standby", "fail-count", \
"last-failure", "probe_complete", "#", NULL }
long last_refresh = 0;
crm_trigger_t *refresh_trigger = NULL;
/*
* 1.3.6.1.4.1.32723 has been assigned to the project by IANA
* http://www.iana.org/assignments/enterprise-numbers
*/
#define PACEMAKER_PREFIX "1.3.6.1.4.1.32723"
#define PACEMAKER_TRAP_PREFIX PACEMAKER_PREFIX ".1"
#define snmp_crm_trap_oid PACEMAKER_TRAP_PREFIX
#define snmp_crm_oid_node PACEMAKER_TRAP_PREFIX ".1"
#define snmp_crm_oid_rsc PACEMAKER_TRAP_PREFIX ".2"
#define snmp_crm_oid_task PACEMAKER_TRAP_PREFIX ".3"
#define snmp_crm_oid_desc PACEMAKER_TRAP_PREFIX ".4"
#define snmp_crm_oid_status PACEMAKER_TRAP_PREFIX ".5"
#define snmp_crm_oid_rc PACEMAKER_TRAP_PREFIX ".6"
#define snmp_crm_oid_trc PACEMAKER_TRAP_PREFIX ".7"
/* Define exit codes for monitoring-compatible output */
#define MON_STATUS_OK (0)
#define MON_STATUS_WARN (1)
/* Convenience macro for prettifying output (e.g. "node" vs "nodes") */
#define s_if_plural(i) (((i) == 1)? "" : "s")
#if CURSES_ENABLED
# define print_dot() if (output_format == mon_output_console) { \
printw("."); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, "."); \
}
#else
# define print_dot() fprintf(stdout, ".");
#endif
#if CURSES_ENABLED
# define print_as(fmt, args...) if (output_format == mon_output_console) { \
printw(fmt, ##args); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, fmt, ##args); \
}
#else
# define print_as(fmt, args...) fprintf(stdout, fmt, ##args);
#endif
/* Erase the whole curses screen and park the cursor at the top-left.
 * No-op when curses support is compiled out. */
static void
blank_screen(void)
{
#if CURSES_ENABLED
    int row = 0;

    while (row < LINES) {
        move(row, 0);
        clrtoeol();
        row++;
    }
    move(0, 0);
    refresh();
#endif
}
/* Reconnect timer callback: attempt to re-establish the CIB connection.
 * Always returns FALSE so the old timer is not rescheduled by GLib; on
 * failure a fresh timer is armed explicitly instead. */
static gboolean
mon_timer_popped(gpointer data)
{
    int status = pcmk_ok;

#if CURSES_ENABLED
    if (output_format == mon_output_console) {
        clear();
        refresh();
    }
#endif

    /* Drop any pending reconnect timer before trying again. */
    if (timer_id > 0) {
        g_source_remove(timer_id);
    }

    print_as("Reconnecting...\n");
    status = cib_connect(TRUE);

    if (status != pcmk_ok) {
        /* Still down: schedule another attempt after the usual interval. */
        timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
    }
    return FALSE;
}
/* CIB disconnect callback: announce the loss, sign off the dead session,
 * and arm a timer to retry the connection. */
static void
mon_cib_connection_destroy(gpointer user_data)
{
    print_as("Connection to the CIB terminated\n");
    if (cib == NULL) {
        return;
    }
    cib->cmds->signoff(cib);
    timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
}
/*
* Mainloop signal handler.
*/
/* Mainloop signal handler (SIGTERM/SIGINT): exit cleanly. */
static void
mon_shutdown(int nsig)
{
    clean_up(EX_OK);
}
#if ON_DARWIN
# define sighandler_t sig_t
#endif
#if CURSES_ENABLED
# ifndef HAVE_SIGHANDLER_T
typedef void (*sighandler_t) (int);
# endif
static sighandler_t ncurses_winch_handler;
/* SIGWINCH handler: resize the curses display to the new terminal size
 * and trigger a redraw.  Chains to the ncurses-installed WINCH handler
 * (saved in ncurses_winch_handler) so the library updates LINES/COLS. */
static void
mon_winresize(int nsig)
{
    /* Re-entrancy guard: a nested WINCH while one is being handled is
     * ignored rather than recursing into curses. */
    static int not_done;
    int lines = 0, cols = 0;

    if (!not_done++) {
        if (ncurses_winch_handler)
            /* the original ncurses WINCH signal handler does the
             * magic of retrieving the new window size;
             * otherwise, we'd have to use ioctl or tgetent */
            (*ncurses_winch_handler) (SIGWINCH);
        getmaxyx(stdscr, lines, cols);
        resizeterm(lines, cols);
        /* Redraw on the mainloop, not from signal context. */
        mainloop_set_trigger(refresh_trigger);
    }
    not_done--;
}
#endif
/* Connect to the CIB (and, when watch_fencing is set, the fencer).
 *
 * full  When TRUE, also register disconnect and diff-notification
 *       callbacks so the display refreshes on every CIB change and the
 *       connection is re-established after a drop.
 *
 * Returns pcmk_ok on success or a negative pacemaker error code.
 * Exits via clean_up() if notification setup fails in full mode. */
int
cib_connect(gboolean full)
{
    int rc = pcmk_ok;
    static gboolean need_pass = TRUE;

    CRM_CHECK(cib != NULL, return -EINVAL);

    /* A password supplied via the environment means we never prompt. */
    if (getenv("CIB_passwd") != NULL) {
        need_pass = FALSE;
    }

    if (watch_fencing && st == NULL) {
        st = stonith_api_new();
    }

    /* Guard st != NULL: stonith_api_new() may fail, and the original code
     * dereferenced st->state unconditionally. */
    if (watch_fencing && st != NULL && st->state == stonith_disconnected) {
        crm_trace("Connecting to stonith");
        rc = st->cmds->connect(st, crm_system_name, NULL);
        if (rc == pcmk_ok) {
            crm_trace("Setting up stonith callbacks");
            st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback);
        }
    }

    if (cib->state != cib_connected_query && cib->state != cib_connected_command) {
        crm_trace("Connecting to the CIB");

        /* A remote CIB connection may prompt for a password on the console;
         * only announce the prompt once. */
        if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) {
            need_pass = FALSE;
            print_as("Password:");
        }

        rc = cib->cmds->signon(cib, crm_system_name, cib_query);
        if (rc != pcmk_ok) {
            return rc;
        }

        /* Fixed mojibake: "&current_cib" had been garbled into an HTML
         * entity ("&curren" -> currency sign) by a bad transcoding pass. */
        rc = cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
        if (rc == pcmk_ok) {
            mon_refresh_display(NULL);
        }

        if (rc == pcmk_ok && full) {
            /* Reconnect automatically if the CIB connection drops.
             * (The redundant nested rc == pcmk_ok check was flattened.) */
            rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy);
            if (rc == -EPROTONOSUPPORT) {
                print_as
                    ("Notification setup not supported, won't be able to reconnect after failure");
                if (output_format == mon_output_console) {
                    sleep(2);
                }
                rc = pcmk_ok;
            }

            /* Subscribe to CIB diff notifications so updates refresh us. */
            if (rc == pcmk_ok) {
                cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
                rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
            }

            if (rc != pcmk_ok) {
                print_as("Notification setup failed, could not monitor CIB actions");
                if (output_format == mon_output_console) {
                    sleep(2);
                }
                clean_up(-rc);
            }
        }
    }
    return rc;
}
/* *INDENT-OFF* */
/* Command-line option table consumed by crm_set_options()/crm_get_option().
 * "-spacer-" rows are help-text layout only; the final field (when present)
 * carries pcmk_option_* display flags or a feature-availability condition. */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help", 0, 0, '?', "\tThis text"},
    {"version", 0, 0, '$', "\tVersion information" },
    {"verbose", 0, 0, 'V', "\tIncrease debug output"},
    {"quiet", 0, 0, 'Q', "\tDisplay only essential output" },
    {"-spacer-", 1, 0, '-', "\nModes:"},
    {"as-html", 1, 0, 'h', "\tWrite cluster status to the named html file"},
    {"as-xml", 0, 0, 'X', "\t\tWrite cluster status as xml to stdout. This will enable one-shot mode."},
    {"web-cgi", 0, 0, 'w', "\t\tWeb mode with output suitable for cgi"},
    {"simple-status", 0, 0, 's', "\tDisplay the cluster status once as a simple one line output (suitable for nagios)"},
    {"snmp-traps", 1, 0, 'S', "\tSend SNMP traps to this station", !ENABLE_SNMP},
    {"snmp-community", 1, 0, 'C', "Specify community for SNMP traps(default is NULL)", !ENABLE_SNMP},
    {"mail-to", 1, 0, 'T', "\tSend Mail alerts to this user. See also --mail-from, --mail-host, --mail-prefix", !ENABLE_ESMTP},
    {"-spacer-", 1, 0, '-', "\nDisplay Options:"},
    {"group-by-node", 0, 0, 'n', "\tGroup resources by node" },
    {"inactive", 0, 0, 'r', "\t\tDisplay inactive resources" },
    {"failcounts", 0, 0, 'f', "\tDisplay resource fail counts"},
    {"operations", 0, 0, 'o', "\tDisplay resource operation history" },
    {"timing-details", 0, 0, 't', "\tDisplay resource operation history with timing details" },
    {"tickets", 0, 0, 'c', "\t\tDisplay cluster tickets"},
    {"watch-fencing", 0, 0, 'W', "\tListen for fencing events. For use with --external-agent, --mail-to and/or --snmp-traps where supported"},
    {"neg-locations", 2, 0, 'L', "Display negative location constraints [optionally filtered by id prefix]"},
    {"show-node-attributes", 0, 0, 'A', "Display node attributes" },
    {"hide-headers", 0, 0, 'D', "\tHide all headers" },
    {"show-detail", 0, 0, 'R', "\tShow more details (node IDs, individual clone instances)" },
    {"brief", 0, 0, 'b', "\t\tBrief output" },
    /* NOTE(review): the next two lines are unified-diff residue ('-' = old,
     * '+' = new); the '+' line — with pcmk_option_hidden, matching the
     * print_pending-on-by-default change — is the intended definition.
     * Resolve before compiling. */
    - {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled" },
    + {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
    {"-spacer-", 1, 0, '-', "\nAdditional Options:"},
    {"interval", 1, 0, 'i', "\tUpdate frequency in seconds" },
    {"one-shot", 0, 0, '1', "\t\tDisplay the cluster status once on the console and exit"},
    {"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED},
    {"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"},
    {"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"},
    {"mail-from", 1, 0, 'F', "\tMail alerts should come from the named user", !ENABLE_ESMTP},
    {"mail-host", 1, 0, 'H', "\tMail alerts should be sent via the named host", !ENABLE_ESMTP},
    {"mail-prefix", 1, 0, 'P', "Subjects for mail alerts should start with this string", !ENABLE_ESMTP},
    {"external-agent", 1, 0, 'E', "A program to run when resource operations take place."},
    {"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."},
    {"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden},
    {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "Display the cluster status on the console with updates as they occur:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the cluster status on the console just once then exit:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display your cluster status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Start crm_mon and export the current cluster status as xml to stdout, then exit.:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --as-xml", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send email alerts:", pcmk_option_paragraph|!ENABLE_ESMTP},
    {"-spacer-", 1, 0, '-', " crm_mon --daemonize --mail-to user@example.com --mail-host mail.example.com", pcmk_option_example|!ENABLE_ESMTP},
    {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send SNMP alerts:", pcmk_option_paragraph|!ENABLE_SNMP},
    {"-spacer-", 1, 0, '-', " crm_mon --daemonize --snmp-traps snmptrapd.example.com", pcmk_option_example|!ENABLE_SNMP},
    {NULL, 0, 0, 0}
};
/* *INDENT-ON* */
#if CURSES_ENABLED
/* Look up the help text for the single-letter option 'c' in long_options.
 * Returns the description with any leading tab padding stripped, or NULL
 * when no matching option exists. */
static const char *
get_option_desc(char c)
{
    int idx;

    for (idx = 0; long_options[idx].name != NULL; idx++) {
        const char *tab;

        /* "-spacer-" and example rows start with '-'; they are layout only. */
        if (long_options[idx].name[0] == '-') {
            continue;
        }
        if (long_options[idx].val != c) {
            continue;
        }
        /* Descriptions may begin with tabs for column alignment; return the
         * text after the last tab, or the whole string when untabbed. */
        tab = strrchr(long_options[idx].desc, '\t');
        return (tab == NULL) ? long_options[idx].desc : tab + 1;
    }
    return NULL;
}
#define print_option_help(option, condition) \
print_as("%c %c: \t%s\n", ((condition)? '*': ' '), option, get_option_desc(option));
/* Interactive-mode keyboard handler (curses builds only).
 * Each recognized key toggles one display option; '?' enters a help screen
 * listing the toggles; any other key just forces a refresh.  Always returns
 * TRUE so the GIOChannel watch stays installed. */
static gboolean
detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer unused)
{
    int c;
    gboolean config_mode = FALSE;

    while (1) {

        /* Get user input */
        c = getchar();

        switch (c) {
            case 'c':
                show ^= mon_show_tickets;
                break;
            case 'f':
                show ^= mon_show_failcounts;
                break;
            case 'n':
                group_by_node = ! group_by_node;
                break;
            case 'o':
                show ^= mon_show_operations;
                /* Timing details are meaningless without the operation list. */
                if ((show & mon_show_operations) == 0) {
                    print_timing = 0;
                }
                break;
            case 'r':
                inactive_resources = ! inactive_resources;
                break;
            case 'R':
                print_clone_detail = ! print_clone_detail;
                break;
            case 't':
                print_timing = ! print_timing;
                /* Timing implies showing operations. */
                if (print_timing) {
                    show |= mon_show_operations;
                }
                break;
            case 'A':
                show ^= mon_show_attributes;
                break;
            case 'L':
                show ^= mon_show_bans;
                break;
            case 'D':
                /* If any header is shown, clear them all, otherwise set them all */
                if (show & mon_show_headers) {
                    show &= ~mon_show_headers;
                } else {
                    show |= mon_show_headers;
                }
                break;
            case 'b':
                print_brief = ! print_brief;
                break;
            case 'j':
                print_pending = ! print_pending;
                break;
            case '?':
                config_mode = TRUE;
                break;
            default:
                /* Unrecognized key: leave the help loop and redraw. */
                goto refresh;
        }

        if (!config_mode)
            goto refresh;

        /* '?' help screen: show each toggle with its current state. */
        blank_screen();
        print_as("Display option change mode\n");
        print_as("\n");
        print_option_help('c', show & mon_show_tickets);
        print_option_help('f', show & mon_show_failcounts);
        print_option_help('n', group_by_node);
        print_option_help('o', show & mon_show_operations);
        print_option_help('r', inactive_resources);
        print_option_help('t', print_timing);
        print_option_help('A', show & mon_show_attributes);
        print_option_help('L', show & mon_show_bans);
        print_option_help('D', (show & mon_show_headers) == 0);
        print_option_help('R', print_clone_detail);
        print_option_help('b', print_brief);
        print_option_help('j', print_pending);
        print_as("\n");
        print_as("Toggle fields via field letter, type any other key to return");
    }

refresh:
    mon_refresh_display(NULL);
    return TRUE;
}
#endif
/* Entry point: parse options, pick an output mode (console/plain/html/xml/
 * cgi/monitor), connect to the CIB (or load it from a file), then either
 * print once (one-shot) or run a GLib mainloop that refreshes the display
 * on CIB changes and reconnect timers. */
int
main(int argc, char **argv)
{
    int flag;
    int argerr = 0;
    int exit_code = 0;
    int option_index = 0;

    pid_file = strdup("/tmp/ClusterMon.pid");
    crm_log_cli_init("crm_mon");
    crm_set_options(NULL, "mode [options]", long_options,
                    "Provides a summary of cluster's current state."
                    "\n\nOutputs varying levels of detail in a number of different formats.\n");

#if !defined (ON_DARWIN) && !defined (ON_BSD)
    /* prevent zombies */
    signal(SIGCLD, SIG_IGN);
#endif

    /* Invoked via a CGI symlink: force cgi output and a single pass. */
    if (strcmp(crm_system_name, "crm_mon.cgi") == 0) {
        output_format = mon_output_cgi;
        one_shot = TRUE;
    }

    /* Option-parsing loop; each case sets a global consumed later. */
    while (1) {
        flag = crm_get_option(argc, argv, &option_index);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                crm_bump_log_level(argc, argv);
                break;
            case 'Q':
                show &= ~mon_show_times;
                break;
            case 'i':
                reconnect_msec = crm_get_msec(optarg);
                break;
            case 'n':
                group_by_node = TRUE;
                break;
            case 'r':
                inactive_resources = TRUE;
                break;
            case 'W':
                watch_fencing = TRUE;
                break;
            case 'd':
                daemonize = TRUE;
                break;
            case 't':
                /* Timing details imply the operation history display. */
                print_timing = TRUE;
                show |= mon_show_operations;
                break;
            case 'o':
                show |= mon_show_operations;
                break;
            case 'f':
                show |= mon_show_failcounts;
                break;
            case 'A':
                show |= mon_show_attributes;
                break;
            case 'L':
                show |= mon_show_bans;
                print_neg_location_prefix = optarg? optarg : "";
                break;
            case 'D':
                show &= ~mon_show_headers;
                break;
            case 'b':
                print_brief = TRUE;
                break;
            case 'j':
                print_pending = TRUE;
                break;
            case 'R':
                print_clone_detail = TRUE;
                break;
            case 'c':
                show |= mon_show_tickets;
                break;
            case 'p':
                free(pid_file);
                if(optarg == NULL) {
                    return crm_help(flag, EX_USAGE);
                }
                pid_file = strdup(optarg);
                break;
            case 'x':
                /* Read cluster state from a file instead of a live CIB. */
                if(optarg == NULL) {
                    return crm_help(flag, EX_USAGE);
                }
                xml_file = strdup(optarg);
                one_shot = TRUE;
                break;
            case 'h':
                if(optarg == NULL) {
                    return crm_help(flag, EX_USAGE);
                }
                output_format = mon_output_html;
                output_filename = strdup(optarg);
                /* Keep the HTML file world-readable but group/other
                 * non-writable. */
                umask(S_IWGRP | S_IWOTH);
                break;
            case 'X':
                output_format = mon_output_xml;
                one_shot = TRUE;
                break;
            case 'w':
                output_format = mon_output_cgi;
                one_shot = TRUE;
                break;
            case 's':
                output_format = mon_output_monitor;
                one_shot = TRUE;
                break;
            case 'S':
                snmp_target = optarg;
                break;
            case 'T':
                crm_mail_to = optarg;
                break;
            case 'F':
                crm_mail_from = optarg;
                break;
            case 'H':
                crm_mail_host = optarg;
                break;
            case 'P':
                crm_mail_prefix = optarg;
                break;
            case 'E':
                external_agent = optarg;
                break;
            case 'e':
                external_recipient = optarg;
                break;
            case '1':
                one_shot = TRUE;
                break;
            case 'N':
                /* Disable ncurses: fall back to plain console output. */
                if (output_format == mon_output_console) {
                    output_format = mon_output_plain;
                }
                break;
            case 'C':
                snmp_community = optarg;
                break;
            case '$':
            case '?':
                return crm_help(flag, EX_OK);
                break;
            default:
                printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
                ++argerr;
                break;
        }
    }

    if (optind < argc) {
        printf("non-option ARGV-elements: ");
        while (optind < argc)
            printf("%s ", argv[optind++]);
        printf("\n");
    }
    if (argerr) {
        return crm_help('?', EX_USAGE);
    }

    /* XML output always prints everything */
    if (output_format == mon_output_xml) {
        show = mon_show_all;
        print_timing = TRUE;
    }

    /* Reconcile output mode with one-shot/daemon/console operation. */
    if (one_shot) {
        if (output_format == mon_output_console) {
            output_format = mon_output_plain;
        }
    } else if (daemonize) {
        if ((output_format == mon_output_console) || (output_format == mon_output_plain)) {
            output_format = mon_output_none;
        }
        crm_enable_stderr(FALSE);

        /* A daemon with no output sink is useless: require at least one. */
        if ((output_format != mon_output_html) && (output_format != mon_output_xml)
            && !snmp_target && !crm_mail_to && !external_agent) {
            printf
                ("Looks like you forgot to specify one or more of: --as-html, --as-xml, --mail-to, --snmp-target, --external-agent\n");
            return crm_help('?', EX_USAGE);
        }

        crm_make_daemon(crm_system_name, TRUE, pid_file);

    } else if (output_format == mon_output_console) {
#if CURSES_ENABLED
        /* Interactive console: take over the terminal with curses. */
        initscr();
        cbreak();
        noecho();
        crm_enable_stderr(FALSE);
#else
        one_shot = TRUE;
        output_format = mon_output_plain;
        printf("Defaulting to one-shot mode\n");
        printf("You need to have curses available at compile time to enable console mode\n");
#endif
    }

    crm_info("Starting %s", crm_system_name);

    /* Offline mode: render the given XML file once and exit. */
    if (xml_file != NULL) {
        current_cib = filename2xml(xml_file);
        mon_refresh_display(NULL);
        return exit_code;
    }

    /* Live mode: keep trying to connect until it sticks (or once, for
     * one-shot). */
    if (current_cib == NULL) {
        cib = cib_new();

        do {
            if (!one_shot) {
                print_as("Attempting connection to the cluster...\n");
            }
            exit_code = cib_connect(!one_shot);

            if (one_shot) {
                break;

            } else if (exit_code != pcmk_ok) {
                sleep(reconnect_msec / 1000);
#if CURSES_ENABLED
                if (output_format == mon_output_console) {
                    clear();
                    refresh();
                }
#endif
            }

        } while (exit_code == -ENOTCONN);

        if (exit_code != pcmk_ok) {
            if (output_format == mon_output_monitor) {
                printf("CLUSTER WARN: Connection to cluster failed: %s\n", pcmk_strerror(exit_code));
                clean_up(MON_STATUS_WARN);
            } else {
                print_as("\nConnection to cluster failed: %s\n", pcmk_strerror(exit_code));
            }
            if (output_format == mon_output_console) {
                sleep(2);
            }
            clean_up(-exit_code);
        }
    }

    if (one_shot) {
        return exit_code;
    }

    /* Continuous mode: run the mainloop; crm_diff_update() and the
     * reconnect timer drive refreshes from here on. */
    mainloop = g_main_new(FALSE);

    mainloop_add_signal(SIGTERM, mon_shutdown);
    mainloop_add_signal(SIGINT, mon_shutdown);
#if CURSES_ENABLED
    if (output_format == mon_output_console) {
        ncurses_winch_handler = signal(SIGWINCH, mon_winresize);
        if (ncurses_winch_handler == SIG_DFL ||
            ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
            ncurses_winch_handler = NULL;
        g_io_add_watch(g_io_channel_unix_new(STDIN_FILENO), G_IO_IN, detect_user_input, NULL);
    }
#endif
    refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);

    g_main_run(mainloop);
    g_main_destroy(mainloop);

    crm_info("Exiting %s", crm_system_name);

    clean_up(0);
    return 0;                   /* never reached */
}
#define mon_warn(fmt...) do { \
if (!has_warnings) { \
print_as("CLUSTER WARN:"); \
} else { \
print_as(","); \
} \
print_as(fmt); \
has_warnings = TRUE; \
} while(0)
/* Recursively count configured (non-orphan) resources.
 * Pass rsc == NULL to count every resource in the working set; a
 * collective resource (clone/group) is counted via its children. */
static int
count_resources(pe_working_set_t * data_set, resource_t * rsc)
{
    int total = 0;
    GListPtr iter = NULL;

    if (rsc == NULL) {
        iter = data_set->resources;
    } else if (rsc->children) {
        iter = rsc->children;
    } else {
        /* Leaf: counts as one unless it is an orphan. */
        return is_not_set(rsc->flags, pe_rsc_orphan);
    }

    while (iter != NULL) {
        total += count_resources(data_set, iter->data);
        iter = iter->next;
    }
    return total;
}
/*!
 * \internal
 * \brief Print one-line status suitable for use with monitoring software
 *
 * \param[in] data_set Working set of CIB state
 *
 * \note This function's output (and the return code when the program exits)
 *       should conform to https://www.monitoring-plugins.org/doc/guidelines.html
 */
static void
print_simple_status(pe_working_set_t * data_set)
{
    GListPtr iter = NULL;
    int nodes_online = 0;
    int nodes_standby = 0;
    int nodes_maintenance = 0;

    if (data_set->dc_node == NULL) {
        mon_warn(" No DC");
    }

    /* Classify every node; an offline node is itself a warning */
    for (iter = data_set->nodes; iter != NULL; iter = iter->next) {
        node_t *node = (node_t *) iter->data;

        if (node->details->online == FALSE) {
            mon_warn(" offline node: %s", node->details->uname);
        } else if (node->details->standby) {
            nodes_standby++;
        } else if (node->details->maintenance) {
            nodes_maintenance++;
        } else {
            nodes_online++;
        }
    }

    /* Only emit the OK summary when nothing above warned */
    if (!has_warnings) {
        int nresources = count_resources(data_set, NULL);

        print_as("CLUSTER OK: %d node%s online", nodes_online, s_if_plural(nodes_online));
        if (nodes_standby > 0) {
            print_as(", %d standby node%s", nodes_standby, s_if_plural(nodes_standby));
        }
        if (nodes_maintenance > 0) {
            print_as(", %d maintenance node%s", nodes_maintenance, s_if_plural(nodes_maintenance));
        }
        print_as(", %d resource%s configured", nresources, s_if_plural(nresources));
    }
    print_as("\n");
}
/*!
 * \internal
 * \brief Print a [name]=[value][units] pair, optionally using time string
 *
 * \param[in] stream     File stream to display output to
 * \param[in] name       Name to display
 * \param[in] value      Value to display (or NULL to convert time instead)
 * \param[in] units      Units to display (or NULL for no units)
 * \param[in] epoch_time Epoch time to convert if value is NULL
 */
static void
print_nvpair(FILE *stream, const char *name, const char *value,
const char *units, time_t epoch_time)
{
    /* print name= */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(" %s=", name);
            break;
        case mon_output_html:
        case mon_output_cgi:
        case mon_output_xml:
            fprintf(stream, " %s=", name);
            break;
        default:
            break;
    }

    /* If we have a value (and optionally units), print it */
    if (value) {
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as("%s%s", value, (units? units : ""));
                break;
            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, "%s%s", value, (units? units : ""));
                break;
            case mon_output_xml:
                fprintf(stream, "\"%s%s\"", value, (units? units : ""));
                break;
            default:
                break;
        }

    /* Otherwise print user-friendly time string */
    } else {
        char *date_str, *c;

        /* localtime()/asctime() may fail (e.g. out-of-range epoch_time);
         * fall back to an empty string rather than passing NULL to printf */
        date_str = asctime(localtime(&epoch_time));
        if (date_str == NULL) {
            date_str = "";
        }

        /* Trim the newline that asctime() appends.
         * BUG FIX: the loop previously compared the pointer itself
         * ("c != '\0'") rather than the character it points to, so the
         * string terminator was never detected and, absent a newline,
         * the scan would run past the buffer. Dereference c instead. */
        for (c = date_str; *c != '\0'; ++c) {
            if (*c == '\n') {
                *c = '\0';
                break;
            }
        }
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as("'%s'", date_str);
                break;
            case mon_output_html:
            case mon_output_cgi:
            case mon_output_xml:
                fprintf(stream, "\"%s\"", date_str);
                break;
            default:
                break;
        }
    }
}
/*!
 * \internal
 * \brief Print whatever is needed to start a node section
 *
 * Output shape depends on the global output_format: plain/console, HTML/CGI,
 * or XML (which prints the node's raw uname rather than the display name).
 *
 * NOTE(review): the fprintf format strings in the HTML and XML cases below
 * appear truncated/garbled in this copy (markup seems to have been stripped
 * during extraction) -- compare against upstream crm_mon.c before relying on
 * the exact output.
 *
 * \param[in] stream File stream to display output to
 * \param[in] node Node to print
 */
static void
print_node_start(FILE *stream, node_t *node)
{
char *node_name;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
/* get_node_display_name() allocates; freed after printing */
node_name = get_node_display_name(node);
print_as("* Node %s:\n", node_name);
free(node_name);
break;
case mon_output_html:
case mon_output_cgi:
node_name = get_node_display_name(node);
fprintf(stream, " Node: %s
\n \n", node_name);
free(node_name);
break;
case mon_output_xml:
fprintf(stream, " \n", node->details->uname);
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print whatever is needed to end a node section
 *
 * Counterpart of print_node_start(); only HTML/CGI and XML formats need a
 * closing element, the text formats need nothing.
 *
 * NOTE(review): the fprintf format strings below appear truncated in this
 * copy (closing markup stripped during extraction); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 */
static void
print_node_end(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print heading for resource history
 *
 * When \p all is FALSE, the heading is suppressed entirely for resources that
 * have never failed (no fail count and no recorded failure time).
 *
 * NOTE(review): this block appears corrupted in this copy -- the XML-format
 * fprintf around the "Print resource ID" switch has lost part of its format
 * string and merged with the following condition ("(rsc && ... > 0)" is
 * implied by the migration-threshold section). Verify against upstream
 * crm_mon.c before editing the logic.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Current state of CIB
 * \param[in] node Node that ran this resource
 * \param[in] rsc Resource to print (may be NULL, e.g. orphaned entries)
 * \param[in] rsc_id ID of resource to print
 * \param[in] all Whether to print every resource or just failed ones
 */
static void
print_rsc_history_start(FILE *stream, pe_working_set_t *data_set, node_t *node,
resource_t *rsc, const char *rsc_id, gboolean all)
{
time_t last_failure = 0;
/* Orphaned entries have no resource object, hence no fail count */
int failcount = rsc? get_failcount_full(node, rsc, &last_failure, FALSE, NULL, data_set) : 0;
if (!all && !failcount && (last_failure <= 0)) {
return;
}
/* Print resource ID */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" %s:", rsc_id);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " %s:", rsc_id);
break;
case mon_output_xml:
fprintf(stream, " 0)) {
/* Print migration threshold */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" migration-threshold=%d", rsc->migration_threshold);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " migration-threshold=%d", rsc->migration_threshold);
break;
case mon_output_xml:
fprintf(stream, " orphan=\"false\" migration-threshold=\"%d\"",
rsc->migration_threshold);
break;
default:
break;
}
/* Print fail count if any */
if (failcount > 0) {
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" fail-count=%d", failcount);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " fail-count=%d", failcount);
break;
case mon_output_xml:
fprintf(stream, " fail-count=\"%d\"", failcount);
break;
default:
break;
}
}
/* Print last failure time if any */
if (last_failure > 0) {
print_nvpair(stream, "last-failure", NULL, NULL, last_failure);
}
}
/* End the heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "\n \n");
break;
case mon_output_xml:
fprintf(stream, ">\n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print closing for resource history
 *
 * Counterpart of print_rsc_history_start(); text formats need no closing.
 *
 * NOTE(review): the fprintf format strings below appear truncated in this
 * copy (closing markup stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 */
static void
print_rsc_history_end(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n \n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print operation history
 *
 * Prints one operation's call ID, task, and (when verbose/XML output is in
 * effect) timing attributes, followed by its return code and its OCF text.
 *
 * NOTE(review): this block is corrupted in this copy -- the XML-format
 * fprintf in the opening switch has lost its format string and merged with a
 * later "if (int_value > 0)" check, and the declarations of "attr" and
 * "int_value" (visible in use below) were lost with it. Verify against
 * upstream crm_mon.c before editing.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Current state of CIB
 * \param[in] node Node this operation is for
 * \param[in] xml_op Root of XML tree describing this operation
 * \param[in] task Task parsed from this operation's XML
 * \param[in] interval Interval parsed from this operation's XML
 * \param[in] rc Return code parsed from this operation's XML
 */
static void
print_op_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
xmlNode *xml_op, const char *task, const char *interval, int rc)
{
const char *value = NULL;
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
/* Begin the operation description */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" + (%s) %s:", call, task);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " (%s) %s:", call, task);
break;
case mon_output_xml:
fprintf(stream, " 0) {
print_nvpair(stream, attr, NULL, NULL, int_value);
}
}
attr = XML_RSC_OP_LAST_RUN;
value = crm_element_value(xml_op, attr);
if (value) {
int_value = crm_parse_int(value, NULL);
if (int_value > 0) {
print_nvpair(stream, attr, NULL, NULL, int_value);
}
}
attr = XML_RSC_OP_T_EXEC;
value = crm_element_value(xml_op, attr);
if (value) {
print_nvpair(stream, attr, value, "ms", 0);
}
attr = XML_RSC_OP_T_QUEUE;
value = crm_element_value(xml_op, attr);
if (value) {
print_nvpair(stream, attr, value, "ms", 0);
}
}
/* End the operation description */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" rc=%d (%s)\n", rc, services_ocf_exitcode_str(rc));
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " rc=%d (%s)\n", rc, services_ocf_exitcode_str(rc));
break;
case mon_output_xml:
fprintf(stream, " rc=\"%d\" rc_text=\"%s\" />\n", rc, services_ocf_exitcode_str(rc));
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print resource operation/failure history
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Current state of CIB
 * \param[in] node Node that ran this resource
 * \param[in] rsc_entry Root of XML tree describing resource status
 * \param[in] operations Whether to print operations or just failcounts
 */
static void
print_rsc_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
xmlNode *rsc_entry, gboolean operations)
{
    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
    resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
    GListPtr ops = NULL;
    GListPtr item = NULL;
    xmlNode *child = NULL;
    gboolean heading_shown = FALSE;

    /* Without operations, only the failure summary is wanted */
    if (operations == FALSE) {
        print_rsc_history_start(stream, data_set, node, rsc, rsc_id, FALSE);
        print_rsc_history_end(stream);
        return;
    }

    /* Gather this resource's operations, ordered by call ID */
    for (child = __xml_first_child(rsc_entry); child != NULL; child = __xml_next(child)) {
        if (crm_str_eq((const char *) child->name, XML_LRM_TAG_RSC_OP, TRUE)) {
            ops = g_list_append(ops, child);
        }
    }
    ops = g_list_sort(ops, sort_op_by_callid);

    for (item = ops; item != NULL; item = item->next) {
        xmlNode *xml_op = (xmlNode *) item->data;
        const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
        const char *interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
        const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
        int rc = crm_parse_int(op_rc, "0");

        /* A 0-interval monitor is displayed as a "probe" */
        if (safe_str_eq(task, CRMD_ACTION_STATUS) && safe_str_eq(interval, "0")) {
            task = "probe";
        }

        /* Skip notifications and probes that found the resource not running */
        if (safe_str_eq(task, CRMD_ACTION_NOTIFY)
            || (safe_str_eq(task, "probe") && (rc == 7))) {
            continue;
        }

        /* Open the resource heading before the first printed operation */
        if (heading_shown == FALSE) {
            heading_shown = TRUE;
            print_rsc_history_start(stream, data_set, node, rsc, rsc_id, TRUE);
        }
        print_op_history(stream, data_set, node, xml_op, task, interval, rc);
    }

    /* List items are borrowed from the XML tree, so free only the list */
    g_list_free(ops);
    if (heading_shown) {
        print_rsc_history_end(stream);
    }
}
/*!
 * \internal
 * \brief Print node operation/failure history
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Current state of CIB
 * \param[in] node_state Root of XML tree describing node status
 * \param[in] operations Whether to print operations or just failcounts
 */
static void
print_node_history(FILE *stream, pe_working_set_t *data_set,
xmlNode *node_state, gboolean operations)
{
    node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
    xmlNode *lrm_rsc = NULL;
    xmlNode *entry = NULL;

    /* Skip unknown or offline nodes entirely */
    if ((node == NULL) || (node->details == NULL) || (node->details->online == FALSE)) {
        return;
    }

    print_node_start(stream, node);

    /* Walk down to the node's resource status section */
    lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
    lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);

    /* Print history of each of the node's resources */
    for (entry = __xml_first_child(lrm_rsc); entry != NULL; entry = __xml_next(entry)) {
        if (crm_str_eq((const char *) entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
            print_rsc_history(stream, data_set, node, entry, operations);
        }
    }

    print_node_end(stream);
}
/*!
 * \internal
 * \brief Print extended information about an attribute if appropriate
 *
 * \param[in] stream    File stream to display output to
 * \param[in] node      Node whose attribute is being displayed
 * \param[in] rsc_list  Resources to search for a ping/pingd matching the attribute
 * \param[in] attrname  Name of the attribute being displayed
 * \param[in] attrvalue Current value of the attribute
 *
 * \return TRUE if extended information was printed, FALSE otherwise
 * \note Currently, extended information is only supported for ping/pingd
 * resources, for which a message will be printed if connectivity is lost
 * or degraded.
 */
static gboolean
print_attr_msg(FILE *stream, node_t * node, GListPtr rsc_list, const char *attrname, const char *attrvalue)
{
GListPtr gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
/* Recurse into collective resources first */
if (rsc->children != NULL) {
if (print_attr_msg(stream, node, rsc->children, attrname, attrvalue)) {
return TRUE;
}
}
if (safe_str_eq(type, "ping") || safe_str_eq(type, "pingd")) {
/* "name" parameter defaults to "pingd" if unset */
const char *name = g_hash_table_lookup(rsc->parameters, "name");
if (name == NULL) {
name = "pingd";
}
/* To identify the resource with the attribute name. */
if (safe_str_eq(name, attrname)) {
int host_list_num = 0;
int expected_score = 0;
int value = crm_parse_int(attrvalue, "0");
const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
if(hosts) {
/* host_list is a space-separated list of ping targets */
char **host_list = g_strsplit(hosts, " ", 0);
host_list_num = g_strv_length(host_list);
g_strfreev(host_list);
}
/* pingd multiplier is the same as the default value. */
expected_score = host_list_num * crm_parse_int(multiplier, "1");
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (value <= 0) {
print_as("\t: Connectivity is lost");
} else if (value < expected_score) {
print_as("\t: Connectivity is degraded (Expected=%d)", expected_score);
}
break;
case mon_output_html:
case mon_output_cgi:
if (value <= 0) {
fprintf(stream, " (connectivity is lost)");
} else if (value < expected_score) {
fprintf(stream, " (connectivity is degraded -- expected %d)",
expected_score);
}
break;
case mon_output_xml:
fprintf(stream, " expected=\"%d\"", expected_score);
break;
default:
break;
}
return TRUE;
}
}
}
return FALSE;
}
/* GCompareFunc: order attribute names lexicographically */
static int
compare_attribute(gconstpointer a, gconstpointer b)
{
    return strcmp((const char *) a, (const char *) b);
}
/* GHFunc: add one node attribute name to the sorted attr_list global,
 * skipping cluster-maintained attributes (those matching FILTER_STR) */
static void
create_attr_list(gpointer name, gpointer value, gpointer data)
{
    const char *filt_str[] = FILTER_STR;
    int lpc;

    CRM_CHECK(name != NULL, return);

    /* filtering automatic attributes */
    for (lpc = 0; filt_str[lpc] != NULL; lpc++) {
        if (g_str_has_prefix(name, filt_str[lpc])) {
            return;
        }
    }

    attr_list = g_list_insert_sorted(attr_list, name, compare_attribute);
}
/* structure for passing multiple user data to g_list_foreach() */
struct mon_attr_data {
FILE *stream; /* output destination for the attribute listing */
node_t *node; /* node whose attributes are being printed */
};
/* GFunc for g_list_foreach(): print one node attribute (name and value) in
 * the current output format, then let print_attr_msg() append extended
 * connectivity info for ping/pingd attributes.
 *
 * NOTE(review): this block is corrupted in this copy -- the XML-format
 * fprintf has lost its format string and merged with the print_attr_msg()
 * call that follows it. Verify against upstream crm_mon.c before editing. */
static void
print_node_attribute(gpointer name, gpointer user_data)
{
const char *value = NULL;
struct mon_attr_data *data = (struct mon_attr_data *) user_data;
value = g_hash_table_lookup(data->node->details->attrs, name);
/* Print attribute name and value */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" + %-32s\t: %-10s", (char *)name, value);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(data->stream, " %s: %s",
(char *)name, value);
break;
case mon_output_xml:
fprintf(data->stream,
" stream, data->node, data->node->details->running_rsc,
name, value);
/* Close out the attribute */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(data->stream, "\n");
break;
case mon_output_xml:
fprintf(data->stream, " />\n");
break;
default:
break;
}
}
/* Print either the operations section or the migration summary, walking
 * every node_state entry in the CIB status section.
 *
 * NOTE(review): the HTML/CGI and XML heading strings in this copy appear
 * truncated (markup stripped during extraction); verify against upstream.
 *
 * stream:     output destination
 * data_set:   working set of CIB state
 * operations: TRUE to print full operation history, FALSE for failcounts only
 */
static void
print_node_summary(FILE *stream, pe_working_set_t * data_set, gboolean operations)
{
xmlNode *node_state = NULL;
xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
/* Print heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (operations) {
print_as("\nOperations:\n");
} else {
print_as("\nMigration Summary:\n");
}
break;
case mon_output_html:
case mon_output_cgi:
if (operations) {
fprintf(stream, "
\n Operations
\n");
} else {
fprintf(stream, "
\n Migration Summary
\n");
}
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
/* Print each node in the CIB status */
for (node_state = __xml_first_child(cib_status); node_state != NULL;
node_state = __xml_next(node_state)) {
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
print_node_history(stream, data_set, node_state, operations);
}
}
/* Close section */
switch (output_format) {
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/* GHFunc for g_hash_table_foreach(): print one cluster ticket's granted /
 * standby state and, if known, its last-granted time.
 *
 * NOTE(review): the XML-format fprintf below has lost its format string in
 * this copy (merged with its arguments). Also note the last-granted line
 * prints to stdout while everything else honors the "stream" argument --
 * looks like a bug; confirm against upstream crm_mon.c. */
static void
print_ticket(gpointer name, gpointer value, gpointer data)
{
ticket_t *ticket = (ticket_t *) value;
FILE *stream = (FILE *) data;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("* %s:\t%s%s", ticket->id,
(ticket->granted? "granted" : "revoked"),
(ticket->standby? " [standby]" : ""));
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " %s: %s%s", ticket->id,
(ticket->granted? "granted" : "revoked"),
(ticket->standby? " [standby]" : ""));
break;
case mon_output_xml:
fprintf(stream, " id, (ticket->granted? "granted" : "revoked"),
(ticket->standby? "true" : "false"));
break;
default:
break;
}
if (ticket->last_granted > -1) {
/* NOTE(review): stdout here, stream everywhere else -- see above */
print_nvpair(stdout, "last-granted", NULL, NULL, ticket->last_granted);
}
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "\n");
break;
case mon_output_xml:
fprintf(stream, " />\n");
break;
default:
break;
}
}
/* Print the "Tickets" section: heading, one line per ticket (via
 * print_ticket), then the section footer.
 *
 * NOTE(review): the HTML/CGI and XML heading/footer strings appear truncated
 * in this copy (markup stripped); verify against upstream. */
static void
print_cluster_tickets(FILE *stream, pe_working_set_t * data_set)
{
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nTickets:\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n Tickets
\n \n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
/* Print each ticket */
g_hash_table_foreach(data_set->tickets, print_ticket, stream);
/* Close section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Return human-friendly string representing node name
 *
 * The returned string will be in the format
 *    uname[@hostUname] [(nodeID)]
 * "@hostUname" will be printed if the node is a guest node.
 * "(nodeID)" will be printed if the node ID is different from the node uname,
 * and detailed output has been requested.
 *
 * \param[in] node Node to represent
 * \return Newly allocated string with representation of node name
 * \note It is the caller's responsibility to free the result with free().
 */
static char *
get_node_display_name(node_t *node)
{
    const char *host = NULL;
    const char *id = NULL;
    int buf_len;
    char *display = NULL;

    CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));

    /* Host is displayed only if this is a guest node */
    if (is_container_remote_node(node)) {
        if (node->details->remote_rsc->running_on) {
            /* running_on is a list, but guest nodes will have exactly one entry
             * unless they are in the process of migrating, in which case they
             * will have two; either way, we can use the first item in the list
             */
            node_t *host_node = (node_t *) node->details->remote_rsc->running_on->data;

            if (host_node && host_node->details) {
                host = host_node->details->uname;
            }
        }
        if (host == NULL) {
            host = ""; /* so we at least get "uname@" to indicate guest */
        }
    }

    /* Node ID is displayed if different from uname and detail is requested */
    if (print_clone_detail && safe_str_neq(node->details->uname, node->details->id)) {
        id = node->details->id;
    }

    /* Size the buffer: uname + NUL, plus "@host" and/or " (id)" as needed */
    buf_len = strlen(node->details->uname) + 1;
    if (host) {
        buf_len += strlen(host) + 1;  /* "@host" */
    }
    if (id) {
        buf_len += strlen(id) + 3;    /* " (id)" */
    }

    /* Allocate and populate display name */
    display = malloc(buf_len);
    CRM_ASSERT(display != NULL);
    strcpy(display, node->details->uname);
    if (host) {
        strcat(display, "@");
        strcat(display, host);
    }
    if (id) {
        strcat(display, " (");
        strcat(display, id);
        strcat(display, ")");
    }
    return display;
}
/*!
 * \internal
 * \brief Print a negative location constraint
 *
 * NOTE(review): the XML-format fprintf below has lost the conversion
 * specifiers from its format string in this copy (four arguments follow a
 * bare "\n" string); verify against upstream crm_mon.c.
 *
 * \param[in] stream File stream to display output to
 * \param[in] node Node affected by constraint
 * \param[in] location Constraint to print
 */
static void print_ban(FILE *stream, node_t *node, rsc_to_node_t *location)
{
char *node_name = NULL;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
node_name = get_node_display_name(node);
print_as(" %s\tprevents %s from running %son %s\n",
location->id, location->rsc_lh->id,
((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
node_name);
break;
case mon_output_html:
case mon_output_cgi:
node_name = get_node_display_name(node);
fprintf(stream, " %s prevents %s from running %son %s\n",
location->id, location->rsc_lh->id,
((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
node_name);
break;
case mon_output_xml:
fprintf(stream,
" \n",
location->id, location->rsc_lh->id, node->details->uname, node->weight,
((location->role_filter == RSC_ROLE_MASTER)? "true" : "false"));
break;
default:
break;
}
/* free(NULL) is a no-op, so the XML path is safe too */
free(node_name);
}
/*!
 * \internal
 * \brief Print section for negative location constraints
 *
 * Lists every ban (negative-weight placement constraint) whose ID starts
 * with the user-supplied print_neg_location_prefix.
 *
 * NOTE(review): the HTML/CGI heading/footer strings appear truncated in this
 * copy (markup stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set corresponding to CIB status to display
 */
static void print_neg_locations(FILE *stream, pe_working_set_t *data_set)
{
GListPtr gIter, gIter2;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nNegative Location Constraints:\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n Negative Location Constraints
\n \n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
/* Print each ban */
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
rsc_to_node_t *location = (rsc_to_node_t *) gIter->data;
/* Only constraints matching the requested prefix are shown */
if (!g_str_has_prefix(location->id, print_neg_location_prefix))
continue;
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
node_t *node = (node_t *) gIter2->data;
/* Only negative weights constitute a ban */
if (node->weight < 0) {
print_ban(stream, node, location);
}
}
}
/* Close section */
switch (output_format) {
case mon_output_cgi:
case mon_output_html:
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/* Unpack a resource's parameters into rsc->parameters, recursing into any
 * child resources so collectives are fully populated as well.
 *
 * FIX: dropped the stray developer name ("Beekhof:") that was left in the
 * debug trace message. */
static void
crm_mon_get_parameters(resource_t *rsc, pe_working_set_t * data_set)
{
    get_rsc_attributes(rsc->parameters, rsc, NULL, data_set);
    crm_trace("Unpacked params for %s (%d)", rsc->id, g_hash_table_size(rsc->parameters));
    if (rsc->children) {
        GListPtr gIter = NULL;

        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            crm_mon_get_parameters(gIter->data, data_set);
        }
    }
}
/*!
 * \internal
 * \brief Print node attributes section
 *
 * For every online node, collects its attribute names into the sorted
 * attr_list global (filtered by create_attr_list), prints each one via
 * print_node_attribute, then resets attr_list for the next node.
 *
 * NOTE(review): the HTML/CGI and XML heading strings appear truncated in
 * this copy (markup stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set of CIB state
 */
static void
print_node_attributes(FILE *stream, pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nNode Attributes:\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "
\n Node Attributes
\n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
/* Unpack all resource parameters (it would be more efficient to do this
 * only when needed for the first time in print_attr_msg())
 */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
crm_mon_get_parameters(gIter->data, data_set);
}
/* Display each node's attributes */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
struct mon_attr_data data;
data.stream = stream;
data.node = (node_t *) gIter->data;
if (data.node && data.node->details && data.node->details->online) {
print_node_start(stream, data.node);
g_hash_table_foreach(data.node->details->attrs, create_attr_list, NULL);
g_list_foreach(attr_list, print_node_attribute, &data);
/* attr_list is a file-scope global; reset it per node */
g_list_free(attr_list);
attr_list = NULL;
print_node_end(stream);
}
}
/* Print section footer */
switch (output_format) {
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Return resource display options corresponding to command-line choices
*
* \return Bitmask of pe_print_options suitable for resource print functions
*/
static int
get_resource_display_options(void)
{
int print_opts;
/* Determine basic output format */
switch (output_format) {
case mon_output_console:
print_opts = pe_print_ncurses;
break;
case mon_output_html:
case mon_output_cgi:
print_opts = pe_print_html;
break;
case mon_output_xml:
print_opts = pe_print_xml;
break;
default:
print_opts = pe_print_printf;
break;
}
/* Add optional display elements */
if (print_pending) {
print_opts |= pe_print_pending;
}
if (print_clone_detail) {
print_opts |= pe_print_clone_details;
}
if (!inactive_resources) {
print_opts |= pe_print_clone_active;
}
if (print_brief) {
print_opts |= pe_print_brief;
}
return print_opts;
}
/*!
 * \internal
 * \brief Return human-friendly string representing current time
 *
 * \return Current time as string (as by ctime() but without newline) on success
 *         or "Could not determine current time" on error
 * \note The return value points to a statically allocated string which might be
 *       overwritten by subsequent calls to any of the C library date and time functions.
 */
static const char *
crm_now_string(void)
{
    time_t now = time(NULL);
    char *when = ctime(&now);

    if ((now == (time_t) -1) || (when == NULL)) {
        return "Could not determine current time";
    }
    when[strlen(when) - 1] = '\0'; /* drop ctime()'s trailing newline */
    return (when);
}
/*!
 * \internal
 * \brief Print header for cluster summary if needed
 *
 * Only HTML/CGI and XML formats open a summary section; text formats need
 * nothing.
 *
 * NOTE(review): the format strings below appear truncated in this copy
 * (markup stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 */
static void
print_cluster_summary_header(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " Cluster Summary
\n \n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print footer for cluster summary if needed
 *
 * Counterpart of print_cluster_summary_header().
 *
 * NOTE(review): the format strings below appear truncated in this copy
 * (closing markup stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 */
static void
print_cluster_summary_footer(FILE *stream)
{
switch (output_format) {
case mon_output_cgi:
case mon_output_html:
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " \n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print times the display was last updated and CIB last changed
 *
 * Shows the current wall-clock time plus the CIB's last-written timestamp and
 * (when present) the user, client, and origin node of the last change.
 *
 * NOTE(review): the XML-format fprintf strings near the end have lost their
 * conversion specifiers in this copy (bare "\n" with arguments following);
 * verify against upstream crm_mon.c.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set of CIB state
 */
static void
print_cluster_times(FILE *stream, pe_working_set_t *data_set)
{
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("Last updated: %s", crm_now_string());
print_as("\t\tLast change: %s", last_written ? last_written : "");
if (user) {
print_as(" by %s", user);
}
if (client) {
print_as(" via %s", client);
}
if (origin) {
print_as(" on %s", origin);
}
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " Last updated: %s
\n", crm_now_string());
fprintf(stream, " Last change: %s", last_written ? last_written : "");
if (user) {
fprintf(stream, " by %s", user);
}
if (client) {
fprintf(stream, " via %s", client);
}
if (origin) {
fprintf(stream, " on %s", origin);
}
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " \n", crm_now_string());
fprintf(stream, " \n",
last_written ? last_written : "", user ? user : "",
client ? client : "", origin ? origin : "");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print cluster stack
 *
 * NOTE(review): the HTML/XML format strings below appear truncated in this
 * copy (markup stripped, XML line lost its conversion specifier); verify
 * against upstream.
 *
 * \param[in] stream File stream to display output to
 * \param[in] stack_s Stack name
 */
static void
print_cluster_stack(FILE *stream, const char *stack_s)
{
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("Stack: %s\n", stack_s);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " Stack: %s
\n", stack_s);
break;
case mon_output_xml:
fprintf(stream, " \n", stack_s);
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print current DC and its version
 *
 * Looks up the dc-version nvpair and the have-quorum flag from the CIB and
 * prints them alongside the DC's display name (or NONE when no DC exists).
 *
 * NOTE(review): the XML-format fprintf has lost its format string in this
 * copy (merged with its arguments); verify against upstream crm_mon.c.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set of CIB state
 */
static void
print_cluster_dc(FILE *stream, pe_working_set_t *data_set)
{
node_t *dc = data_set->dc_node;
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
/* dc_name is heap-allocated; freed at the end */
char *dc_name = dc? get_node_display_name(dc) : NULL;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("Current DC: ");
if (dc) {
print_as("%s (version %s) - partition %s quorum\n",
dc_name, (dc_version_s? dc_version_s : "unknown"),
(crm_is_true(quorum) ? "with" : "WITHOUT"));
} else {
print_as("NONE\n");
}
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " Current DC: ");
if (dc) {
fprintf(stream, "%s (version %s) - partition %s quorum",
dc_name, (dc_version_s? dc_version_s : "unknown"),
(crm_is_true(quorum)? "with" : "WITHOUT"));
} else {
fprintf(stream, "NONE");
}
fprintf(stream, "
\n");
break;
case mon_output_xml:
fprintf(stream, " details->uname, dc->details->id,
(crm_is_true(quorum) ? "true" : "false"));
} else {
fprintf(stream, "present=\"false\"");
}
fprintf(stream, " />\n");
break;
default:
break;
}
free(dc_name);
}
/*!
 * \internal
 * \brief Print counts of configured nodes and resources
 *
 * Also reports expected votes for classic openais stacks, maintenance mode,
 * and disabled/blocked resource counts in the text formats.
 *
 * NOTE(review): the HTML/XML format strings appear truncated in this copy
 * (markup and conversion specifiers stripped); verify against upstream.
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set of CIB state
 * \param[in] stack_s Stack name
 */
static void
print_cluster_counts(FILE *stream, pe_working_set_t *data_set, const char *stack_s)
{
int nnodes = g_list_length(data_set->nodes);
int nresources = count_resources(data_set, NULL);
xmlNode *quorum_node = get_xpath_object("//nvpair[@name='" XML_ATTR_EXPECTED_VOTES "']",
data_set->input, LOG_DEBUG);
const char *quorum_votes = quorum_node?
crm_element_value(quorum_node, XML_NVPAIR_ATTR_VALUE)
: "unknown";
switch (output_format) {
case mon_output_plain:
case mon_output_console:
/* Expected votes are only meaningful for the classic openais stack */
if (stack_s && strstr(stack_s, "classic openais") != NULL) {
print_as(", %s expected votes", quorum_votes);
}
if(is_set(data_set->flags, pe_flag_maintenance_mode)) {
print_as("\n *** Resource management is DISABLED ***");
print_as("\n The cluster will not attempt to start, stop or recover services");
print_as("\n");
}
print_as("\n%d node%s and %d resource%s configured",
nnodes, s_if_plural(nnodes),
nresources, s_if_plural(nresources));
if(data_set->disabled_resources || data_set->blocked_resources) {
print_as(": %d resource%s DISABLED and %d BLOCKED from being started due to failures",
data_set->disabled_resources, s_if_plural(data_set->disabled_resources),
data_set->blocked_resources);
}
print_as("\n\n");
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " %d node%s configured", nnodes, s_if_plural(nnodes));
if (stack_s && strstr(stack_s, "classic openais") != NULL) {
fprintf(stream, " (%s expected votes)", quorum_votes);
}
fprintf(stream, "
\n");
fprintf(stream, " %d resource%s configured
\n",
nresources, s_if_plural(nresources));
break;
case mon_output_xml:
fprintf(stream,
" \n",
g_list_length(data_set->nodes), quorum_votes);
fprintf(stream,
" \n",
count_resources(data_set, NULL));
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Print cluster-wide options
 *
 * \param[in] stream File stream to display output to
 * \param[in] data_set Working set of CIB state
 *
 * \note Currently this is only implemented for HTML and XML output, and
 * prints only a few options. If there is demand, more could be added.
 *
 * NOTE(review): the HTML table markup and the opening of the XML element
 * appear truncated in this copy (markup stripped during extraction); verify
 * against upstream crm_mon.c.
 */
static void
print_cluster_options(FILE *stream, pe_working_set_t *data_set)
{
switch (output_format) {
case mon_output_html:
fprintf(stream, "
\n Config Options
\n");
fprintf(stream, " \n");
fprintf(stream, " STONITH of failed nodes | %s |
\n",
is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
fprintf(stream, " Cluster is | %ssymmetric |
\n",
is_set(data_set->flags, pe_flag_symmetric_cluster)? "" : "a");
fprintf(stream, " No Quorum Policy | ");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "Freeze resources");
break;
case no_quorum_stop:
fprintf(stream, "Stop ALL resources");
break;
case no_quorum_ignore:
fprintf(stream, "Ignore");
break;
case no_quorum_suicide:
fprintf(stream, "Suicide");
break;
}
fprintf(stream, " |
\n
\n \n");
break;
case mon_output_xml:
fprintf(stream, " flags, pe_flag_stonith_enabled)?
"true" : "false");
fprintf(stream, " symmetric-cluster=\"%s\"",
is_set(data_set->flags, pe_flag_symmetric_cluster)?
"true" : "false");
fprintf(stream, " no-quorum-policy=\"");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "freeze");
break;
case no_quorum_stop:
fprintf(stream, "stop");
break;
case no_quorum_ignore:
fprintf(stream, "ignore");
break;
case no_quorum_suicide:
fprintf(stream, "suicide");
break;
}
fprintf(stream, "\" />\n");
break;
default:
break;
}
}
/*!
 * \internal
 * \brief Determine the name of the cluster stack in use
 *
 * Looks up the "cluster-infrastructure" nvpair in the CIB input.
 *
 * \param[in] data_set Working set of CIB state
 *
 * \return Stack name from the CIB, or "unknown" if it is not recorded
 */
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
    xmlNode *infra_node = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
                                           data_set->input, LOG_DEBUG);

    if (infra_node == NULL) {
        return "unknown";
    }
    return crm_element_value(infra_node, XML_NVPAIR_ATTR_VALUE);
}
/*!
 * \internal
 * \brief Print a summary of cluster-wide information
 *
 * Each section is emitted only when enabled via the global "show" mask
 * (the DC is always shown when there is none). The summary header and
 * footer are printed only if at least one section was printed.
 *
 * \param[in] stream   File stream to display output to
 * \param[in] data_set Working set of CIB state
 */
static void
print_cluster_summary(FILE *stream, pe_working_set_t *data_set)
{
    const char *stack_s = get_cluster_stack(data_set);
    gboolean printed_header = FALSE;

    if (show & mon_show_stack) {
        if (!printed_header) {
            print_cluster_summary_header(stream);
            printed_header = TRUE;
        }
        print_cluster_stack(stream, stack_s);
    }

    /* Always print DC if none, even if not requested */
    if ((data_set->dc_node == NULL) || (show & mon_show_dc)) {
        if (!printed_header) {
            print_cluster_summary_header(stream);
            printed_header = TRUE;
        }
        print_cluster_dc(stream, data_set);
    }

    if (show & mon_show_times) {
        if (!printed_header) {
            print_cluster_summary_header(stream);
            printed_header = TRUE;
        }
        print_cluster_times(stream, data_set);
    }

    /* Counts are also forced on when something is disabled or blocked */
    if (is_set(data_set->flags, pe_flag_maintenance_mode)
        || data_set->disabled_resources
        || data_set->blocked_resources
        || is_set(show, mon_show_count)) {
        if (!printed_header) {
            print_cluster_summary_header(stream);
            printed_header = TRUE;
        }
        print_cluster_counts(stream, data_set, stack_s);
    }

    /* There is not a separate option for showing cluster options, so show with
     * stack for now; a separate option could be added if there is demand
     */
    if (show & mon_show_stack) {
        print_cluster_options(stream, data_set);
    }

    if (printed_header) {
        print_cluster_summary_footer(stream);
    }
}
/*!
 * \internal
 * \brief Print a failed action
 *
 * \param[in] stream File stream to display output to
 * \param[in] xml_op Root of XML tree describing failed action
 */
static void
print_failed_action(FILE *stream, xmlNode *xml_op)
{
    const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
    const char *op_key_attr = "op_key";
    const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
    const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
    const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
    const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);

    /* Return code and operation status come from the CIB as strings */
    int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
    int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
    char *exit_reason_cleaned;

    /* If no op_key was given, use id instead */
    if (op_key == NULL) {
        op_key = ID(xml_op);
        op_key_attr = "id";
    }

    /* If no exit reason was given, use "none" */
    if (exit_reason == NULL) {
        exit_reason = "none";
    }

    /* Print common action information */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("* %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
                     op_key, node, services_ocf_exitcode_str(rc), rc,
                     call, services_lrm_status_str(status), exit_reason);
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
                    op_key, node, services_ocf_exitcode_str(rc), rc,
                    call, services_lrm_status_str(status), exit_reason);
            break;

        /* NOTE(review): the XML branch below is damaged -- the format string
         * has lost its attribute markup (op_key_attr, last, and
         * exit_reason_cleaned are evidently meant to be emitted here), and the
         * duplicate "case mon_output_xml" label cannot compile; restore from
         * the pristine source */
        case mon_output_xml:
            exit_reason_cleaned = crm_xml_escape(exit_reason);
            fprintf(stream, " \n");
            break;

        case mon_output_xml:
            fprintf(stream, " />\n");
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print a section for failed actions
 *
 * \param[in] stream   File stream to display output to
 * \param[in] data_set Working set of CIB state
 */
static void
print_failed_actions(FILE *stream, pe_working_set_t *data_set)
{
    xmlNode *xml_op = NULL;

    /* Print section heading */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\nFailed Actions:\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            /* NOTE(review): the HTML/XML section strings here appear to have
             * lost their markup; verify against the pristine source */
            fprintf(stream, "
 \n Failed Actions
 \n \n");
            break;

        case mon_output_xml:
            fprintf(stream, " \n");
            break;

        default:
            break;
    }

    /* Print each failed action recorded in the working set */
    for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
         xml_op = __xml_next(xml_op)) {
        print_failed_action(stream, xml_op);
    }

    /* End section */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, "
 \n");
            break;

        case mon_output_xml:
            fprintf(stream, " \n");
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print cluster status to screen
 *
 * This uses the global display preferences set by command-line options
 * to display cluster status in a human-friendly way.
 *
 * \param[in] data_set Working set of CIB state
 */
static void
print_status(pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    int print_opts = get_resource_display_options();

    /* space-separated lists of node names */
    char *online_nodes = NULL;
    char *online_remote_nodes = NULL;
    char *online_guest_nodes = NULL;
    char *offline_nodes = NULL;
    char *offline_remote_nodes = NULL;

    /* In curses mode, redraw from a clean screen each refresh */
    if (output_format == mon_output_console) {
        blank_screen();
    }
    print_cluster_summary(stdout, data_set);

    /* Gather node information (and print if in bad state or grouping by node) */
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *node_mode = NULL;
        char *node_name = get_node_display_name(node);

        /* Get node mode (most severe applicable state wins) */
        if (node->details->unclean) {
            if (node->details->online) {
                node_mode = "UNCLEAN (online)";
            } else if (node->details->pending) {
                node_mode = "UNCLEAN (pending)";
            } else {
                node_mode = "UNCLEAN (offline)";
            }
        } else if (node->details->pending) {
            node_mode = "pending";
        } else if (node->details->standby_onfail && node->details->online) {
            node_mode = "standby (on-fail)";
        } else if (node->details->standby) {
            if (node->details->online) {
                node_mode = "standby";
            } else {
                node_mode = "OFFLINE (standby)";
            }
        } else if (node->details->maintenance) {
            if (node->details->online) {
                node_mode = "maintenance";
            } else {
                node_mode = "OFFLINE (maintenance)";
            }
        } else if (node->details->online) {
            node_mode = "online";
            if (group_by_node == FALSE) {
                /* Healthy online nodes are only collected here and summarized
                 * after the loop, grouped by node type */
                if (is_container_remote_node(node)) {
                    online_guest_nodes = add_list_element(online_guest_nodes, node_name);
                } else if (is_baremetal_remote_node(node)) {
                    online_remote_nodes = add_list_element(online_remote_nodes, node_name);
                } else {
                    online_nodes = add_list_element(online_nodes, node_name);
                }
                free(node_name);
                continue;
            }
        } else {
            node_mode = "OFFLINE";
            if (group_by_node == FALSE) {
                if (is_baremetal_remote_node(node)) {
                    offline_remote_nodes = add_list_element(offline_remote_nodes, node_name);
                } else if (is_container_remote_node(node)) {
                    /* ignore offline guest nodes */
                } else {
                    offline_nodes = add_list_element(offline_nodes, node_name);
                }
                free(node_name);
                continue;
            }
        }

        /* If we get here, node is in bad state, or we're grouping by node */

        /* Print the node name and status */
        if (is_container_remote_node(node)) {
            print_as("Guest");
        } else if (is_baremetal_remote_node(node)) {
            print_as("Remote");
        }
        print_as("Node %s: %s\n", node_name, node_mode);

        /* If we're grouping by node, print its resources */
        if (group_by_node) {
            if (print_brief) {
                print_rscs_brief(node->details->running_rsc, "\t", print_opts | pe_print_rsconly,
                                 stdout, FALSE);
            } else {
                GListPtr gIter2 = NULL;

                for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
                    resource_t *rsc = (resource_t *) gIter2->data;

                    rsc->fns->print(rsc, "\t", print_opts | pe_print_rsconly, stdout);
                }
            }
        }
        free(node_name);
    }

    /* If we're not grouping by node, summarize nodes by status */
    if (online_nodes) {
        print_as("Online: [%s ]\n", online_nodes);
        free(online_nodes);
    }
    if (offline_nodes) {
        print_as("OFFLINE: [%s ]\n", offline_nodes);
        free(offline_nodes);
    }
    if (online_remote_nodes) {
        print_as("RemoteOnline: [%s ]\n", online_remote_nodes);
        free(online_remote_nodes);
    }
    if (offline_remote_nodes) {
        print_as("RemoteOFFLINE: [%s ]\n", offline_remote_nodes);
        free(offline_remote_nodes);
    }
    if (online_guest_nodes) {
        print_as("GuestOnline: [%s ]\n", online_guest_nodes);
        free(online_guest_nodes);
    }

    /* If we haven't already displayed resources grouped by node,
     * or we need to print inactive resources, print a resources section */
    if (group_by_node == FALSE || inactive_resources) {

        /* If we're printing inactive resources, display a heading */
        if (inactive_resources) {
            if (group_by_node == FALSE) {
                print_as("\nFull list of resources:\n");
            } else {
                print_as("\nInactive resources:\n");
            }
        }

        print_as("\n");

        /* If we haven't already printed resources grouped by node,
         * and brief output was requested, print resource summary */
        if (print_brief && group_by_node == FALSE) {
            print_rscs_brief(data_set->resources, NULL, print_opts, stdout,
                             inactive_resources);
        }

        /* For each resource, display it if appropriate */
        for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
            resource_t *rsc = (resource_t *) gIter->data;

            /* Complex resources may have some sub-resources active and some inactive */
            gboolean is_active = rsc->fns->active(rsc, TRUE);
            gboolean partially_active = rsc->fns->active(rsc, FALSE);

            /* Always ignore inactive orphan resources (deleted but not yet gone from CIB) */
            if (is_set(rsc->flags, pe_rsc_orphan) && (is_active == FALSE)) {
                continue;
            }

            /* If we already printed resources grouped by node,
             * only print inactive resources, if that was requested */
            if (group_by_node == TRUE) {
                if ((is_active == FALSE) && inactive_resources) {
                    rsc->fns->print(rsc, NULL, print_opts, stdout);
                }
                continue;
            }

            /* Otherwise, print resource if it's at least partially active
             * or we're displaying inactive resources,
             * except for primitive resources already counted in a brief summary */
            if (!(print_brief && (rsc->variant == pe_native))
                && (partially_active || inactive_resources)) {
                rsc->fns->print(rsc, NULL, print_opts, stdout);
            }
        }
    }

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stdout, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stdout, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stdout, data_set);
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stdout, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stdout, data_set);
    }

#if CURSES_ENABLED
    if (output_format == mon_output_console) {
        refresh();
    }
#endif
}
/*!
 * \internal
 * \brief Print cluster status in XML format
 *
 * \param[in] data_set Working set of CIB state
 */
static void
print_xml_status(pe_working_set_t * data_set)
{
    FILE *stream = stdout;
    GListPtr gIter = NULL;
    int print_opts = get_resource_display_options();

    /* NOTE(review): several format strings in this function appear to have
     * lost their XML markup (e.g. the second fprintf below is passed VERSION
     * but its format string has no conversion); verify against the pristine
     * source */
    fprintf(stream, "\n");
    fprintf(stream, "\n", VERSION);
    print_cluster_summary(stream, data_set);

    /*** NODES ***/
    fprintf(stream, " \n");
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *node_type = "unknown";

        /* Map the node type enum to a string for the output */
        switch (node->details->type) {
            case node_member:
                node_type = "member";
                break;
            case node_remote:
                node_type = "remote";
                break;
            case node_ping:
                node_type = "ping";
                break;
        }

        /* Emit one element per node, with its state as attributes */
        fprintf(stream, " details->uname);
        fprintf(stream, "id=\"%s\" ", node->details->id);
        fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false");
        fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false");
        fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false");
        fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false");
        fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false");
        fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false");
        fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false");
        fprintf(stream, "expected_up=\"%s\" ", node->details->expected_up ? "true" : "false");
        fprintf(stream, "is_dc=\"%s\" ", node->details->is_dc ? "true" : "false");
        fprintf(stream, "resources_running=\"%d\" ", g_list_length(node->details->running_rsc));
        fprintf(stream, "type=\"%s\" ", node_type);

        /* Guest nodes are backed by a container resource */
        if (is_container_remote_node(node)) {
            fprintf(stream, "id_as_resource=\"%s\" ", node->details->remote_rsc->container->id);
        }

        if (group_by_node) {
            GListPtr lpc2 = NULL;

            fprintf(stream, ">\n");
            for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                resource_t *rsc = (resource_t *) lpc2->data;

                rsc->fns->print(rsc, " ", print_opts | pe_print_rsconly, stream);
            }
            fprintf(stream, " \n");
        } else {
            fprintf(stream, "/>\n");
        }
    }
    fprintf(stream, " \n");

    /*** RESOURCES ***/
    if (group_by_node == FALSE || inactive_resources) {
        fprintf(stream, " \n");
        for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
            resource_t *rsc = (resource_t *) gIter->data;
            gboolean is_active = rsc->fns->active(rsc, TRUE);
            gboolean partially_active = rsc->fns->active(rsc, FALSE);

            /* Skip inactive orphans (deleted but not yet gone from the CIB) */
            if (is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) {
                continue;
            } else if (group_by_node == FALSE) {
                if (partially_active || inactive_resources) {
                    rsc->fns->print(rsc, " ", print_opts, stream);
                }
            } else if (is_active == FALSE && inactive_resources) {
                rsc->fns->print(rsc, " ", print_opts, stream);
            }
        }
        fprintf(stream, " \n");
    }

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stream, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stream, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stream, data_set);
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stream, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stream, data_set);
    }

    fprintf(stream, "\n");
    fflush(stream);
    /* NOTE(review): stream is stdout here, and fclose() closes it for the
     * rest of the process -- confirm this is only used in one-shot mode */
    fclose(stream);
}
/*!
 * \internal
 * \brief Print cluster status in HTML format (with HTTP headers if CGI)
 *
 * \param[in] data_set Working set of CIB state
 * \param[in] filename Name of file to write HTML to (ignored if CGI)
 *
 * \return 0 on success, -1 on error
 */
static int
print_html_status(pe_working_set_t * data_set, const char *filename)
{
    FILE *stream;
    GListPtr gIter = NULL;
    char *filename_tmp = NULL;
    int print_opts = get_resource_display_options();

    if (output_format == mon_output_cgi) {
        /* CGI mode writes straight to stdout, preceded by an HTTP header */
        stream = stdout;
        fprintf(stream, "Content-type: text/html\n\n");

    } else {
        /* Otherwise write to a temp file and rename it into place at the end,
         * so readers never see a half-written page */
        filename_tmp = crm_concat(filename, "tmp", '.');
        stream = fopen(filename_tmp, "w");
        if (stream == NULL) {
            crm_perror(LOG_ERR, "Cannot open %s for writing", filename_tmp);
            free(filename_tmp);
            return -1;
        }
    }

    /* NOTE(review): many HTML format strings below appear to have lost their
     * markup (empty-looking "\n" strings, strings split across lines); verify
     * against the pristine source before relying on this output */
    fprintf(stream, "\n");
    fprintf(stream, " \n");
    fprintf(stream, " Cluster status\n");
    /* Auto-refresh interval derived from the reconnect setting */
    fprintf(stream, " \n", reconnect_msec / 1000);
    fprintf(stream, " \n");
    fprintf(stream, "\n");

    print_cluster_summary(stream, data_set);

    /*** NODE LIST ***/
    fprintf(stream, "
 \n Node List
 \n");
    fprintf(stream, "\n");
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        char *node_name = get_node_display_name(node);

        fprintf(stream, "- Node: %s: ", node_name);

        /* Show the most severe applicable state for the node */
        if (node->details->standby_onfail && node->details->online) {
            fprintf(stream, "standby (on-fail)\n");
        } else if (node->details->standby && node->details->online) {
            fprintf(stream, "standby\n");
        } else if (node->details->standby) {
            fprintf(stream, "OFFLINE (standby)\n");
        } else if (node->details->maintenance && node->details->online) {
            fprintf(stream, "maintenance\n");
        } else if (node->details->maintenance) {
            fprintf(stream, "OFFLINE (maintenance)\n");
        } else if (node->details->online) {
            fprintf(stream, "online\n");
        } else {
            fprintf(stream, "OFFLINE\n");
        }

        /* When grouping by node, list each node's resources inline */
        if (print_brief && group_by_node) {
            fprintf(stream, "
 \n");
            print_rscs_brief(node->details->running_rsc, NULL, print_opts | pe_print_rsconly,
                             stream, FALSE);
            fprintf(stream, "
 \n");

        } else if (group_by_node) {
            GListPtr lpc2 = NULL;

            fprintf(stream, "\n");
            for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                resource_t *rsc = (resource_t *) lpc2->data;

                fprintf(stream, "- ");
                rsc->fns->print(rsc, NULL, print_opts | pe_print_rsconly, stream);
                fprintf(stream, "
 \n");
            }
            fprintf(stream, "
 \n");
        }
        fprintf(stream, " \n");
        free(node_name);
    }
    fprintf(stream, "
 \n");

    if (group_by_node && inactive_resources) {
        fprintf(stream, "Inactive Resources
 \n");

    } else if (group_by_node == FALSE) {
        fprintf(stream, "
 \n Resource List
 \n");
    }

    if (group_by_node == FALSE || inactive_resources) {
        if (print_brief && group_by_node == FALSE) {
            print_rscs_brief(data_set->resources, NULL, print_opts, stream,
                             inactive_resources);
        }

        for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
            resource_t *rsc = (resource_t *) gIter->data;
            gboolean is_active = rsc->fns->active(rsc, TRUE);
            gboolean partially_active = rsc->fns->active(rsc, FALSE);

            /* Primitives were already covered by the brief summary above */
            if (print_brief && group_by_node == FALSE
                && rsc->variant == pe_native) {
                continue;
            }

            /* Skip inactive orphans (deleted but not yet gone from the CIB) */
            if (is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) {
                continue;
            } else if (group_by_node == FALSE) {
                if (partially_active || inactive_resources) {
                    rsc->fns->print(rsc, NULL, print_opts, stream);
                }
            } else if (is_active == FALSE && inactive_resources) {
                rsc->fns->print(rsc, NULL, print_opts, stream);
            }
        }
    }

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stream, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stream, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stream, data_set);
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stream, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stream, data_set);
    }

    fprintf(stream, "\n");
    fprintf(stream, "\n");
    fflush(stream);
    fclose(stream);

    /* Atomically move the finished page into place */
    if (output_format != mon_output_cgi) {
        if (rename(filename_tmp, filename) != 0) {
            crm_perror(LOG_ERR, "Unable to rename %s->%s", filename_tmp, filename);
        }
        free(filename_tmp);
    }
    return 0;
}
#if ENABLE_SNMP
# include
# include
# include
# include
# include
# include
# define add_snmp_field(list, oid_string, value) do { \
oid name[MAX_OID_LEN]; \
size_t name_length = MAX_OID_LEN; \
if (snmp_parse_oid(oid_string, name, &name_length)) { \
int s_rc = snmp_add_var(list, name, name_length, 's', (value)); \
if(s_rc != 0) { \
crm_err("Could not add %s=%s rc=%d", oid_string, value, s_rc); \
} else { \
crm_trace("Added %s=%s", oid_string, value); \
} \
} else { \
crm_err("Could not parse OID: %s", oid_string); \
} \
} while(0) \
# define add_snmp_field_int(list, oid_string, value) do { \
oid name[MAX_OID_LEN]; \
size_t name_length = MAX_OID_LEN; \
if (snmp_parse_oid(oid_string, name, &name_length)) { \
if(NULL == snmp_pdu_add_variable( \
list, name, name_length, ASN_INTEGER, \
(u_char *) & value, sizeof(value))) { \
crm_err("Could not add %s=%d", oid_string, value); \
} else { \
crm_trace("Added %s=%d", oid_string, value); \
} \
} else { \
crm_err("Could not parse OID: %s", oid_string); \
} \
} while(0) \
/* net-snmp input callback: crm_mon only sends traps, so any incoming PDU is
 * acknowledged (non-zero return) and otherwise ignored */
static int
snmp_input(int operation, netsnmp_session * session, int reqid, netsnmp_pdu * pdu, void *magic)
{
    return 1;
}
/* Create (at most once) and return the net-snmp session used for sending
 * traps to \p target with community string \p community. Returns NULL if no
 * target was given or session creation failed; the session is cached for the
 * lifetime of the process. */
static netsnmp_session *
crm_snmp_init(const char *target, char *community)
{
    static netsnmp_session *session = NULL;

# ifdef NETSNMPV53
    char target53[128];

    snprintf(target53, sizeof(target53), "%s:162", target);
# endif

    /* Reuse the previously created session if we have one */
    if (session) {
        return session;
    }

    if (target == NULL) {
        return NULL;
    }

    if (get_crm_log_level() > LOG_INFO) {
        /* Turn on net-snmp's own debugging when we are logging verbosely.
         * NOTE(review): debug_tokens is strdup()ed and never freed here --
         * a one-time leak; confirm whether debug_register_tokens() takes
         * ownership */
        char *debug_tokens = strdup("run:shell,snmptrap,tdomain");

        debug_register_tokens(debug_tokens);
        snmp_set_do_debugging(1);
    }

    session = calloc(1, sizeof(netsnmp_session));
    snmp_sess_init(session);
    session->version = SNMP_VERSION_2c;
    session->callback = snmp_input;
    session->callback_magic = NULL;

    if (community) {
        session->community_len = strlen(community);
        session->community = (unsigned char *)community;
    }

    session = snmp_add(session,
# ifdef NETSNMPV53
                       netsnmp_tdomain_transport(target53, 0, "udp"),
# else
                       netsnmp_transport_open_client("snmptrap", target),
# endif
                       NULL, NULL);

    if (session == NULL) {
        snmp_sess_perror("Could not create snmp transport", session);
    }
    return session;
}
#endif
/*!
 * \internal
 * \brief Send an SNMPv2 trap describing a resource operation result
 *
 * \param[in] node      Name of node the operation ran on
 * \param[in] rsc       Resource ID (may be NULL, in which case it is omitted)
 * \param[in] task      Name of the operation
 * \param[in] target_rc Expected operation return code
 * \param[in] rc        Actual operation return code
 * \param[in] status    Operation execution status
 * \param[in] desc      Human-readable description of the result
 *
 * \return SNMPERR_SUCCESS on success, another SNMP error code on failure
 *         (1 if SNMP support is not compiled in)
 */
static int
send_snmp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
               int status, const char *desc)
{
    int ret = 1;

#if ENABLE_SNMP
    static oid snmptrap_oid[] = { 1, 3, 6, 1, 6, 3, 1, 1, 4, 1, 0 };
    static oid sysuptime_oid[] = { 1, 3, 6, 1, 2, 1, 1, 3, 0 };

    netsnmp_pdu *trap_pdu;
    netsnmp_session *session = crm_snmp_init(snmp_target, snmp_community);

    trap_pdu = snmp_pdu_create(SNMP_MSG_TRAP2);
    if (!trap_pdu) {
        crm_err("Failed to create SNMP notification");
        return SNMPERR_GENERR;
    }

    {
        /* send uptime */
        /* A 64-bit decimal value needs up to 20 digits plus sign and
         * terminator; the previous 20-byte buffer could overflow with
         * sprintf(), so use a larger buffer and snprintf() */
        char csysuptime[32];
        time_t now = time(NULL);

        snprintf(csysuptime, sizeof(csysuptime), "%lld", (long long) now);
        snmp_add_var(trap_pdu, sysuptime_oid, sizeof(sysuptime_oid) / sizeof(oid), 't', csysuptime);
    }

    /* Indicate what the trap is by setting snmpTrapOid.0 */
    ret =
        snmp_add_var(trap_pdu, snmptrap_oid, sizeof(snmptrap_oid) / sizeof(oid), 'o',
                     snmp_crm_trap_oid);
    if (ret != 0) {
        crm_err("Failed set snmpTrapOid.0=%s", snmp_crm_trap_oid);
        /* The PDU was never handed to the library, so free it here to
         * avoid leaking it on this early return */
        snmp_free_pdu(trap_pdu);
        return ret;
    }

    /* Add entries to the trap */
    if (rsc) {
        add_snmp_field(trap_pdu, snmp_crm_oid_rsc, rsc);
    }
    add_snmp_field(trap_pdu, snmp_crm_oid_node, node);
    add_snmp_field(trap_pdu, snmp_crm_oid_task, task);
    add_snmp_field(trap_pdu, snmp_crm_oid_desc, desc);
    add_snmp_field_int(trap_pdu, snmp_crm_oid_rc, rc);
    add_snmp_field_int(trap_pdu, snmp_crm_oid_trc, target_rc);
    add_snmp_field_int(trap_pdu, snmp_crm_oid_status, status);

    /* Send and cleanup: snmp_send() takes ownership of the PDU on success,
     * so it is only freed here on failure */
    ret = snmp_send(session, trap_pdu);
    if (ret == 0) {
        /* error */
        snmp_sess_perror("Could not send SNMP trap", session);
        snmp_free_pdu(trap_pdu);
        ret = SNMPERR_GENERR;
    } else {
        ret = SNMPERR_SUCCESS;
    }
#else
    crm_err("Sending SNMP traps is not supported by this installation");
#endif
    return ret;
}
#if ENABLE_ESMTP
# include
# include
/* Per-recipient callback for smtp_enumerate_recipients(): print the SMTP
 * delivery status reported for one mailbox */
static void
print_recipient_status(smtp_recipient_t recipient, const char *mailbox, void *arg)
{
    const smtp_status_t *rcpt_status = smtp_recipient_status(recipient);

    printf("%s: %d %s", mailbox, rcpt_status->code, rcpt_status->text);
}
/* libESMTP event callback: accept weak ciphers and certificate problems
 * (logging them) so mail still goes out, and ignore routine protocol events.
 *
 * NOTE(review): each va_arg() sequence below must match libESMTP's documented
 * argument order for that event -- confirm against the libESMTP manual before
 * changing anything here. */
static void
event_cb(smtp_session_t session, int event_no, void *arg, ...)
{
    int *ok;
    va_list alist;

    va_start(alist, arg);
    switch (event_no) {
        case SMTP_EV_CONNECT:
        case SMTP_EV_MAILSTATUS:
        case SMTP_EV_RCPTSTATUS:
        case SMTP_EV_MESSAGEDATA:
        case SMTP_EV_MESSAGESENT:
        case SMTP_EV_DISCONNECT:
            /* Routine events: nothing to do */
            break;

        case SMTP_EV_WEAK_CIPHER:{
                /* NOTE(review): the argument is fetched as long but stored in
                 * an int -- harmless for realistic cipher bit counts, but
                 * worth confirming */
                int bits = va_arg(alist, long);

                ok = va_arg(alist, int *);
                crm_debug("SMTP_EV_WEAK_CIPHER, bits=%d - accepted.", bits);
                *ok = 1;
                break;
            }
        case SMTP_EV_STARTTLS_OK:
            crm_debug("SMTP_EV_STARTTLS_OK - TLS started here.");
            break;

        case SMTP_EV_INVALID_PEER_CERTIFICATE:{
                long vfy_result = va_arg(alist, long);

                ok = va_arg(alist, int *);
                /* There is a table in handle_invalid_peer_certificate() of mail-file.c */
                crm_err("SMTP_EV_INVALID_PEER_CERTIFICATE: %ld", vfy_result);
                *ok = 1;
                break;
            }
        case SMTP_EV_NO_PEER_CERTIFICATE:
            ok = va_arg(alist, int *);
            crm_debug("SMTP_EV_NO_PEER_CERTIFICATE - accepted.");
            *ok = 1;
            break;
        case SMTP_EV_WRONG_PEER_CERTIFICATE:
            ok = va_arg(alist, int *);
            crm_debug("SMTP_EV_WRONG_PEER_CERTIFICATE - accepted.");
            *ok = 1;
            break;
        case SMTP_EV_NO_CLIENT_CERTIFICATE:
            ok = va_arg(alist, int *);
            crm_debug("SMTP_EV_NO_CLIENT_CERTIFICATE - accepted.");
            *ok = 1;
            break;
        default:
            crm_debug("Got event: %d - ignored.\n", event_no);
    }
    va_end(alist);
}
#endif
#define BODY_MAX 2048
#if ENABLE_ESMTP
/* libESMTP monitor callback: log protocol traffic line by line.
 * The first segment of a buffer is prefixed with its direction
 * ('H' = headers, 'C' = client, 'S' = server); continuation segments
 * are logged without the prefix. */
static void
crm_smtp_debug(const char *buf, int buflen, int writing, void *arg)
{
    int level = *(int *)arg;
    int seg_start = 0;
    int i;
    char type;

    if (writing == SMTP_CB_HEADERS) {
        type = 'H';
    } else {
        type = writing ? 'C' : 'S';
    }

    for (i = 0; i < buflen; i++) {
        if ((buf[i] == '\0') || (buf[i] == '\n')) {
            if (seg_start == 0) {
                /* First segment: include the direction prefix */
                do_crm_log(level, "%c: %.*s", type, i - seg_start, buf + seg_start);
            } else {
                do_crm_log(level, " %.*s", i - seg_start, buf + seg_start);
            }
            seg_start = i + 1;
        }
    }
}
#endif
/*!
 * \internal
 * \brief Spawn the configured external notification agent for an event
 *
 * Event details are passed to the agent via CRM_notify_* environment
 * variables, then the agent is forked and exec'd.
 *
 * \param[in] node      Name of node the operation ran on
 * \param[in] rsc       Resource ID (may be NULL)
 * \param[in] task      Name of the operation
 * \param[in] target_rc Expected operation return code
 * \param[in] rc        Actual operation return code
 * \param[in] status    Operation execution status
 * \param[in] desc      Human-readable description of the result
 *
 * \return Always 0
 */
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
                 int status, const char *desc)
{
    pid_t pid;

    /*setenv needs chars, these are ints */
    char *rc_s = crm_itoa(rc);
    char *status_s = crm_itoa(status);
    char *target_rc_s = crm_itoa(target_rc);

    crm_debug("Sending external notification to '%s' via '%s'", external_recipient, external_agent);

    if(rsc) {
        setenv("CRM_notify_rsc", rsc, 1);
    }
    setenv("CRM_notify_recipient", external_recipient, 1);
    setenv("CRM_notify_node", node, 1);
    setenv("CRM_notify_task", task, 1);
    setenv("CRM_notify_desc", desc, 1);
    setenv("CRM_notify_rc", rc_s, 1);
    setenv("CRM_notify_target_rc", target_rc_s, 1);
    setenv("CRM_notify_status", status_s, 1);

    pid = fork();
    if (pid == -1) {
        crm_perror(LOG_ERR, "notification fork() failed.");
    }
    if (pid == 0) {
        /* Child: replace ourselves with the agent. execl() only returns on
         * failure; previously the child would then fall through and continue
         * executing the parent's code path, so report the error and exit
         * explicitly instead. */
        execl(external_agent, external_agent, NULL);
        crm_perror(LOG_ERR, "Could not execute '%s'", external_agent);
        _exit(EXIT_FAILURE);
    }

    /* NOTE(review): the parent does not waitpid() for the child, so the agent
     * runs asynchronously and its exit status is not collected here */
    crm_trace("Finished running custom notification program '%s'.", external_agent);
    free(target_rc_s);
    free(status_s);
    free(rc_s);
    return 0;
}
static int
send_smtp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
int status, const char *desc)
{
#if ENABLE_ESMTP
smtp_session_t session;
smtp_message_t message;
auth_context_t authctx;
struct sigaction sa;
int len = 25; /* Note: Check extra padding on the Subject line below */
int noauth = 1;
int smtp_debug = LOG_DEBUG;
char crm_mail_body[BODY_MAX];
char *crm_mail_subject = NULL;
memset(&sa, 0, sizeof(struct sigaction));
if (node == NULL) {
node = "-";
}
if (rsc == NULL) {
rsc = "-";
}
if (desc == NULL) {
desc = "-";
}
if (crm_mail_to == NULL) {
return 1;
}
if (crm_mail_host == NULL) {
crm_mail_host = "localhost:25";
}
if (crm_mail_prefix == NULL) {
crm_mail_prefix = "Cluster notification";
}
crm_debug("Sending '%s' mail to %s via %s", crm_mail_prefix, crm_mail_to, crm_mail_host);
len += strlen(crm_mail_prefix);
len += strlen(task);
len += strlen(rsc);
len += strlen(node);
len += strlen(desc);
len++;
crm_mail_subject = calloc(1, len);
/* If you edit this line, ensure you allocate enough memory for it by altering 'len' above */
snprintf(crm_mail_subject, len, "%s - %s event for %s on %s: %s\r\n", crm_mail_prefix, task,
rsc, node, desc);
len = 0;
len += snprintf(crm_mail_body + len, BODY_MAX - len, "\r\n%s\r\n", crm_mail_prefix);
len += snprintf(crm_mail_body + len, BODY_MAX - len, "====\r\n\r\n");
if (rc == target_rc) {
len += snprintf(crm_mail_body + len, BODY_MAX - len,
"Completed operation %s for resource %s on %s\r\n", task, rsc, node);
} else {
len += snprintf(crm_mail_body + len, BODY_MAX - len,
"Operation %s for resource %s on %s failed: %s\r\n", task, rsc, node, desc);
}
len += snprintf(crm_mail_body + len, BODY_MAX - len, "\r\nDetails:\r\n");
len += snprintf(crm_mail_body + len, BODY_MAX - len,
"\toperation status: (%d) %s\r\n", status, services_lrm_status_str(status));
if (status == PCMK_LRM_OP_DONE) {
len += snprintf(crm_mail_body + len, BODY_MAX - len,
"\tscript returned: (%d) %s\r\n", rc, services_ocf_exitcode_str(rc));
len += snprintf(crm_mail_body + len, BODY_MAX - len,
"\texpected return value: (%d) %s\r\n", target_rc,
services_ocf_exitcode_str(target_rc));
}
auth_client_init();
session = smtp_create_session();
message = smtp_add_message(session);
smtp_starttls_enable(session, Starttls_ENABLED);
sa.sa_handler = SIG_IGN;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGPIPE, &sa, NULL);
smtp_set_server(session, crm_mail_host);
authctx = auth_create_context();
auth_set_mechanism_flags(authctx, AUTH_PLUGIN_PLAIN, 0);
smtp_set_eventcb(session, event_cb, NULL);
/* Now tell libESMTP it can use the SMTP AUTH extension.
*/
if (!noauth) {
crm_debug("Adding authentication context");
smtp_auth_set_context(session, authctx);
}
if (crm_mail_from == NULL) {
struct utsname us;
char auto_from[BODY_MAX];
CRM_ASSERT(uname(&us) == 0);
snprintf(auto_from, BODY_MAX, "crm_mon@%s", us.nodename);
smtp_set_reverse_path(message, auto_from);
} else {
/* NULL is ok */
smtp_set_reverse_path(message, crm_mail_from);
}
smtp_set_header(message, "To", NULL /*phrase */ , NULL /*addr */ ); /* "Phrase" */
smtp_add_recipient(message, crm_mail_to);
/* Set the Subject: header and override any subject line in the message headers. */
smtp_set_header(message, "Subject", crm_mail_subject);
smtp_set_header_option(message, "Subject", Hdr_OVERRIDE, 1);
smtp_set_message_str(message, crm_mail_body);
smtp_set_monitorcb(session, crm_smtp_debug, &smtp_debug, 1);
if (smtp_start_session(session)) {
char buf[128];
int rc = smtp_errno();
crm_err("SMTP server problem: %s (%d)", smtp_strerror(rc, buf, sizeof buf), rc);
} else {
char buf[128];
int rc = smtp_errno();
const smtp_status_t *smtp_status = smtp_message_transfer_status(message);
if (rc != 0) {
crm_err("SMTP server problem: %s (%d)", smtp_strerror(rc, buf, sizeof buf), rc);
}
crm_info("Send status: %d %s", smtp_status->code, crm_str(smtp_status->text));
smtp_enumerate_recipients(message, print_recipient_status, NULL);
}
smtp_destroy_session(session);
auth_destroy_context(authctx);
auth_client_exit();
#endif
return 0;
}
/* Examine a CIB update fragment for resource operation results, log each
 * result, and fire any configured notifications (SNMP trap, e-mail, external
 * agent). Recurses into child elements until it reaches lrm_rsc_op entries;
 * node_id is a fallback node name from the caller's context. */
static void
handle_rsc_op(xmlNode * xml, const char *node_id)
{
    int rc = -1;
    int status = -1;
    int action = -1;
    int interval = 0;
    int target_rc = -1;
    int transition_num = -1;
    gboolean notify = TRUE;

    char *rsc = NULL;
    char *task = NULL;
    const char *desc = NULL;
    const char *magic = NULL;
    const char *id = NULL;
    char *update_te_uuid = NULL;
    const char *node = NULL;

    xmlNode *n = xml;
    xmlNode * rsc_op = xml;

    if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) {
        /* Not an operation element itself: recurse into the children */
        xmlNode *cIter;

        for(cIter = xml->children; cIter; cIter = cIter->next) {
            handle_rsc_op(cIter, node_id);
        }

        return;
    }

    id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY);
    if (id == NULL) {
        /* Compatibility with <= 1.1.5 */
        id = ID(rsc_op);
    }

    magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
    if (magic == NULL) {
        /* non-change */
        return;
    }

    /* NOTE(review): this early return bypasses the 'bail' cleanup --
     * confirm decode_transition_magic() does not allocate update_te_uuid
     * on failure */
    if (FALSE == decode_transition_magic(magic, &update_te_uuid, &transition_num, &action,
                                         &status, &rc, &target_rc)) {
        crm_err("Invalid event %s detected for %s", magic, id);
        return;
    }

    if (parse_op_key(id, &rsc, &task, &interval) == FALSE) {
        crm_err("Invalid event detected for %s", id);
        goto bail;
    }

    node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);

    /* Walk up to the enclosing node_state element to find the node name */
    while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) {
        n = n->parent;
    }

    if(node == NULL && n) {
        node = crm_element_value(n, XML_ATTR_UNAME);
    }

    if (node == NULL && n) {
        node = ID(n);
    }

    if (node == NULL) {
        /* Last resort: the node ID supplied by the caller */
        node = node_id;
    }

    if (node == NULL) {
        crm_err("No node detected for event %s (%s)", magic, id);
        goto bail;
    }

    /* look up where we expected it to be? */
    desc = pcmk_strerror(pcmk_ok);
    if (status == PCMK_LRM_OP_DONE && target_rc == rc) {
        crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
        if (rc == PCMK_OCF_NOT_RUNNING) {
            /* Expected "not running" results (e.g. probes) are not notified */
            notify = FALSE;
        }

    } else if (status == PCMK_LRM_OP_DONE) {
        desc = services_ocf_exitcode_str(rc);
        crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);

    } else {
        desc = services_lrm_status_str(status);
        crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
    }

    /* Fire each notification channel that is configured */
    if (notify && snmp_target) {
        send_snmp_trap(node, rsc, task, target_rc, rc, status, desc);
    }

    if (notify && crm_mail_to) {
        send_smtp_trap(node, rsc, task, target_rc, rc, status, desc);
    }

    if (notify && external_agent) {
        send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
    }

  bail:
    free(update_te_uuid);
    free(rsc);
    free(task);
}
/* Timer callback: fire the display-refresh trigger, then remove the timer
 * source by returning FALSE. */
static gboolean
mon_trigger_refresh(gpointer unused)
{
    mainloop_set_trigger(refresh_trigger);
    return FALSE;
}
#define NODE_PATT "/lrm[@id="

/* Extract the node name embedded in an xpath of the form ".../lrm[@id='NODE']...".
 * Returns a newly allocated string (caller frees), or NULL when the pattern
 * does not occur in the xpath.
 */
static char *
get_node_from_xpath(const char *xpath)
{
    char *node_name = NULL;
    char *start = strstr(xpath, NODE_PATT);

    if (start != NULL) {
        char *quote = NULL;

        /* Step past the pattern and the opening quote character */
        start += strlen(NODE_PATT) + 1;
        node_name = strdup(start);

        /* Cut the copy off at the closing quote */
        quote = strchr(node_name, '\'');
        CRM_ASSERT(quote);
        *quote = 0;
    }
    return node_name;
}
/* Process a v2-format CIB diff: walk each change entry, locate the XML that
 * was created or modified, and pass any resource operations it contains to
 * handle_rsc_op() together with the best guess at the node they ran on.
 */
static void crm_diff_update_v2(const char *event, xmlNode * msg)
{
    xmlNode *change = NULL;
    xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);

    for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) {
        const char *name = NULL;
        const char *op = crm_element_value(change, XML_DIFF_OP);
        const char *xpath = crm_element_value(change, XML_DIFF_PATH);
        xmlNode *match = NULL;
        const char *node = NULL;

        if(op == NULL) {
            continue;

        } else if(strcmp(op, "create") == 0) {
            /* The newly created XML is attached as the change's child */
            match = change->children;

        } else if(strcmp(op, "move") == 0) {
            /* Ordering-only changes carry no operation results */
            continue;

        } else if(strcmp(op, "delete") == 0) {
            /* Removals carry no operation results either */
            continue;

        } else if(strcmp(op, "modify") == 0) {
            /* The post-modification XML lives under the "result" child */
            match = first_named_child(change, XML_DIFF_RESULT);
            if(match) {
                match = match->children;
            }
        }

        if(match) {
            name = (const char *)match->name;
        }

        crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
        if(xpath == NULL) {
            /* Version field, ignore */

        } else if(name == NULL) {
            crm_debug("No result for %s operation to %s", op, xpath);
            CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0);

        } else if(strcmp(name, XML_TAG_CIB) == 0) {
            /* Entire CIB replaced: walk every node's state in the status section */
            xmlNode *state = NULL;
            xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS);

            for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
                node = crm_element_value(state, XML_ATTR_UNAME);
                if (node == NULL) {
                    node = ID(state);
                }
                handle_rsc_op(state, node);
            }

        } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
            /* Entire status section replaced */
            xmlNode *state = NULL;

            for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) {
                node = crm_element_value(state, XML_ATTR_UNAME);
                if (node == NULL) {
                    node = ID(state);
                }
                handle_rsc_op(state, node);
            }

        } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) {
            /* A single node's state changed */
            node = crm_element_value(match, XML_ATTR_UNAME);
            if (node == NULL) {
                node = ID(match);
            }
            handle_rsc_op(match, node);

        } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) {
            node = ID(match);
            handle_rsc_op(match, node);

        } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) {
            /* Deeper changes do not identify the node directly;
             * recover it from the change's xpath */
            char *local_node = get_node_from_xpath(xpath);
            handle_rsc_op(match, local_node);
            free(local_node);

        } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) {
            char *local_node = get_node_from_xpath(xpath);
            handle_rsc_op(match, local_node);
            free(local_node);

        } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) {
            char *local_node = get_node_from_xpath(xpath);
            handle_rsc_op(match, local_node);
            free(local_node);

        } else {
            crm_err("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
        }
    }
}
/* Process a v1-format CIB diff: find every resource operation added by the
 * diff and treat each one as a potential state change.
 */
static void crm_diff_update_v1(const char *event, xmlNode * msg)
{
    xmlXPathObject *ops = xpath_search(msg,
                                       "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
                                       "//" XML_LRM_TAG_RSC_OP);
    int max = numXpathResults(ops);
    int i;

    for (i = 0; i < max; i++) {
        handle_rsc_op(getXpathResult(ops, i), NULL);
    }
    freeXpathObject(ops);
}
/* CIB notification callback: apply the incoming diff to our cached copy of
 * the CIB, dispatch notifications for any resource operations it contains,
 * and schedule a display refresh.
 *
 * event - notification type (used only in log messages)
 * msg   - the CIB notification, containing the diff
 */
void
crm_diff_update(const char *event, xmlNode * msg)
{
    int rc = -1;
    long now = time(NULL);
    static bool stale = FALSE;
    static int updates = 0;
    static mainloop_timer_t *refresh_timer = NULL;
    xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);

    print_dot();

    if(refresh_timer == NULL) {
        refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL);
    }

    if (current_cib != NULL) {
        rc = xml_apply_patchset(current_cib, diff, TRUE);
        switch (rc) {
            case -pcmk_err_diff_resync:
            case -pcmk_err_diff_failed:
                crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                /* Discard the cache; a full copy is re-fetched below */
                free_xml(current_cib); current_cib = NULL;
                break;
            case pcmk_ok:
                updates++;
                break;
            default:
                crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                free_xml(current_cib); current_cib = NULL;
        }
    }

    if (current_cib == NULL) {
        crm_trace("Re-requesting the full cib");
        /* Fixed: "&current_cib" had been mangled into the HTML entity
         * residue "¤t_cib", which is not valid C */
        cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
    }

    /* Only pick the diff apart if someone is listening for events */
    if (crm_mail_to || snmp_target || external_agent) {
        int format = 0;

        crm_element_value_int(diff, "format", &format);
        switch(format) {
            case 1:
                crm_diff_update_v1(event, msg);
                break;
            case 2:
                crm_diff_update_v2(event, msg);
                break;
            default:
                crm_err("Unknown patch format: %d", format);
        }
    }

    if (current_cib == NULL) {
        if(!stale) {
            print_as("--- Stale data ---");
        }
        stale = TRUE;
        return;
    }

    stale = FALSE;
    /* Refresh the display:
     * - immediately if the last update was more than reconnect_msec ago
     * - immediately after more than 10 accumulated updates
     * - otherwise at most 2s after the last update (refresh_timer)
     */
    if ((now - last_refresh) > (reconnect_msec / 1000)) {
        mainloop_set_trigger(refresh_trigger);
        mainloop_timer_stop(refresh_timer);
        updates = 0;

    } else if(updates > 10) {
        mainloop_set_trigger(refresh_trigger);
        mainloop_timer_stop(refresh_timer);
        updates = 0;

    } else {
        mainloop_timer_start(refresh_timer);
    }
}
/* Re-run the scheduler on a copy of the cached CIB and print the cluster
 * status in whatever output format was requested.  Returns TRUE so it can be
 * re-armed as a trigger callback; exits the program on fatal errors.
 */
gboolean
mon_refresh_display(gpointer user_data)
{
    xmlNode *cib_copy = copy_xml(current_cib);
    pe_working_set_t data_set;

    last_refresh = time(NULL);

    /* Upgrade the copy to the latest schema; without a valid CIB there is
     * nothing to display, so disconnect and exit */
    if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
        if (cib) {
            cib->cmds->signoff(cib);
        }
        print_as("Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation));
        if (output_format == mon_output_console) {
            sleep(2);
        }
        clean_up(EX_USAGE);
        return FALSE;
    }

    set_working_set_defaults(&data_set);
    data_set.input = cib_copy;
    cluster_status(&data_set);

    /* Unpack constraints if any section will need them
     * (tickets may be referenced in constraints but not granted yet,
     * and bans need negative location constraints) */
    if (show & (mon_show_bans | mon_show_tickets)) {
        xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input);
        unpack_constraints(cib_constraints, &data_set);
    }

    /* Render in the requested format */
    switch (output_format) {
        case mon_output_html:
        case mon_output_cgi:
            if (print_html_status(&data_set, output_filename) != 0) {
                fprintf(stderr, "Critical: Unable to output html file\n");
                clean_up(EX_USAGE);
            }
            break;
        case mon_output_xml:
            print_xml_status(&data_set);
            break;
        case mon_output_monitor:
            print_simple_status(&data_set);
            /* Nagios-style mode: warnings change the exit status */
            if (has_warnings) {
                clean_up(MON_STATUS_WARN);
            }
            break;
        case mon_output_plain:
        case mon_output_console:
            print_status(&data_set);
            break;
        case mon_output_none:
            break;
    }

    cleanup_alloc_calculations(&data_set);
    return TRUE;
}
/* Fencing (stonith) event callback: build a human-readable description of the
 * event and forward it through every notification channel that was configured
 * on the command line.
 */
void
mon_st_callback(stonith_t * st, stonith_event_t * e)
{
    char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)",
                                   e->operation, e->origin, e->target,
                                   pcmk_strerror(e->result), e->id);

    if (snmp_target != NULL) {
        send_snmp_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
    }
    if (crm_mail_to != NULL) {
        send_smtp_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
    }
    if (external_agent != NULL) {
        send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
    }
    free(desc);
}
/*
 * De-init ncurses, signoff from the CIB and deallocate memory.
 * If rc is non-negative, terminate the process with that exit status;
 * a negative rc returns to the caller instead.
 */
void
clean_up(int rc)
{
#if ENABLE_SNMP
    /* Shut down the net-snmp library if it was initialized */
    netsnmp_session *session = crm_snmp_init(NULL, NULL);

    if (session) {
        snmp_close(session);
        snmp_shutdown("snmpapp");
    }
#endif

#if CURSES_ENABLED
    /* Restore the terminal before any further output */
    if (output_format == mon_output_console) {
        output_format = mon_output_plain;
        echo();
        nocbreak();
        endwin();
    }
#endif

    if (cib != NULL) {
        cib->cmds->signoff(cib);
        cib_delete(cib);
        cib = NULL;
    }

    free(output_filename);
    free(xml_file);
    free(pid_file);

    if (rc >= 0) {
        crm_exit(rc);
    }
    return;
}
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index eb0f88b4de..450abd1589 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,1058 +1,1058 @@
/*
* Copyright (C) 2004 Andrew Beekhof
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
bool BE_QUIET = FALSE;          /* -Q: print only the requested value on stdout */
bool scope_master = FALSE;      /* --master: limit --ban/--move/--clear to the Master role */
int cib_options = cib_sync_call;        /* flags passed to CIB API calls */
GMainLoop *mainloop = NULL;     /* event loop used while waiting for CRMd replies */

#define message_timeout_ms 60*1000      /* give up waiting for CRMd replies after this long */
/* Mainloop timeout callback: the CRMd has been silent too long, so report
 * the problem on both stderr and the log, then exit with failure.
 */
static gboolean
resource_ipc_timeout(gpointer data)
{
    int timeout_s = (int)message_timeout_ms / 1000;

    fprintf(stderr, "No messages received in %d seconds.. aborting\n", timeout_s);
    crm_err("No messages received in %d seconds", timeout_s);
    return crm_exit(-1);
}
/* IPC destroy callback: the CRMd connection dropped, so nothing sensible
 * remains to be done but log it and exit.
 */
static void
resource_ipc_connection_destroy(gpointer user_data)
{
    crm_info("Connection to CRMd was terminated");
    crm_exit(1);
}
/* Block in a GLib main loop until every expected CRMd reply has arrived
 * (counted down by resource_ipc_callback) or the message timeout fires.
 */
static void
start_mainloop(void)
{
    if (crmd_replies_needed == 0) {
        /* Nothing outstanding; no need to spin up a loop */
        return;
    }

    mainloop = g_main_new(FALSE);
    fprintf(stderr, "Waiting for %d replies from the CRMd", crmd_replies_needed);
    crm_debug("Waiting for %d replies from the CRMd", crmd_replies_needed);

    /* Abort via resource_ipc_timeout() if the CRMd goes quiet */
    g_timeout_add(message_timeout_ms, resource_ipc_timeout, NULL);
    g_main_run(mainloop);
}
/* IPC dispatch callback: count each reply from the CRMd, exiting once all
 * expected replies have been received.
 *
 * buffer   - raw XML text of the inbound message
 * length   - length of buffer (unused; string2xml finds the end itself)
 * userdata - unused
 */
static int
resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata)
{
    xmlNode *msg = string2xml(buffer);

    fprintf(stderr, ".");
    crm_log_xml_trace(msg, "[inbound]");

    crmd_replies_needed--;
    if (crmd_replies_needed == 0) {
        fprintf(stderr, " OK\n");
        crm_debug("Got all the replies we expected");
        /* Fix: msg was previously leaked on this early-exit path */
        free_xml(msg);
        return crm_exit(pcmk_ok);
    }

    free_xml(msg);
    return 0;
}
/* IPC callbacks for the CRMd connection: count replies as they arrive
 * (dispatch) and exit if the connection drops (destroy). */
struct ipc_client_callbacks crm_callbacks = {
    .dispatch = resource_ipc_callback,
    .destroy = resource_ipc_connection_destroy,
};
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\t\tThis text"},
{"version", 0, 0, '$', "\t\tVersion information" },
{"verbose", 0, 0, 'V', "\t\tIncrease debug output"},
{"quiet", 0, 0, 'Q', "\t\tPrint only the value on stdout\n"},
{"resource", 1, 0, 'r', "\tResource ID" },
{"-spacer-",1, 0, '-', "\nQueries:"},
{"list", 0, 0, 'L', "\t\tList all cluster resources"},
{"list-raw", 0, 0, 'l', "\tList the IDs of all instantiated resources (no groups/clones/...)"},
{"list-cts", 0, 0, 'c', NULL, pcmk_option_hidden},
{"list-operations", 0, 0, 'O', "\tList active resource operations. Optionally filtered by resource (-r) and/or node (-N)"},
- {"list-all-operations", 0, 0, 'o', "List all resource operations. Optionally filtered by resource (-r) and/or node (-N)"},
- {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled\n"},
+ {"list-all-operations", 0, 0, 'o', "List all resource operations. Optionally filtered by resource (-r) and/or node (-N)\n"},
+ {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled\n", pcmk_option_hidden},
{"list-standards", 0, 0, 0, "\tList supported standards"},
{"list-ocf-providers", 0, 0, 0, "List all available OCF providers"},
{"list-agents", 1, 0, 0, "List all agents available for the named standard and/or provider."},
{"list-ocf-alternatives", 1, 0, 0, "List all available providers for the named OCF agent\n"},
{"show-metadata", 1, 0, 0, "Show the metadata for the named class:provider:agent"},
{"query-xml", 0, 0, 'q', "\tQuery the definition of a resource (template expanded)"},
{"query-xml-raw", 0, 0, 'w', "\tQuery the definition of a resource (raw xml)"},
{"locate", 0, 0, 'W', "\t\tDisplay the current location(s) of a resource"},
{"stack", 0, 0, 'A', "\t\tDisplay the prerequisites and dependents of a resource"},
{"constraints",0, 0, 'a', "\tDisplay the (co)location constraints that apply to a resource"},
{"-spacer-", 1, 0, '-', "\nCommands:"},
{"cleanup", 0, 0, 'C', "\t\tDelete the resource history and re-check the current state. Optional: --resource"},
{"set-parameter", 1, 0, 'p', "Set the named parameter for a resource. See also -m, --meta"},
{"get-parameter", 1, 0, 'g', "Display the named parameter for a resource. See also -m, --meta"},
{"delete-parameter",1, 0, 'd', "Delete the named parameter for a resource. See also -m, --meta"},
{"get-property", 1, 0, 'G', "Display the 'class', 'type' or 'provider' of a resource", pcmk_option_hidden},
{"set-property", 1, 0, 'S', "(Advanced) Set the class, type or provider of a resource", pcmk_option_hidden},
{"-spacer-", 1, 0, '-', "\nResource location:"},
{
"move", 0, 0, 'M',
"\t\tMove a resource from its current location to the named destination.\n "
"\t\t\t\tRequires: --host. Optional: --lifetime, --master\n\n"
"\t\t\t\tNOTE: This may prevent the resource from running on the previous location node until the implicit constraints expire or are removed with --unban\n"
},
{
"ban", 0, 0, 'B',
"\t\tPrevent the named resource from running on the named --host. \n"
"\t\t\t\tRequires: --resource. Optional: --host, --lifetime, --master\n\n"
"\t\t\t\tIf --host is not specified, it defaults to:\n"
"\t\t\t\t * the current location for primitives and groups, or\n\n"
"\t\t\t\t * the current location of the master for m/s resources with master-max=1\n\n"
"\t\t\t\tAll other situations result in an error as there is no sane default.\n\n"
"\t\t\t\tNOTE: This will prevent the resource from running on this node until the constraint expires or is removed with --clear\n"
},
{
"clear", 0, 0, 'U', "\t\tRemove all constraints created by the --ban and/or --move commands. \n"
"\t\t\t\tRequires: --resource. Optional: --host, --master\n\n"
"\t\t\t\tIf --host is not specified, all constraints created by --ban and --move will be removed for the named resource.\n"
},
{"lifetime", 1, 0, 'u', "\tLifespan of constraints created by the --ban and --move commands"},
{
"master", 0, 0, 0,
"\t\tLimit the scope of the --ban, --move and --clear commands to the Master role.\n"
"\t\t\t\tFor --ban and --move, the previous master can still remain active in the Slave role."
},
{"-spacer-", 1, 0, '-', "\nAdvanced Commands:"},
{"delete", 0, 0, 'D', "\t\t(Advanced) Delete a resource from the CIB"},
{"fail", 0, 0, 'F', "\t\t(Advanced) Tell the cluster this resource has failed"},
{"restart", 0, 0, 0, "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it"},
{"wait", 0, 0, 0, "\t\t(Advanced) Wait until the cluster settles into a stable state"},
{"force-demote",0,0, 0, "\t(Advanced) Bypass the cluster and demote a resource on the local node. Additional detail with -V"},
{"force-stop", 0, 0, 0, "\t(Advanced) Bypass the cluster and stop a resource on the local node. Additional detail with -V"},
{"force-start",0, 0, 0, "\t(Advanced) Bypass the cluster and start a resource on the local node. Additional detail with -V"},
{"force-promote",0,0, 0, "\t(Advanced) Bypass the cluster and promote a resource on the local node. Additional detail with -V"},
{"force-check",0, 0, 0, "\t(Advanced) Bypass the cluster and check the state of a resource on the local node. Additional detail with -V\n"},
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"node", 1, 0, 'N', "\tHost uname"},
{"recursive", 0, 0, 0, "\tFollow colocation chains when using --set-parameter"},
{"resource-type", 1, 0, 't', "Resource type (primitive, clone, group, ...)"},
{"parameter-value", 1, 0, 'v', "Value to use with -p or -S"},
{"meta", 0, 0, 'm', "\t\tModify a resource's configuration option rather than one which is passed to the resource agent script. For use with -p, -g, -d"},
{"utilization", 0, 0, 'z', "\tModify a resource's utilization attribute. For use with -p, -g, -d"},
{"set-name", 1, 0, 's', "\t(Advanced) ID of the instance_attributes object to change"},
{"nvpair", 1, 0, 'i', "\t(Advanced) ID of the nvpair object to change/delete"},
{"timeout", 1, 0, 'T', "\t(Advanced) Abort if command does not finish in this time (with --restart or --wait)"},
{"force", 0, 0, 'f', "\n" /* Is this actually true anymore?
"\t\tForce the resource to move by creating a rule for the current location and a score of -INFINITY"
"\n\t\tThis should be used if the resource's stickiness and constraint scores total more than INFINITY (Currently 100,000)"
"\n\t\tNOTE: This will prevent the resource from running on this node until the constraint is removed with -U or the --lifetime duration expires\n"*/ },
{"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden},\
/* legacy options */
{"host-uname", 1, 0, 'H', NULL, pcmk_option_hidden},
{"migrate", 0, 0, 'M', NULL, pcmk_option_hidden},
{"un-migrate", 0, 0, 'U', NULL, pcmk_option_hidden},
{"un-move", 0, 0, 'U', NULL, pcmk_option_hidden},
{"refresh", 0, 0, 'R', NULL, pcmk_option_hidden},
{"reprobe", 0, 0, 'P', NULL, pcmk_option_hidden},
{"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', "List the configured resources:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --list", pcmk_option_example},
{"-spacer-", 1, 0, '-', "List the available OCF agents:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --list-agents ocf", pcmk_option_example},
{"-spacer-", 1, 0, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Display the current location of 'myResource':", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --locate", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Move 'myResource' to another machine:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Move 'myResource' to a specific machine:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --un-move", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Tell the cluster that 'myResource' failed:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --fail", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', "The cluster will not attempt to start or stop the resource under any circumstances."},
{"-spacer-", 1, 0, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."},
{"-spacer-", 1, 0, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
char rsc_cmd = 'L';
const char *rsc_id = NULL;
const char *host_uname = NULL;
const char *prop_name = NULL;
const char *prop_value = NULL;
const char *rsc_type = NULL;
const char *prop_id = NULL;
const char *prop_set = NULL;
const char *rsc_long_cmd = NULL;
const char *longname = NULL;
GHashTable *override_params = NULL;
char *xml_file = NULL;
crm_ipc_t *crmd_channel = NULL;
pe_working_set_t data_set;
cib_t *cib_conn = NULL;
bool recursive = FALSE;
char *our_pid = NULL;
/* Not all commands set these appropriately, but the defaults here are
* sufficient to get the logic right. */
bool require_resource = TRUE; /* whether command requires that resource be specified */
bool require_dataset = TRUE; /* whether command requires populated dataset instance */
bool require_crmd = FALSE; /* whether command requires connection to CRMd */
int rc = pcmk_ok;
int option_index = 0;
int timeout_ms = 0;
int argerr = 0;
int flag;
crm_log_cli_init("crm_resource");
crm_set_options(NULL, "(query|command) [options]", long_options,
"Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n");
if (argc < 2) {
crm_help('?', EX_USAGE);
}
while (1) {
flag = crm_get_option_long(argc, argv, &option_index, &longname);
if (flag == -1)
break;
switch (flag) {
case 0: /* long options with no short equivalent */
if (safe_str_eq("master", longname)) {
scope_master = TRUE;
} else if(safe_str_eq(longname, "recursive")) {
recursive = TRUE;
} else if (safe_str_eq("wait", longname)) {
rsc_cmd = flag;
rsc_long_cmd = longname;
require_resource = FALSE;
require_dataset = FALSE;
} else if (
safe_str_eq("restart", longname)
|| safe_str_eq("force-demote", longname)
|| safe_str_eq("force-stop", longname)
|| safe_str_eq("force-start", longname)
|| safe_str_eq("force-promote", longname)
|| safe_str_eq("force-check", longname)) {
rsc_cmd = flag;
rsc_long_cmd = longname;
crm_log_args(argc, argv);
} else if (safe_str_eq("list-ocf-providers", longname)
|| safe_str_eq("list-ocf-alternatives", longname)
|| safe_str_eq("list-standards", longname)) {
const char *text = NULL;
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
if (safe_str_eq("list-ocf-providers", longname)
|| safe_str_eq("list-ocf-alternatives", longname)) {
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list);
text = "OCF providers";
} else if (safe_str_eq("list-standards", longname)) {
rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
text = "standards";
}
if (rc > 0) {
rc = 0;
for (iter = list; iter != NULL; iter = iter->next) {
rc++;
printf("%s\n", iter->val);
}
lrmd_list_freeall(list);
} else if (optarg) {
fprintf(stderr, "No %s found for %s\n", text, optarg);
} else {
fprintf(stderr, "No %s found\n", text);
}
lrmd_api_delete(lrmd_conn);
return crm_exit(rc);
} else if (safe_str_eq("show-metadata", longname)) {
char standard[512];
char provider[512];
char type[512];
char *metadata = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
rc = sscanf(optarg, "%[^:]:%[^:]:%s", standard, provider, type);
if (rc == 3) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type,
&metadata, 0);
} else if (rc == 2) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, NULL, provider,
&metadata, 0);
} else if (rc < 2) {
fprintf(stderr,
"Please specify standard:type or standard:provider:type, not %s\n",
optarg);
rc = -EINVAL;
}
if (metadata) {
printf("%s\n", metadata);
} else {
fprintf(stderr, "Metadata query for %s failed: %d\n", optarg, rc);
}
lrmd_api_delete(lrmd_conn);
return crm_exit(rc);
} else if (safe_str_eq("list-agents", longname)) {
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
char *provider = strchr (optarg, ':');
lrmd_t *lrmd_conn = lrmd_api_new();
if (provider) {
*provider++ = 0;
}
rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider);
if (rc > 0) {
rc = 0;
for (iter = list; iter != NULL; iter = iter->next) {
printf("%s\n", iter->val);
rc++;
}
lrmd_list_freeall(list);
rc = 0;
} else {
fprintf(stderr, "No agents found for standard=%s, provider=%s\n",
optarg, (provider? provider : "*"));
rc = -1;
}
lrmd_api_delete(lrmd_conn);
return crm_exit(rc);
} else {
crm_err("Unhandled long option: %s", longname);
}
break;
case 'V':
do_trace = TRUE;
crm_bump_log_level(argc, argv);
break;
case '$':
case '?':
crm_help(flag, EX_OK);
break;
case 'x':
xml_file = strdup(optarg);
break;
case 'Q':
BE_QUIET = TRUE;
break;
case 'm':
attr_set_type = XML_TAG_META_SETS;
break;
case 'z':
attr_set_type = XML_TAG_UTILIZATION;
break;
case 'u':
move_lifetime = strdup(optarg);
break;
case 'f':
do_force = TRUE;
crm_log_args(argc, argv);
break;
case 'i':
prop_id = optarg;
break;
case 's':
prop_set = optarg;
break;
case 'r':
rsc_id = optarg;
break;
case 'v':
prop_value = optarg;
break;
case 't':
rsc_type = optarg;
break;
case 'T':
timeout_ms = crm_get_msec(optarg);
break;
case 'C':
case 'R':
case 'P':
crm_log_args(argc, argv);
require_resource = FALSE;
require_crmd = TRUE;
rsc_cmd = 'C';
break;
case 'F':
crm_log_args(argc, argv);
require_crmd = TRUE;
rsc_cmd = flag;
break;
case 'U':
case 'B':
case 'M':
case 'D':
crm_log_args(argc, argv);
rsc_cmd = flag;
break;
case 'L':
case 'c':
case 'l':
case 'q':
case 'w':
case 'W':
case 'O':
case 'o':
case 'A':
case 'a':
rsc_cmd = flag;
break;
case 'j':
print_pending = TRUE;
break;
case 'p':
case 'd':
case 'S':
crm_log_args(argc, argv);
prop_name = optarg;
rsc_cmd = flag;
break;
case 'G':
case 'g':
prop_name = optarg;
rsc_cmd = flag;
break;
case 'h':
case 'H':
case 'N':
crm_trace("Option %c => %s", flag, optarg);
host_uname = optarg;
break;
default:
CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag);
++argerr;
break;
}
}
if (optind < argc
&& argv[optind] != NULL
&& rsc_cmd == 0
&& rsc_long_cmd) {
override_params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
while (optind < argc && argv[optind] != NULL) {
char *name = calloc(1, strlen(argv[optind]));
char *value = calloc(1, strlen(argv[optind]));
int rc = sscanf(argv[optind], "%[^=]=%s", name, value);
if(rc == 2) {
g_hash_table_replace(override_params, name, value);
} else {
CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd);
free(value);
free(name);
argerr++;
}
optind++;
}
} else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) {
CMD_ERR("non-option ARGV-elements: ");
while (optind < argc && argv[optind] != NULL) {
CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]);
optind++;
argerr++;
}
}
if (optind > argc) {
++argerr;
}
if (argerr) {
CMD_ERR("Invalid option(s) supplied, use --help for valid usage");
return crm_exit(EX_USAGE);
}
our_pid = calloc(1, 11);
if (our_pid != NULL) {
snprintf(our_pid, 10, "%d", getpid());
our_pid[10] = '\0';
}
if (do_force) {
crm_debug("Forcing...");
cib_options |= cib_quorum_override;
}
data_set.input = NULL; /* make clean-up easier */
/* If user specified resource, look for it, even if it's optional for command */
if (rsc_id) {
require_resource = TRUE;
}
/* We need a dataset to find a resource, even if command doesn't need it */
if (require_resource) {
require_dataset = TRUE;
}
/* Establish a connection to the CIB */
cib_conn = cib_new();
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
if (rc != pcmk_ok) {
CMD_ERR("Error signing on to the CIB service: %s", pcmk_strerror(rc));
return crm_exit(rc);
}
/* Populate working set from XML file if specified or CIB query otherwise */
if (require_dataset) {
xmlNode *cib_xml_copy = NULL;
if (xml_file != NULL) {
cib_xml_copy = filename2xml(xml_file);
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
}
if(rc != pcmk_ok) {
goto bail;
}
/* Populate the working set instance */
set_working_set_defaults(&data_set);
rc = update_working_set_xml(&data_set, &cib_xml_copy);
if (rc != pcmk_ok) {
goto bail;
}
cluster_status(&data_set);
/* Set rc to -ENXIO if no resource matching rsc_id is found.
* This does not bail, but is handled later for certain commands.
* That handling could be done here instead if all flags above set
* require_resource appropriately. */
if (require_resource && rsc_id && (find_rsc_or_clone(rsc_id, &data_set) == NULL)) {
rc = -ENXIO;
}
}
/* Establish a connection to the CRMd if needed */
if (require_crmd) {
xmlNode *xml = NULL;
mainloop_io_t *source =
mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks);
crmd_channel = mainloop_get_ipc_client(source);
if (crmd_channel == NULL) {
CMD_ERR("Error signing on to the CRMd service");
rc = -ENOTCONN;
goto bail;
}
xml = create_hello_message(our_pid, crm_system_name, "0", "1");
crm_ipc_send(crmd_channel, xml, 0, 0, NULL);
free_xml(xml);
}
/* Handle rsc_cmd appropriately */
if (rsc_cmd == 'L') {
rc = pcmk_ok;
cli_resource_print_list(&data_set, FALSE);
} else if (rsc_cmd == 'l') {
int found = 0;
GListPtr lpc = NULL;
rc = pcmk_ok;
for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
resource_t *rsc = (resource_t *) lpc->data;
found++;
cli_resource_print_raw(rsc);
}
if (found == 0) {
printf("NO resources configured\n");
rc = -ENXIO;
}
} else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) {
resource_t *rsc = NULL;
rc = -ENXIO;
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
goto bail;
}
rsc = pe_find_resource(data_set.resources, rsc_id);
rc = -EINVAL;
if (rsc == NULL) {
CMD_ERR("Resource '%s' not restarted: unknown", rsc_id);
goto bail;
}
rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn);
} else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) {
rc = wait_till_stable(timeout_ms, cib_conn);
} else if (rsc_cmd == 0 && rsc_long_cmd) { /* force-(stop|start|check) */
rc = cli_resource_execute(rsc_id, rsc_long_cmd, override_params, cib_conn, &data_set);
} else if (rsc_cmd == 'A' || rsc_cmd == 'a') {
GListPtr lpc = NULL;
resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input);
if (rsc == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
unpack_constraints(cib_constraints, &data_set);
for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
resource_t *r = (resource_t *) lpc->data;
clear_bit(r->flags, pe_rsc_allocating);
}
cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1);
fprintf(stdout, "* %s\n", rsc->id);
cli_resource_print_location(rsc, NULL);
for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
resource_t *r = (resource_t *) lpc->data;
clear_bit(r->flags, pe_rsc_allocating);
}
cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1);
} else if (rsc_cmd == 'c') {
int found = 0;
GListPtr lpc = NULL;
rc = pcmk_ok;
for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
resource_t *rsc = (resource_t *) lpc->data;
cli_resource_print_cts(rsc);
found++;
}
cli_resource_print_cts_constraints(&data_set);
} else if (rsc_cmd == 'F') {
rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, &data_set);
if (rc == pcmk_ok) {
start_mainloop();
}
} else if (rsc_cmd == 'O') {
rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, &data_set);
} else if (rsc_cmd == 'o') {
rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, &data_set);
/* All remaining commands require that resource exist */
} else if (rc == -ENXIO) {
CMD_ERR("Resource '%s' not found: %s", crm_str(rsc_id), pcmk_strerror(rc));
} else if (rsc_cmd == 'W') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
rc = cli_resource_search(rsc_id, &data_set);
if (rc >= 0) {
rc = pcmk_ok;
}
} else if (rsc_cmd == 'q') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
rc = cli_resource_print(rsc_id, &data_set, TRUE);
} else if (rsc_cmd == 'w') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
rc = cli_resource_print(rsc_id, &data_set, FALSE);
} else if (rsc_cmd == 'U') {
node_t *dest = NULL;
if (rsc_id == NULL) {
CMD_ERR("No value specified for --resource");
rc = -ENXIO;
goto bail;
}
if (host_uname) {
dest = pe_find_node(data_set.nodes, host_uname);
if (dest == NULL) {
CMD_ERR("Unknown node: %s", host_uname);
rc = -ENXIO;
goto bail;
}
rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn);
} else {
rc = cli_resource_clear(rsc_id, NULL, data_set.nodes, cib_conn);
}
} else if (rsc_cmd == 'M' && host_uname) {
rc = cli_resource_move(rsc_id, host_uname, cib_conn, &data_set);
} else if (rsc_cmd == 'B' && host_uname) {
resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
node_t *dest = pe_find_node(data_set.nodes, host_uname);
rc = -ENXIO;
if (rsc_id == NULL) {
CMD_ERR("No value specified for --resource");
goto bail;
} else if(rsc == NULL) {
CMD_ERR("Resource '%s' not moved: unknown", rsc_id);
} else if (dest == NULL) {
CMD_ERR("Error performing operation: node '%s' is unknown", host_uname);
goto bail;
}
rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn);
} else if (rsc_cmd == 'B' || rsc_cmd == 'M') {
resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
rc = -ENXIO;
if (rsc_id == NULL) {
CMD_ERR("No value specified for --resource");
goto bail;
}
rc = -EINVAL;
if(rsc == NULL) {
CMD_ERR("Resource '%s' not moved: unknown", rsc_id);
} else if(g_list_length(rsc->running_on) == 1) {
node_t *current = rsc->running_on->data;
rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
} else if(rsc->variant == pe_master) {
int count = 0;
GListPtr iter = NULL;
node_t *current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
resource_t *child = (resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if(child_role == RSC_ROLE_MASTER) {
count++;
current = child->running_on->data;
}
}
if(count == 1 && current) {
rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
} else {
CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, g_list_length(rsc->running_on), count);
CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --host ", rsc_id);
CMD_ERR("You can prevent '%s' from being promoted at a specific location with:"
" --ban --master --host ", rsc_id);
}
} else {
CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, g_list_length(rsc->running_on));
CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --host ", rsc_id);
}
} else if (rsc_cmd == 'G') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
rc = cli_resource_print_property(rsc_id, prop_name, &data_set);
} else if (rsc_cmd == 'S') {
xmlNode *msg_data = NULL;
if (prop_value == NULL || strlen(prop_value) == 0) {
CMD_ERR("You need to supply a value with the -v option");
rc = -EINVAL;
goto bail;
} else if (cib_conn == NULL) {
rc = -ENOTCONN;
goto bail;
}
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
CRM_LOG_ASSERT(rsc_type != NULL);
CRM_LOG_ASSERT(prop_name != NULL);
CRM_LOG_ASSERT(prop_value != NULL);
msg_data = create_xml_node(NULL, rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
crm_xml_add(msg_data, prop_name, prop_value);
rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
free_xml(msg_data);
} else if (rsc_cmd == 'g') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
rc = cli_resource_print_attribute(rsc_id, prop_name, &data_set);
} else if (rsc_cmd == 'p') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
if (prop_value == NULL || strlen(prop_value) == 0) {
CMD_ERR("You need to supply a value with the -v option");
rc = -EINVAL;
goto bail;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc_id, prop_set, prop_id, prop_name,
prop_value, recursive, cib_conn, &data_set);
} else if (rsc_cmd == 'd') {
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc_id, prop_set, prop_id, prop_name, cib_conn, &data_set);
} else if (rsc_cmd == 'C' && rsc_id) {
resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
if(do_force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s for %s on %s", rsc->id, rsc_id, host_uname);
if(rsc) {
crmd_replies_needed = 0;
rc = cli_resource_delete(cib_conn, crmd_channel, host_uname, rsc, &data_set);
} else {
rc = -ENODEV;
}
if(rc == pcmk_ok && BE_QUIET == FALSE) {
/* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
cli_resource_check(cib_conn, rsc);
}
if (rc == pcmk_ok) {
start_mainloop();
}
} else if (rsc_cmd == 'C') {
#if HAVE_ATOMIC_ATTRD
const char *router_node = host_uname;
xmlNode *msg_data = NULL;
xmlNode *cmd = NULL;
if (host_uname) {
node_t *node = pe_find_node(data_set.nodes, host_uname);
if (node && is_remote_node(node)) {
if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) {
CMD_ERR("No lrmd connection detected to remote node %s", host_uname);
return -ENXIO;
}
node = node->details->remote_rsc->running_on->data;
router_node = node->details->uname;
}
}
msg_data = create_xml_node(NULL, "crm-resource-reprobe-op");
crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
if (safe_str_neq(router_node, host_uname)) {
crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
}
cmd = create_request(CRM_OP_REPROBE, msg_data, router_node,
CRM_SYSTEM_CRMD, crm_system_name, our_pid);
free_xml(msg_data);
crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes");
rc = attrd_update_delegate(
NULL, 'u', host_uname, "fail-count-*", NULL, XML_CIB_TAG_STATUS, NULL, NULL, NULL, attrd_opt_none);
if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
start_mainloop();
}
free_xml(cmd);
#else
GListPtr rIter = NULL;
crmd_replies_needed = 0;
for (rIter = data_set.resources; rIter; rIter = rIter->next) {
resource_t *rsc = rIter->data;
cli_resource_delete(cib_conn, crmd_channel, host_uname, rsc, &data_set);
}
start_mainloop();
#endif
} else if (rsc_cmd == 'D') {
xmlNode *msg_data = NULL;
if (rsc_id == NULL) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
if (rsc_type == NULL) {
CMD_ERR("You need to specify a resource type with -t");
rc = -ENXIO;
goto bail;
} else if (cib_conn == NULL) {
rc = -ENOTCONN;
goto bail;
}
msg_data = create_xml_node(NULL, rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
rc = cib_conn->cmds->delete(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
free_xml(msg_data);
} else {
CMD_ERR("Unknown command: %c", rsc_cmd);
}
bail:
if (data_set.input != NULL) {
cleanup_alloc_calculations(&data_set);
}
if (cib_conn != NULL) {
cib_conn->cmds->signoff(cib_conn);
cib_delete(cib_conn);
}
if (rc == -pcmk_err_no_quorum) {
CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
CMD_ERR("Try using -f");
} else if (rc != pcmk_ok) {
CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
}
return crm_exit(rc);
}
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index 946b9e339d..5e84723302 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -1,397 +1,397 @@
/*
* Copyright (C) 2004 Andrew Beekhof
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include
-bool print_pending = FALSE;
+bool print_pending = TRUE;
#define cons_string(x) x?x:"NA"
void
cli_resource_print_cts_constraints(pe_working_set_t * data_set)
{
    /* Print every colocation constraint in a fixed-field, CTS-friendly
     * format; location constraints are currently skipped. */
    xmlNode *cons_xml = NULL;
    xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);

    for (cons_xml = __xml_first_child(cib_constraints); cons_xml != NULL;
         cons_xml = __xml_next(cons_xml)) {
        xmlNode *lifetime = NULL;
        const char *id = crm_element_value(cons_xml, XML_ATTR_ID);

        if (id == NULL) {
            continue;
        }

        /* Skip constraints whose lifetime rule no longer applies */
        lifetime = first_named_child(cons_xml, "lifetime");
        if (test_ruleset(lifetime, NULL, data_set->now) == FALSE) {
            continue;
        }

        if (safe_str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(cons_xml))) {
            printf("Constraint %s %s %s %s %s %s %s\n",
                   crm_element_name(cons_xml),
                   cons_string(crm_element_value(cons_xml, XML_ATTR_ID)),
                   cons_string(crm_element_value(cons_xml, XML_COLOC_ATTR_SOURCE)),
                   cons_string(crm_element_value(cons_xml, XML_COLOC_ATTR_TARGET)),
                   cons_string(crm_element_value(cons_xml, XML_RULE_ATTR_SCORE)),
                   cons_string(crm_element_value(cons_xml, XML_COLOC_ATTR_SOURCE_ROLE)),
                   cons_string(crm_element_value(cons_xml, XML_COLOC_ATTR_TARGET_ROLE)));

        } else if (safe_str_eq(XML_CONS_TAG_RSC_LOCATION, crm_element_name(cons_xml))) {
            /* unpack_location(cons_xml, data_set); */
        }
    }
}
void
cli_resource_print_cts(resource_t * rsc)
{
    /* Print one machine-parsable status line per resource (recursing into
     * children), in the format consumed by the CTS test suite. */
    GListPtr lpc = NULL;
    const char *host = NULL;
    bool needs_quorum = TRUE;
    const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
    const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

    if (safe_str_eq(rclass, "stonith")) {
        xmlNode *op = NULL;

        /* Fencing devices never require quorum */
        needs_quorum = FALSE;

        /* Inspect only the first non-start operation entry, then stop */
        for (op = __xml_first_child(rsc->ops_xml); op != NULL; op = __xml_next(op)) {
            if (crm_str_eq((const char *)op->name, "op", TRUE)) {
                const char *name = crm_element_value(op, "name");

                if (safe_str_neq(name, CRMD_ACTION_START)) {
                    const char *value = crm_element_value(op, "requires");

                    /* NOTE(review): needs_quorum is already FALSE here, so
                     * this assignment is a no-op; presumably kept for clarity
                     * or as a leftover -- confirm before changing */
                    if (safe_str_eq(value, "nothing")) {
                        needs_quorum = FALSE;
                    }
                    break;
                }
            }
        }
    }

    /* Only report a host when the resource is active on exactly one node */
    if (rsc->running_on != NULL && g_list_length(rsc->running_on) == 1) {
        node_t *tmp = rsc->running_on->data;

        host = tmp->details->uname;
    }

    /* Flags are printed twice: once as a number, once as padded hex */
    printf("Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx\n",
           crm_element_name(rsc->xml), rsc->id,
           rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA",
           rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags,
           rsc->flags);

    for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
        resource_t *child = (resource_t *) lpc->data;

        cli_resource_print_cts(child);
    }
}
void
cli_resource_print_raw(resource_t * rsc)
{
    /* Print only leaf resource IDs, one per line; collective resources
     * (clones, groups, etc.) recurse into their children instead. */
    GListPtr child_iter = NULL;

    if (rsc->children == NULL) {
        printf("%s\n", rsc->id);
    }

    for (child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) {
        cli_resource_print_raw((resource_t *) child_iter->data);
    }
}
int
cli_resource_print_list(pe_working_set_t * data_set, bool raw)
{
    /* Print a one-line summary of every configured resource.
     * Returns 0 on success, or -ENXIO when nothing was printed. */
    int printed = 0;
    GListPtr gIter = NULL;
    int opts = pe_print_printf | pe_print_rsconly;

    if (print_pending) {
        opts |= pe_print_pending;
    }

    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;

        /* Skip orphaned resources that are no longer active anywhere */
        if (is_set(rsc->flags, pe_rsc_orphan)
            && rsc->fns->active(rsc, TRUE) == FALSE) {
            continue;
        }
        rsc->fns->print(rsc, NULL, opts, stdout);
        printed++;
    }

    if (printed == 0) {
        printf("NO resources configured\n");
        return -ENXIO;
    }
    return 0;
}
int
cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active,
                              pe_working_set_t * data_set)
{
    /* Print one line per recorded operation for the given resource/node
     * combination (optionally restricted to active operations), including
     * node, call ID, return code, timestamp and execution time.
     * Always returns pcmk_ok. */
    resource_t *rsc = NULL;
    int opts = pe_print_printf | pe_print_rsconly | pe_print_suppres_nl;
    GListPtr ops = find_operations(rsc_id, host_uname, active, data_set);
    GListPtr lpc = NULL;

    if (print_pending) {
        opts |= pe_print_pending;
    }

    for (lpc = ops; lpc != NULL; lpc = lpc->next) {
        xmlNode *xml_op = (xmlNode *) lpc->data;

        const char *op_rsc = crm_element_value(xml_op, "resource");
        const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
        const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
        const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
        int status = crm_parse_int(status_s, "0");

        rsc = pe_find_resource(data_set->resources, op_rsc);
        if (rsc) {
            /* Prefix each operation with the owning resource's status
             * (pe_print_suppres_nl keeps it on the same line) */
            rsc->fns->print(rsc, "", opts, stdout);
        } else {
            fprintf(stdout, "Unknown resource %s", op_rsc);
        }

        fprintf(stdout, ": %s (node=%s, call=%s, rc=%s",
                op_key ? op_key : ID(xml_op),
                crm_element_value(xml_op, XML_ATTR_UNAME),
                crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                crm_element_value(xml_op, XML_LRM_ATTR_RC));

        if (last) {
            /* last-rc-change is stored as epoch seconds; ctime() appends a
             * newline which is stripped before printing */
            time_t run_at = crm_parse_int(last, "0");

            fprintf(stdout, ", last-rc-change=%s, exec=%sms",
                    crm_strip_trailing_newline(ctime(&run_at)), crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
        }
        fprintf(stdout, "): %s\n", services_lrm_status_str(status));
    }
    return pcmk_ok;
}
void
cli_resource_print_location(resource_t * rsc, const char *prefix)
{
    /* Print every node targeted by this resource's location constraints,
     * with the node's score and the constraint ID. */
    GListPtr lpc = NULL;
    GListPtr list = rsc->rsc_location;
    int offset = 0;

    if (prefix) {
        /* Shrink the padded column width to compensate for the prefix */
        offset = strlen(prefix) - 2;
    }

    for (lpc = list; lpc != NULL; lpc = lpc->next) {
        rsc_to_node_t *cons = (rsc_to_node_t *) lpc->data;

        GListPtr lpc2 = NULL;

        for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
            node_t *node = (node_t *) lpc2->data;
            char *score = score2char(node->weight);     /* allocated; freed below */

            fprintf(stdout, "%s: Node %-*s (score=%s, id=%s)\n",
                    prefix ? prefix : " ", 71 - offset, node->details->uname, score, cons->id);
            free(score);
        }
    }
}
void
cli_resource_print_colocation(resource_t * rsc, bool dependents, bool recursive, int offset)
{
    /* Print the resources colocated with rsc (or, when 'dependents' is set,
     * the resources that depend on rsc), indented 4 spaces per recursion
     * level.  The pe_rsc_allocating flag is reused as a "visited" marker to
     * break colocation loops; callers are expected to clear that flag on all
     * resources before invoking this. */
    char *prefix = NULL;
    GListPtr lpc = NULL;
    GListPtr list = rsc->rsc_cons;

    /* calloc of (offset*4)+1 bytes leaves a trailing NUL after the spaces */
    prefix = calloc(1, (offset * 4) + 1);
    memset(prefix, ' ', offset * 4);

    if (dependents) {
        list = rsc->rsc_cons_lhs;
    }

    if (is_set(rsc->flags, pe_rsc_allocating)) {
        /* Break colocation loops */
        printf("loop %s\n", rsc->id);
        free(prefix);
        return;
    }

    set_bit(rsc->flags, pe_rsc_allocating);
    for (lpc = list; lpc != NULL; lpc = lpc->next) {
        rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;

        char *score = NULL;
        resource_t *peer = cons->rsc_rh;

        if (dependents) {
            peer = cons->rsc_lh;
        }

        if (is_set(peer->flags, pe_rsc_allocating)) {
            /* Peer already visited: report the loop (only on the "with"
             * side, to avoid printing it twice) and move on */
            if (dependents == FALSE) {
                fprintf(stdout, "%s%-*s (id=%s - loop)\n", prefix, 80 - (4 * offset), peer->id,
                        cons->id);
            }
            continue;
        }

        if (dependents && recursive) {
            /* Recurse first so that dependents appear above this entry */
            cli_resource_print_colocation(peer, dependents, recursive, offset + 1);
        }

        score = score2char(cons->score);
        if (cons->role_rh > RSC_ROLE_STARTED) {
            /* Role-specific colocation: include the required role */
            fprintf(stdout, "%s%-*s (score=%s, %s role=%s, id=%s)\n", prefix, 80 - (4 * offset),
                    peer->id, score, dependents ? "needs" : "with", role2text(cons->role_rh),
                    cons->id);
        } else {
            fprintf(stdout, "%s%-*s (score=%s, id=%s)\n", prefix, 80 - (4 * offset),
                    peer->id, score, cons->id);
        }
        cli_resource_print_location(peer, prefix);
        free(score);

        if (!dependents && recursive) {
            cli_resource_print_colocation(peer, dependents, recursive, offset + 1);
        }
    }
    free(prefix);
}
int
cli_resource_print(const char *rsc, pe_working_set_t * data_set, bool expanded)
{
    /* Print the status line for one resource followed by its XML definition
     * (expanded, or the raw pre-template form when available).
     * Returns 0 on success, -ENXIO when the resource is unknown. */
    char *rsc_xml = NULL;
    int opts = pe_print_printf;
    resource_t *the_rsc = find_rsc_or_clone(rsc, data_set);

    if (the_rsc == NULL) {
        return -ENXIO;
    }
    if (print_pending) {
        opts |= pe_print_pending;
    }

    the_rsc->fns->print(the_rsc, NULL, opts, stdout);

    if (expanded) {
        rsc_xml = dump_xml_formatted(the_rsc->xml);
    } else if (the_rsc->orig_xml) {
        /* Prefer the original (pre-template-expansion) definition */
        rsc_xml = dump_xml_formatted(the_rsc->orig_xml);
    } else {
        rsc_xml = dump_xml_formatted(the_rsc->xml);
    }

    fprintf(stdout, "%sxml:\n%s\n", expanded ? "" : "raw ", rsc_xml);
    free(rsc_xml);
    return 0;
}
int
cli_resource_print_attribute(const char *rsc, const char *attr, pe_working_set_t * data_set)
{
    /* Print the value of one resource attribute (instance attribute, meta
     * attribute, or utilization, depending on the global attr_set_type),
     * evaluated in the context of the node the resource is running on.
     * Returns 0 when the attribute was found and printed, -ENXIO otherwise. */
    int rc = -ENXIO;
    node_t *current = NULL;
    GHashTable *params = NULL;
    resource_t *the_rsc = find_rsc_or_clone(rsc, data_set);
    const char *value = NULL;

    if (the_rsc == NULL) {
        return -ENXIO;
    }

    if (g_list_length(the_rsc->running_on) == 1) {
        current = the_rsc->running_on->data;

    } else if (g_list_length(the_rsc->running_on) > 1) {
        /* Fix: report the attribute being queried; previously this passed
         * 'value', which is always NULL at this point, so the message ended
         * with a useless placeholder */
        CMD_ERR("%s is active on more than one node,"
                " returning the default value for %s", the_rsc->id, crm_str(attr));
    }

    params = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                   g_hash_destroy_str, g_hash_destroy_str);

    if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
        get_rsc_attributes(params, the_rsc, current, data_set);

    } else if (safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
        /* No need to redirect to the parent */
        get_meta_attributes(params, the_rsc, current, data_set);

    } else {
        unpack_instance_attributes(data_set->input, the_rsc->xml, XML_TAG_UTILIZATION, NULL,
                                   params, NULL, FALSE, data_set->now);
    }

    crm_debug("Looking up %s in %s", attr, the_rsc->id);
    value = g_hash_table_lookup(params, attr);

    if (value != NULL) {
        fprintf(stdout, "%s\n", value);
        rc = 0;
    } else {
        CMD_ERR("Attribute '%s' not found for '%s'", attr, the_rsc->id);
    }

    g_hash_table_destroy(params);
    return rc;
}
int
cli_resource_print_property(const char *rsc, const char *attr, pe_working_set_t * data_set)
{
    /* Print one XML property of the named resource's definition.
     * Returns 0 on success, -ENXIO when the resource or property is absent. */
    resource_t *the_rsc = pe_find_resource(data_set->resources, rsc);
    const char *value = NULL;

    if (the_rsc == NULL) {
        return -ENXIO;
    }

    value = crm_element_value(the_rsc->xml, attr);
    if (value == NULL) {
        return -ENXIO;
    }

    fprintf(stdout, "%s\n", value);
    return 0;
}
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index e870688805..96fea53c7c 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -1,927 +1,927 @@
/*
* Copyright (C) 2009 Andrew Beekhof
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "fake_transition.h"
cib_t *global_cib = NULL;
GListPtr op_fail = NULL;
bool action_numbers = FALSE;
gboolean quiet = FALSE;
-gboolean print_pending = FALSE;
+gboolean print_pending = TRUE;
char *temp_shadow = NULL;
extern gboolean bringing_nodes_online;
#define quiet_log(fmt, args...) do { \
if(quiet == FALSE) { \
printf(fmt , ##args); \
} \
} while(0)
extern void cleanup_alloc_calculations(pe_working_set_t * data_set);
extern xmlNode *do_calculations(pe_working_set_t * data_set, xmlNode * xml_input, crm_time_t * now);
char *use_date = NULL;
static void
get_date(pe_working_set_t * data_set)
{
    /* Set data_set->now: prefer the user-supplied --set-datetime value,
     * otherwise fall back to the execution date recorded in the input CIB. */
    int stored = 0;
    time_t original_date = 0;

    crm_element_value_int(data_set->input, "execution-date", &stored);
    original_date = stored;

    if (use_date) {
        data_set->now = crm_time_new(use_date);

    } else if (original_date) {
        char *when = NULL;

        data_set->now = crm_time_new(NULL);
        crm_time_set_timet(data_set->now, &original_date);

        when = crm_time_as_string(data_set->now, crm_time_log_date | crm_time_log_timeofday);
        printf("Using the original execution date of: %s\n", when);
        free(when);
    }
}
static void
print_cluster_status(pe_working_set_t * data_set, long options)
{
    /* Print a crm_mon-style summary: node states first (plain online/offline
     * nodes are accumulated into lists, special states get individual lines),
     * then one status line per resource. */
    char *online_nodes = NULL;
    char *online_remote_nodes = NULL;
    char *online_remote_containers = NULL;
    char *offline_nodes = NULL;
    char *offline_remote_nodes = NULL;
    GListPtr gIter = NULL;

    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *node_mode = NULL;
        char *node_name = NULL;

        if (is_container_remote_node(node)) {
            /* Container nodes are labelled "<uname>:<container resource id>" */
            node_name = crm_strdup_printf("%s:%s", node->details->uname, node->details->remote_rsc->container->id);
        } else {
            node_name = crm_strdup_printf("%s", node->details->uname);
        }

        if (node->details->unclean) {
            if (node->details->online && node->details->unclean) {
                node_mode = "UNCLEAN (online)";

            } else if (node->details->pending) {
                node_mode = "UNCLEAN (pending)";

            } else {
                node_mode = "UNCLEAN (offline)";
            }

        } else if (node->details->pending) {
            node_mode = "pending";

        } else if (node->details->standby_onfail && node->details->online) {
            node_mode = "standby (on-fail)";

        } else if (node->details->standby) {
            if (node->details->online) {
                node_mode = "standby";
            } else {
                node_mode = "OFFLINE (standby)";
            }

        } else if (node->details->maintenance) {
            if (node->details->online) {
                node_mode = "maintenance";
            } else {
                node_mode = "OFFLINE (maintenance)";
            }

        } else if (node->details->online) {
            /* Plain online node: add to the appropriate list and skip the
             * per-node printing below */
            if (is_container_remote_node(node)) {
                online_remote_containers = add_list_element(online_remote_containers, node_name);

            } else if (is_baremetal_remote_node(node)) {
                online_remote_nodes = add_list_element(online_remote_nodes, node_name);

            } else {
                online_nodes = add_list_element(online_nodes, node_name);
            }
            free(node_name);
            continue;

        } else {
            /* Plain offline node: same list treatment */
            if (is_baremetal_remote_node(node)) {
                offline_remote_nodes = add_list_element(offline_remote_nodes, node_name);

            } else if (is_container_remote_node(node)) {
                /* ignore offline container nodes */

            } else {
                offline_nodes = add_list_element(offline_nodes, node_name);
            }
            free(node_name);
            continue;
        }

        /* Only nodes in a special state (unclean/pending/standby/maintenance)
         * reach this point, so node_mode is always set here */
        if (is_container_remote_node(node)) {
            printf("ContainerNode %s: %s\n", node_name, node_mode);

        } else if (is_baremetal_remote_node(node)) {
            printf("RemoteNode %s: %s\n", node_name, node_mode);

        } else if (safe_str_eq(node->details->uname, node->details->id)) {
            printf("Node %s: %s\n", node_name, node_mode);

        } else {
            printf("Node %s (%s): %s\n", node_name, node->details->id, node_mode);
        }

        free(node_name);
    }

    if (online_nodes) {
        printf("Online: [%s ]\n", online_nodes);
        free(online_nodes);
    }
    if (offline_nodes) {
        printf("OFFLINE: [%s ]\n", offline_nodes);
        free(offline_nodes);
    }
    if (online_remote_nodes) {
        printf("RemoteOnline: [%s ]\n", online_remote_nodes);
        free(online_remote_nodes);
    }
    if (offline_remote_nodes) {
        printf("RemoteOFFLINE: [%s ]\n", offline_remote_nodes);
        free(offline_remote_nodes);
    }
    if (online_remote_containers) {
        printf("Containers: [%s ]\n", online_remote_containers);
        free(online_remote_containers);
    }

    fprintf(stdout, "\n");
    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;

        /* Skip orphaned resources that are fully stopped */
        if (is_set(rsc->flags, pe_rsc_orphan)
            && rsc->role == RSC_ROLE_STOPPED) {
            continue;
        }
        rsc->fns->print(rsc, NULL, pe_print_printf | options, stdout);
    }
    fprintf(stdout, "\n");
}
static char *
create_action_name(action_t * action)
{
    /* Build a human-readable label for one transition action, used as the
     * node name in the generated dot graph.  Caller frees the result. */
    char *action_name = NULL;
    const char *prefix = NULL;
    const char *action_host = NULL;
    const char *task = action->task;

    if (action->node) {
        action_host = action->node->details->uname;
    } else if (is_not_set(action->flags, pe_action_pseudo)) {
        /* Real action with no node assigned */
        action_host = "";
    }

    if (safe_str_eq(action->task, RSC_CANCEL)) {
        prefix = "Cancel ";
        task = "monitor"; /* TO-DO: Hack! */
    }

    if (action->rsc && action->rsc->clone_name) {
        /* Label by the clone instance name rather than the base resource */
        char *key = NULL;
        const char *name = action->rsc->clone_name;
        const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
        int interval = crm_parse_int(interval_s, "0");

        if (safe_str_eq(action->task, RSC_NOTIFY)
            || safe_str_eq(action->task, RSC_NOTIFIED)) {
            /* Notifications encode their type/operation in the meta table */
            const char *n_type = g_hash_table_lookup(action->meta, "notify_key_type");
            const char *n_task = g_hash_table_lookup(action->meta, "notify_key_operation");

            CRM_ASSERT(n_type != NULL);
            CRM_ASSERT(n_task != NULL);
            key = generate_notify_key(name, n_type, n_task);
        } else {
            key = generate_op_key(name, task, interval);
        }

        if (action_host) {
            action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", key, action_host);
        } else {
            action_name = crm_strdup_printf("%s%s", prefix ? prefix : "", key);
        }
        free(key);

    } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
        const char *op = g_hash_table_lookup(action->meta, "stonith_action");

        /* NOTE(review): action_host can still be NULL here for a pseudo
         * fence action; the %s then relies on the printf implementation's
         * NULL handling -- confirm this is intended */
        action_name = crm_strdup_printf("%s%s '%s' %s", prefix ? prefix : "", action->task, op, action_host);

    } else if (action->rsc && action_host) {
        action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", action->uuid, action_host);

    } else if (action_host) {
        action_name = crm_strdup_printf("%s%s %s", prefix ? prefix : "", action->task, action_host);

    } else {
        action_name = crm_strdup_printf("%s", action->uuid);
    }

    if (action_numbers) {
        /* Append the transition graph action ID for cross-referencing */
        char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id);

        free(action_name);
        action_name = with_id;
    }
    return action_name;
}
static void
create_dotfile(pe_working_set_t * data_set, const char *dot_file, gboolean all_actions)
{
    /* Write the transition graph to dot_file in Graphviz format: one graph
     * node per action (colored by state) and one edge per ordering
     * dependency.  With all_actions, optional/unmanaged actions are
     * included too. */
    GListPtr gIter = NULL;
    FILE *dot_strm = fopen(dot_file, "w");

    if (dot_strm == NULL) {
        crm_perror(LOG_ERR, "Could not open %s for writing", dot_file);
        return;
    }

    fprintf(dot_strm, " digraph \"g\" {\n");
    for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;
        const char *style = "dashed";
        const char *font = "black";
        const char *color = "black";
        char *action_name = create_action_name(action);

        crm_trace("Action %d: %s %s %p", action->id, action_name, action->uuid, action);

        if (is_set(action->flags, pe_action_pseudo)) {
            font = "orange";
        }

        if (is_set(action->flags, pe_action_dumped)) {
            /* Action is part of the transition: draw it prominently */
            style = "bold";
            color = "green";

        } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) {
            /* Unmanaged resource: only drawn when all_actions is requested */
            color = "red";
            font = "purple";
            if (all_actions == FALSE) {
                goto dont_write;
            }

        } else if (is_set(action->flags, pe_action_optional)) {
            color = "blue";
            if (all_actions == FALSE) {
                goto dont_write;
            }

        } else {
            color = "red";
            /* A non-optional action that was not dumped should be unrunnable */
            CRM_CHECK(is_set(action->flags, pe_action_runnable) == FALSE,;
                );
        }

        /* Mark as dumped so the edge pass below treats it as included */
        set_bit(action->flags, pe_action_dumped);
        crm_trace("\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]",
                  action_name, style, color, font);
        fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n",
                action_name, style, color, font);
  dont_write:
        free(action_name);
    }

    /* Second pass: emit ordering edges */
    for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;
        GListPtr gIter2 = NULL;

        for (gIter2 = action->actions_before; gIter2 != NULL; gIter2 = gIter2->next) {
            action_wrapper_t *before = (action_wrapper_t *) gIter2->data;
            char *before_name = NULL;
            char *after_name = NULL;
            const char *style = "dashed";
            gboolean optional = TRUE;

            if (before->state == pe_link_dumped) {
                optional = FALSE;
                style = "bold";

            } else if (is_set(action->flags, pe_action_pseudo)
                       && (before->type & pe_order_stonith_stop)) {
                continue;

            } else if (before->state == pe_link_dup) {
                /* Duplicate ordering: skip */
                continue;

            } else if (before->type == pe_order_none) {
                continue;

            } else if (is_set(before->action->flags, pe_action_dumped)
                       && is_set(action->flags, pe_action_dumped)
                       && before->type != pe_order_load) {
                /* Both endpoints are in the transition: edge is mandatory */
                optional = FALSE;
            }

            if (all_actions || optional == FALSE) {
                before_name = create_action_name(before->action);
                after_name = create_action_name(action);
                crm_trace("\"%s\" -> \"%s\" [ style = %s]",
                          before_name, after_name, style);
                fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n",
                        before_name, after_name, style);
                free(before_name);
                free(after_name);
            }
        }
    }

    fprintf(dot_strm, "}\n");
    /* NOTE: this NULL check is redundant (we returned early on fopen
     * failure) but harmless */
    if (dot_strm != NULL) {
        fflush(dot_strm);
        fclose(dot_strm);
    }
}
static void
setup_input(const char *input, const char *output)
{
    /* Obtain a CIB (from the live cluster when input is NULL, from stdin
     * when input is "-", otherwise from the named file), upgrade and
     * validate it, write it to 'output' (or an auto-created shadow file
     * whose path is kept in the temp_shadow global), and set CIB_file so
     * later cib connections read it.  Exits the process on any failure. */
    int rc = pcmk_ok;
    cib_t *cib_conn = NULL;
    xmlNode *cib_object = NULL;
    char *local_output = NULL;

    if (input == NULL) {
        /* Use live CIB */
        cib_conn = cib_new();
        rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
        if (rc == pcmk_ok) {
            rc = cib_conn->cmds->query(cib_conn, NULL, &cib_object, cib_scope_local | cib_sync_call);
        }

        cib_conn->cmds->signoff(cib_conn);
        cib_delete(cib_conn);
        cib_conn = NULL;

        if (rc != pcmk_ok) {
            fprintf(stderr, "Live CIB query failed: %s (%d)\n", pcmk_strerror(rc), rc);
            crm_exit(rc);

        } else if (cib_object == NULL) {
            fprintf(stderr, "Live CIB query failed: empty result\n");
            crm_exit(ENOTCONN);
        }

    } else if (safe_str_eq(input, "-")) {
        /* Read the CIB XML from stdin */
        cib_object = filename2xml(NULL);

    } else {
        cib_object = filename2xml(input);
    }

    /* Ensure a status section exists so later stages have one to populate */
    if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) {
        create_xml_node(cib_object, XML_CIB_TAG_STATUS);
    }

    /* Upgrade the configuration to the latest supported schema */
    if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) {
        free_xml(cib_object);
        crm_exit(ENOKEY);
    }

    if (validate_xml(cib_object, NULL, FALSE) != TRUE) {
        free_xml(cib_object);
        crm_exit(pcmk_err_schema_validation);
    }

    if (output == NULL) {
        /* No explicit output file: create a per-PID shadow CIB file and
         * remember it for cleanup via the temp_shadow global */
        char *pid = crm_itoa(getpid());

        local_output = get_shadow_file(pid);
        temp_shadow = strdup(local_output);
        output = local_output;
        free(pid);
    }

    rc = write_xml_file(cib_object, output, FALSE);
    free_xml(cib_object);
    cib_object = NULL;

    if (rc < 0) {
        fprintf(stderr, "Could not create '%s': %s\n", output, strerror(errno));
        crm_exit(rc);
    }
    /* Subsequent cib connections will read this file instead of the live CIB */
    setenv("CIB_file", output, 1);
    free(local_output);
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"quiet", 0, 0, 'Q', "\tDisplay only essentialoutput"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"-spacer-", 0, 0, '-', "\nOperations:"},
{"run", 0, 0, 'R', "\tDetermine the cluster's response to the given configuration and status"},
{"simulate", 0, 0, 'S', "Simulate the transition's execution and display the resulting cluster status"},
{"in-place", 0, 0, 'X', "Simulate the transition's execution and store the result back to the input file"},
{"show-scores", 0, 0, 's', "Show allocation scores"},
{"show-utilization", 0, 0, 'U', "Show utilization information"},
{"profile", 1, 0, 'P', "Run all tests in the named directory to create profiling data"},
- {"pending", 0, 0, 'j', "\tDisplay pending state if 'record-pending' is enabled"},
+ {"pending", 0, 0, 'j', "\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
{"-spacer-", 0, 0, '-', "\nSynthetic Cluster Events:"},
{"node-up", 1, 0, 'u', "\tBring a node online"},
{"node-down", 1, 0, 'd', "\tTake a node offline"},
{"node-fail", 1, 0, 'f', "\tMark a node as failed"},
{"op-inject", 1, 0, 'i', "\tGenerate a failure for the cluster to react to in the simulation"},
{"-spacer-", 0, 0, '-', "\t\tValue is of the form ${resource}_${task}_${interval}@${node}=${rc}."},
{"-spacer-", 0, 0, '-', "\t\tEg. memcached_monitor_20000@bart.example.com=7"},
{"-spacer-", 0, 0, '-', "\t\tFor more information on OCF return codes, refer to: http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/s-ocf-return-codes.html"},
{"op-fail", 1, 0, 'F', "\tIf the specified task occurs during the simulation, have it fail with return code ${rc}"},
{"-spacer-", 0, 0, '-', "\t\tValue is of the form ${resource}_${task}_${interval}@${node}=${rc}."},
{"-spacer-", 0, 0, '-', "\t\tEg. memcached_stop_0@bart.example.com=1\n"},
{"-spacer-", 0, 0, '-', "\t\tThe transition will normally stop at the failed action. Save the result with --save-output and re-run with --xml-file"},
{"set-datetime", 1, 0, 't', "Set date/time"},
{"quorum", 1, 0, 'q', "\tSpecify a value for quorum"},
{"watchdog", 1, 0, 'w', "\tAssume a watchdog device is active"},
{"ticket-grant", 1, 0, 'g', "Grant a ticket"},
{"ticket-revoke", 1, 0, 'r', "Revoke a ticket"},
{"ticket-standby", 1, 0, 'b', "Make a ticket standby"},
{"ticket-activate", 1, 0, 'e', "Activate a ticket"},
{"-spacer-", 0, 0, '-', "\nOutput Options:"},
{"save-input", 1, 0, 'I', "\tSave the input configuration to the named file"},
{"save-output", 1, 0, 'O', "Save the output configuration to the named file"},
{"save-graph", 1, 0, 'G', "\tSave the transition graph (XML format) to the named file"},
{"save-dotfile", 1, 0, 'D', "Save the transition graph (DOT format) to the named file"},
{"all-actions", 0, 0, 'a', "\tDisplay all possible actions in the DOT graph - even ones not part of the transition"},
{"-spacer-", 0, 0, '-', "\nData Source:"},
{"live-check", 0, 0, 'L', "\tConnect to the CIB and use the current contents as input"},
{"xml-file", 1, 0, 'x', "\tRetrieve XML from the named file"},
{"xml-pipe", 0, 0, 'p', "\tRetrieve XML from stdin"},
{"-spacer-", 0, 0, '-', "\nExamples:\n"},
{"-spacer-", 0, 0, '-', "Pretend a recurring monitor action found memcached stopped on node fred.example.com and, during recovery, that the memcached stop action failed", pcmk_option_paragraph},
{"-spacer-", 0, 0, '-', " crm_simulate -LS --op-inject memcached:0_monitor_20000@bart.example.com=7 --op-fail memcached:0_stop_0@fred.example.com=1 --save-output /tmp/memcached-test.xml", pcmk_option_example},
{"-spacer-", 0, 0, '-', "Now see what the reaction to the stop failure would be", pcmk_option_paragraph},
{"-spacer-", 0, 0, '-', " crm_simulate -S --xml-file /tmp/memcached-test.xml", pcmk_option_example},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
/* Run a single scheduler calculation against one XML test file and discard
 * the results; used by profile_all() to generate profiling data.
 */
static void
profile_one(const char *xml_file)
{
    pe_working_set_t data_set;
    xmlNode *cib_object = NULL;

    printf("* Testing %s\n", xml_file);
    cib_object = filename2xml(xml_file);

    /* Ensure a status section exists, as the scheduler expects one */
    if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) {
        create_xml_node(cib_object, XML_CIB_TAG_STATUS);
    }

    /* Upgrade to the latest schema, then validate; skip the file if either
     * step fails (same outcome as the original two separate checks)
     */
    if ((cli_config_update(&cib_object, NULL, FALSE) == FALSE)
        || (validate_xml(cib_object, NULL, FALSE) != TRUE)) {
        free_xml(cib_object);
        return;
    }

    set_working_set_defaults(&data_set);

    data_set.input = cib_object;
    get_date(&data_set);
    do_calculations(&data_set, cib_object, NULL);

    cleanup_alloc_calculations(&data_set);
}
#ifndef FILENAME_MAX
#  define FILENAME_MAX 512
#endif

/* Run profile_one() on every regular "*.xml" file in a directory.
 *
 * dir  Directory to scan for test files.
 *
 * Returns the number of test files processed (0 if the directory could not
 * be read).
 */
static int
profile_all(const char *dir)
{
    struct dirent **namelist = NULL;
    int lpc = 0;
    int file_num = scandir(dir, &namelist, 0, alphasort);

    if (file_num < 0) {
        /* Previously a scandir() failure was silently ignored */
        fprintf(stderr, "Could not read directory %s: %s\n", dir, strerror(errno));
        return 0;
    }

    if (file_num > 0) {
        struct stat prop;
        char buffer[FILENAME_MAX + 1];

        while (file_num--) {
            /* Skip hidden files and anything that is not a .xml test */
            if ('.' == namelist[file_num]->d_name[0]) {
                free(namelist[file_num]);
                continue;

            } else if (strstr(namelist[file_num]->d_name, ".xml") == NULL) {
                free(namelist[file_num]);
                continue;
            }

            lpc++;
            snprintf(buffer, FILENAME_MAX, "%s/%s", dir, namelist[file_num]->d_name);
            if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
                profile_one(buffer);
            }
            free(namelist[file_num]);
        }
    }

    /* scandir() allocates namelist even when it finds zero entries, so free
     * it unconditionally on success (previously leaked when file_num == 0)
     */
    free(namelist);
    return lpc;
}
/* Recursively count leaf resources, excluding orphans.
 *
 * data_set  Cluster working set (used when rsc is NULL).
 * rsc       Subtree to count within, or NULL to count every resource in
 *           data_set.
 *
 * Returns the number of non-orphan leaf resources found.
 */
static int
count_resources(pe_working_set_t * data_set, resource_t * rsc)
{
    GListPtr children = NULL;
    int total = 0;

    if (rsc == NULL) {
        children = data_set->resources;

    } else if (rsc->children == NULL) {
        /* Leaf resource: contributes 1 unless it is an orphan */
        return is_not_set(rsc->flags, pe_rsc_orphan);

    } else {
        children = rsc->children;
    }

    while (children != NULL) {
        total += count_resources(data_set, children->data);
        children = children->next;
    }
    return total;
}
/* crm_simulate entry point: read a CIB (live cluster, file, or stdin),
 * optionally inject synthetic cluster events, run the scheduler, and
 * display and/or save the resulting transition.
 *
 * Returns a crm_exit() status code.
 */
int
main(int argc, char **argv)
{
    int rc = 0;
    guint modified = 0;              /* count of requested synthetic modifications */
    gboolean store = FALSE;          /* -X: write simulation result back to input */
    gboolean process = FALSE;        /* run scheduler calculations */
    gboolean simulate = FALSE;       /* execute the computed transition */
    gboolean all_actions = FALSE;    /* -a: show all actions in the dot graph */
    gboolean have_stdout = FALSE;    /* stderr already redirected to stdout? */
    pe_working_set_t data_set;
    const char *xml_file = "-";      /* default data source: stdin */
    const char *quorum = NULL;
    const char *watchdog = NULL;
    const char *test_dir = NULL;     /* -P: profiling mode directory */
    const char *dot_file = NULL;
    const char *graph_file = NULL;
    const char *input_file = NULL;   /* -I: where to save the input CIB */
    const char *output_file = NULL;  /* -O: where to save the output CIB */
    int flag = 0;
    int index = 0;
    int argerr = 0;

    /* Synthetic-event argument lists; entries point directly into argv and
     * are therefore never freed here
     */
    GListPtr node_up = NULL;
    GListPtr node_down = NULL;
    GListPtr node_fail = NULL;
    GListPtr op_inject = NULL;
    GListPtr ticket_grant = NULL;
    GListPtr ticket_revoke = NULL;
    GListPtr ticket_standby = NULL;
    GListPtr ticket_activate = NULL;
    xmlNode *input = NULL;

    crm_log_cli_init("crm_simulate");
    crm_set_options(NULL, "datasource operation [additional options]",
                    long_options, "Tool for simulating the cluster's response to events");

    if (argc < 2) {
        crm_help('?', EX_USAGE);    /* no arguments: print usage and exit */
    }

    /* Parse all command-line options */
    while (1) {
        flag = crm_get_option(argc, argv, &index);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                if (have_stdout == FALSE) {
                    /* Redirect stderr to stdout so we can grep the output */
                    have_stdout = TRUE;
                    close(STDERR_FILENO);
                    dup2(STDOUT_FILENO, STDERR_FILENO);
                }
                crm_bump_log_level(argc, argv);
                /* action_numbers is a file-scope global used by the graph
                 * printing code — presumably enables action IDs in output
                 */
                action_numbers = TRUE;
                break;
            case '?':
            case '$':
                crm_help(flag, EX_OK);
                break;
            case 'p':
                /* --xml-pipe: read the CIB from stdin */
                xml_file = "-";
                break;
            case 'Q':
                quiet = TRUE;
                break;
            case 'L':
                /* --live-check: NULL tells setup_input() to query the live CIB */
                xml_file = NULL;
                break;
            case 'x':
                xml_file = optarg;
                break;
            case 'u':
                /* --node-up */
                modified++;
                bringing_nodes_online = TRUE;
                node_up = g_list_append(node_up, optarg);
                break;
            case 'd':
                /* --node-down */
                modified++;
                node_down = g_list_append(node_down, optarg);
                break;
            case 'f':
                /* --node-fail */
                modified++;
                node_fail = g_list_append(node_fail, optarg);
                break;
            case 't':
                /* --set-datetime: fake "now", applied later by get_date() */
                use_date = strdup(optarg);
                break;
            case 'i':
                /* --op-inject */
                modified++;
                op_inject = g_list_append(op_inject, optarg);
                break;
            case 'F':
                /* --op-fail implies running a full simulation */
                process = TRUE;
                simulate = TRUE;
                op_fail = g_list_append(op_fail, optarg);
                break;
            case 'w':
                /* --watchdog */
                modified++;
                watchdog = optarg;
                break;
            case 'q':
                /* --quorum */
                modified++;
                quorum = optarg;
                break;
            case 'g':
                /* --ticket-grant */
                modified++;
                ticket_grant = g_list_append(ticket_grant, optarg);
                break;
            case 'r':
                /* --ticket-revoke */
                modified++;
                ticket_revoke = g_list_append(ticket_revoke, optarg);
                break;
            case 'b':
                /* --ticket-standby */
                modified++;
                ticket_standby = g_list_append(ticket_standby, optarg);
                break;
            case 'e':
                /* --ticket-activate */
                modified++;
                ticket_activate = g_list_append(ticket_activate, optarg);
                break;
            case 'a':
                all_actions = TRUE;
                break;
            case 's':
                /* --show-scores implies running the scheduler */
                process = TRUE;
                show_scores = TRUE;
                break;
            case 'U':
                /* --show-utilization implies running the scheduler */
                process = TRUE;
                show_utilization = TRUE;
                break;
            case 'j':
                /* --pending (hidden option) */
                print_pending = TRUE;
                break;
            case 'S':
                /* --simulate */
                process = TRUE;
                simulate = TRUE;
                break;
            case 'X':
                /* --in-place: simulate and store the result back to the input */
                store = TRUE;
                process = TRUE;
                simulate = TRUE;
                break;
            case 'R':
                /* --run: calculate only, do not simulate execution */
                process = TRUE;
                break;
            case 'D':
                /* --save-dotfile implies running the scheduler */
                process = TRUE;
                dot_file = optarg;
                break;
            case 'G':
                /* --save-graph implies running the scheduler */
                process = TRUE;
                graph_file = optarg;
                break;
            case 'I':
                input_file = optarg;
                break;
            case 'O':
                output_file = optarg;
                break;
            case 'P':
                test_dir = optarg;
                break;
            default:
                ++argerr;
                break;
        }
    }

    /* NOTE(review): this looks like it was intended to be optind < argc
     * (flag unexpected positional arguments); as written it can never be
     * true — confirm before changing
     */
    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        crm_help('?', EX_USAGE);
    }

    /* Profiling mode: run every test in the directory and exit */
    if (test_dir != NULL) {
        return profile_all(test_dir);
    }

    /* Prepare the input CIB; when storing in place, the input file is also
     * the output file
     */
    setup_input(xml_file, store ? xml_file : output_file);

    /* Connect to the scratch CIB written by setup_input() (via CIB_file) */
    global_cib = cib_new();
    global_cib->cmds->signon(global_cib, crm_system_name, cib_command);

    set_working_set_defaults(&data_set);

    /* NOTE(review): data_set.now is checked immediately after
     * set_working_set_defaults(), before get_date() runs — presumably this
     * is only non-NULL when defaults pre-populate it; confirm this branch
     * can actually fire
     */
    if (data_set.now != NULL) {
        quiet_log(" + Setting effective cluster time: %s", use_date);
        crm_time_log(LOG_WARNING, "Set fake 'now' to", data_set.now,
                     crm_time_log_date | crm_time_log_timeofday);
    }

    rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call | cib_scope_local);
    CRM_ASSERT(rc == pcmk_ok);

    /* Ownership of input passes to data_set here; it is freed via the
     * cleanup_*_calculations() calls, not directly
     */
    data_set.input = input;
    get_date(&data_set);
    if(xml_file) {
        set_bit(data_set.flags, pe_flag_sanitized);
    }
    cluster_status(&data_set);

    /* Show the status of the cluster as it stands before any modifications */
    if (quiet == FALSE) {
        int options = print_pending ? pe_print_pending : 0;

        if(is_set(data_set.flags, pe_flag_maintenance_mode)) {
            quiet_log("\n              *** Resource management is DISABLED ***");
            quiet_log("\n  The cluster will not attempt to start, stop or recover services");
            quiet_log("\n");
        }

        if(data_set.disabled_resources || data_set.blocked_resources) {
            quiet_log("%d of %d resources DISABLED and %d BLOCKED from being started due to failures\n",
                      data_set.disabled_resources, count_resources(&data_set, NULL), data_set.blocked_resources);
        }

        quiet_log("\nCurrent cluster status:\n");
        print_cluster_status(&data_set, options);
    }

    /* Apply any requested synthetic events, then re-read and re-analyze the
     * modified CIB
     */
    if (modified) {
        quiet_log("Performing requested modifications\n");
        modify_configuration(&data_set, global_cib, quorum, watchdog, node_up, node_down, node_fail, op_inject,
                             ticket_grant, ticket_revoke, ticket_standby, ticket_activate);

        rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call);
        if (rc != pcmk_ok) {
            fprintf(stderr, "Could not connect to the CIB for input: %s\n", pcmk_strerror(rc));
            goto done;
        }

        /* cleanup_calculations() releases the previous data_set.input before
         * the new query result is installed below
         */
        cleanup_calculations(&data_set);
        data_set.input = input;
        get_date(&data_set);

        if(xml_file) {
            set_bit(data_set.flags, pe_flag_sanitized);
        }
        cluster_status(&data_set);
    }

    /* Optionally save the (possibly modified) input CIB */
    if (input_file != NULL) {
        rc = write_xml_file(input, input_file, FALSE);
        if (rc < 0) {
            fprintf(stderr, "Could not create '%s': %s\n", input_file, strerror(errno));
            goto done;
        }
    }

    rc = 0;
    if (process || simulate) {
        crm_time_t *local_date = NULL;

        if (show_scores && show_utilization) {
            printf("Allocation scores and utilization information:\n");
        } else if (show_scores) {
            fprintf(stdout, "Allocation scores:\n");
        } else if (show_utilization) {
            printf("Utilization information:\n");
        }

        /* Run the scheduler; this consumes input */
        do_calculations(&data_set, input, local_date);
        input = NULL;           /* Don't try and free it twice */

        if (graph_file != NULL) {
            write_xml_file(data_set.graph, graph_file, FALSE);
        }

        if (dot_file != NULL) {
            create_dotfile(&data_set, dot_file, all_actions);
        }

        if (quiet == FALSE) {
            GListPtr gIter = NULL;

            quiet_log("%sTransition Summary:\n", show_scores || show_utilization
                      || modified ? "\n" : "");
            fflush(stdout);

            for (gIter = data_set.resources; gIter != NULL; gIter = gIter->next) {
                resource_t *rsc = (resource_t *) gIter->data;

                LogActions(rsc, &data_set, TRUE);
            }
        }
    }

    /* Execute the computed transition and show the resulting cluster state */
    if (simulate) {
        rc = run_simulation(&data_set, global_cib, op_fail, quiet);
        if(quiet == FALSE) {
            get_date(&data_set);

            quiet_log("\nRevised cluster status:\n");
            cluster_status(&data_set);
            print_cluster_status(&data_set, 0);
        }
    }

  done:
    /* Common cleanup path for both success and early-exit errors */
    cleanup_alloc_calculations(&data_set);

    global_cib->cmds->signoff(global_cib);
    cib_delete(global_cib);
    free(use_date);
    fflush(stderr);

    /* Remove the scratch shadow file created by setup_input(), if any */
    if (temp_shadow) {
        unlink(temp_shadow);
        free(temp_shadow);
    }
    return crm_exit(rc);
}