diff --git a/attrd/commands.c b/attrd/commands.c index 985f90c8d3..9f46d92d80 100644 --- a/attrd/commands.c +++ b/attrd/commands.c @@ -1,770 +1,770 @@ /* * Copyright (C) 2013 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #define ATTRD_PROTOCOL_VERSION "1" int last_cib_op_done = 0; char *peer_writer = NULL; GHashTable *attributes = NULL; typedef struct attribute_s { char *uuid; /* TODO: Remove if at all possible */ char *id; char *set; GHashTable *values; int update; int timeout_ms; bool changed; bool unknown_peer_uuids; mainloop_timer_t *timer; char *user; } attribute_t; typedef struct attribute_value_s { uint32_t nodeid; gboolean is_remote; char *nodename; char *current; char *requested; char *stored; } attribute_value_t; void write_attribute(attribute_t *a); void write_or_elect_attribute(attribute_t *a); void attrd_peer_update(crm_node_t *peer, xmlNode *xml, bool filter); void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); void attrd_peer_remove(const char *host, const char *source); static gboolean send_attrd_message(crm_node_t * node, xmlNode * data) { crm_xml_add(data, F_TYPE, T_ATTRD); crm_xml_add(data, F_ATTRD_IGNORE_LOCALLY, "atomic-version"); /* Tell older versions to ignore our messages */ crm_xml_add(data, F_ATTRD_VERSION, ATTRD_PROTOCOL_VERSION); crm_xml_add_int(data, F_ATTRD_WRITER, election_state(writer)); return send_cluster_message(node, crm_msg_attrd, data, TRUE); } static gboolean attribute_timer_cb(gpointer data) { attribute_t *a = data; crm_trace("Dampen interval expired for %s in state %d", a->id, election_state(writer)); write_or_elect_attribute(a); return FALSE; } static void free_attribute_value(gpointer data) { attribute_value_t *v = data; free(v->nodename); free(v->current); free(v->requested); free(v->stored); free(v); } void free_attribute(gpointer data) { attribute_t *a = data; if(a) { free(a->id); free(a->set); free(a->uuid); free(a->user); mainloop_timer_del(a->timer); g_hash_table_destroy(a->values); free(a); } } xmlNode * build_attribute_xml( xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, const char *peer, uint32_t peerid, const char *value) { xmlNode *xml = create_xml_node(parent, __FUNCTION__); crm_xml_add(xml, F_ATTRD_ATTRIBUTE, name); crm_xml_add(xml, F_ATTRD_SET, set); crm_xml_add(xml, F_ATTRD_KEY, uuid); crm_xml_add(xml, F_ATTRD_USER, user); crm_xml_add(xml, F_ATTRD_HOST, peer); crm_xml_add_int(xml, F_ATTRD_HOST_ID, peerid); crm_xml_add(xml, F_ATTRD_VALUE, value); crm_xml_add_int(xml, F_ATTRD_DAMPEN, timeout_ms/1000); return xml; } static attribute_t * create_attribute(xmlNode *xml) { int dampen = 0; const char *value = crm_element_value(xml, F_ATTRD_DAMPEN); attribute_t *a = calloc(1, sizeof(attribute_t)); a->id = crm_element_value_copy(xml, 
F_ATTRD_ATTRIBUTE); a->set = crm_element_value_copy(xml, F_ATTRD_SET); a->uuid = crm_element_value_copy(xml, F_ATTRD_KEY); - a->values = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_attribute_value); + a->values = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, free_attribute_value); #if ENABLE_ACL a->user = crm_element_value_copy(xml, F_ATTRD_USER); crm_trace("Performing all %s operations as user '%s'", a->id, a->user); #endif if(value) { dampen = crm_get_msec(value); crm_trace("Created attribute %s with delay %dms (%s)", a->id, dampen, value); } else { crm_trace("Created attribute %s with no delay", a->id); } if(dampen > 0) { a->timeout_ms = dampen; a->timer = mainloop_timer_add(strdup(a->id), a->timeout_ms, FALSE, attribute_timer_cb, a); } g_hash_table_replace(attributes, a->id, a); return a; } void attrd_client_message(crm_client_t *client, xmlNode *xml) { bool broadcast = FALSE; static int plus_plus_len = 5; const char *op = crm_element_value(xml, F_ATTRD_TASK); if(safe_str_eq(op, "peer-remove")) { const char *host = crm_element_value(xml, F_ATTRD_HOST); crm_info("Client %s is requesting all values for %s be removed", client->name, host); if(host) { broadcast = TRUE; } } else if(safe_str_eq(op, "update")) { attribute_t *a = NULL; attribute_value_t *v = NULL; char *key = crm_element_value_copy(xml, F_ATTRD_KEY); char *set = crm_element_value_copy(xml, F_ATTRD_SET); char *host = crm_element_value_copy(xml, F_ATTRD_HOST); const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE); const char *value = crm_element_value(xml, F_ATTRD_VALUE); a = g_hash_table_lookup(attributes, attr); if(host == NULL) { crm_trace("Inferring host"); host = strdup(attrd_cluster->uname); crm_xml_add(xml, F_ATTRD_HOST, host); crm_xml_add_int(xml, F_ATTRD_HOST_ID, attrd_cluster->nodeid); } if (value) { int offset = 1; int int_value = 0; int value_len = strlen(value); if (value_len < (plus_plus_len + 2) || value[plus_plus_len] != '+' || (value[plus_plus_len + 1] != '+' && value[plus_plus_len + 1] != '=')) { goto send; } if(a) { v = g_hash_table_lookup(a->values, host); } if(v) { int_value = char2score(v->current); } if (value[plus_plus_len + 1] != '+') { const char *offset_s = value + (plus_plus_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > INFINITY) { int_value = INFINITY; } crm_info("Expanded %s=%s to %d", attr, value, int_value); crm_xml_add_int(xml, F_ATTRD_VALUE, int_value); } send: if(peer_writer == NULL && election_state(writer) != election_in_progress) { crm_info("Starting an election to determine the writer"); election_vote(writer); } crm_debug("Broadcasting %s[%s] = %s%s", attr, host, value, election_state(writer) == election_won?" 
(writer)":""); broadcast = TRUE; free(key); free(set); free(host); } else if(safe_str_eq(op, "refresh")) { crm_notice("Updating all attributes"); write_attributes(TRUE, FALSE); } if(broadcast) { send_attrd_message(NULL, xml); } } void attrd_peer_message(crm_node_t *peer, xmlNode *xml) { int peer_state = 0; const char *v = crm_element_value(xml, F_ATTRD_VERSION); const char *op = crm_element_value(xml, F_ATTRD_TASK); const char *election_op = crm_element_value(xml, F_CRM_TASK); if(election_op) { enum election_result rc = 0; crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname); rc = election_count_vote(writer, xml, TRUE); switch(rc) { case election_start: free(peer_writer); peer_writer = NULL; election_vote(writer); break; case election_lost: free(peer_writer); peer_writer = strdup(peer->uname); break; default: election_check(writer); break; } return; } else if(v == NULL) { /* From the non-atomic version */ if(safe_str_eq(op, "update")) { const char *name = crm_element_value(xml, F_ATTRD_ATTRIBUTE); crm_trace("Compatibility update of %s from %s", name, peer->uname); attrd_peer_update(peer, xml, FALSE); } else if(safe_str_eq(op, "flush")) { const char *name = crm_element_value(xml, F_ATTRD_ATTRIBUTE); attribute_t *a = g_hash_table_lookup(attributes, name); if(a) { crm_trace("Compatibility write-out of %s for %s from %s", a->id, op, peer->uname); write_or_elect_attribute(a); } } else if(safe_str_eq(op, "refresh")) { GHashTableIter aIter; attribute_t *a = NULL; g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { crm_trace("Compatibility write-out of %s for %s from %s", a->id, op, peer->uname); write_or_elect_attribute(a); } } } crm_element_value_int(xml, F_ATTRD_WRITER, &peer_state); if(election_state(writer) == election_won && peer_state == election_won && safe_str_neq(peer->uname, attrd_cluster->uname)) { crm_notice("Detected another attribute writer: %s", peer->uname); election_vote(writer); } else if(peer_state == election_won) { if(peer_writer == NULL) { peer_writer = strdup(peer->uname); crm_notice("Recorded attribute writer: %s", peer->uname); } else if(safe_str_neq(peer->uname, peer_writer)) { crm_notice("Recorded new attribute writer: %s (was %s)", peer->uname, peer_writer); free(peer_writer); peer_writer = strdup(peer->uname); } } if(safe_str_eq(op, "update")) { attrd_peer_update(peer, xml, FALSE); } else if(safe_str_eq(op, "sync")) { attrd_peer_sync(peer, xml); } else if(safe_str_eq(op, "peer-remove")) { const char *host = crm_element_value(xml, F_ATTRD_HOST); attrd_peer_remove(host, peer->uname); } else if(safe_str_eq(op, "sync-response") && safe_str_neq(peer->uname, attrd_cluster->uname)) { xmlNode *child = NULL; crm_notice("Processing %s from %s", op, peer->uname); for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) { attrd_peer_update(peer, child, TRUE); } } } void attrd_peer_sync(crm_node_t *peer, xmlNode *xml) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = create_xml_node(NULL, __FUNCTION__); crm_xml_add(sync, F_ATTRD_TASK, "sync-response"); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, 
v->nodename, v->nodeid, v->current); } } crm_debug("Syncing values to %s", peer?peer->uname:"everyone"); send_attrd_message(peer, sync); free_xml(sync); } void attrd_peer_remove(const char *host, const char *source) { attribute_t *a = NULL; GHashTableIter aIter; crm_notice("Removing all %s attributes for %s", host, source); if(host == NULL) { return; } g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { if(g_hash_table_remove(a->values, host)) { crm_debug("Removed %s[%s] for %s", a->id, host, source); } } /* if this matches a remote peer, it will be removed from the cache */ crm_remote_peer_cache_remove(host); } void attrd_peer_update(crm_node_t *peer, xmlNode *xml, bool filter) { bool changed = FALSE; attribute_value_t *v = NULL; const char *host = crm_element_value(xml, F_ATTRD_HOST); const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE); const char *value = crm_element_value(xml, F_ATTRD_VALUE); attribute_t *a = g_hash_table_lookup(attributes, attr); if(a == NULL) { a = create_attribute(xml); } v = g_hash_table_lookup(a->values, host); if(v == NULL) { v = calloc(1, sizeof(attribute_value_t)); v->nodename = strdup(host); crm_element_value_int(xml, F_ATTRD_IS_REMOTE, &v->is_remote); g_hash_table_replace(a->values, v->nodename, v); if (v->is_remote == TRUE) { crm_remote_peer_cache_add(host); } } if(filter && safe_str_neq(v->current, value) && safe_str_eq(host, attrd_cluster->uname)) { xmlNode *sync = create_xml_node(NULL, __FUNCTION__); crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", a->id, host, v->current, value, peer->uname); crm_xml_add(sync, F_ATTRD_TASK, "sync-response"); v = g_hash_table_lookup(a->values, host); build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, v->nodename, v->nodeid, v->current); crm_xml_add_int(sync, F_ATTRD_WRITER, election_state(writer)); send_attrd_message(peer, sync); free_xml(sync); } else if(safe_str_neq(v->current, value)) { crm_info("Setting %s[%s]: %s -> %s from %s", attr, host, v->current, value, peer->uname); free(v->current); if(value) { v->current = strdup(value); } else { v->current = NULL; } changed = TRUE; } else { crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value); } a->changed |= changed; /* this only involves cluster nodes. 
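(Pacemaker Remote nodes have no cluster-layer nodeid, so the name/id lookup below is skipped for them via the is_remote check.)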
*/ if(v->nodeid == 0 && (v->is_remote == FALSE)) { if(crm_element_value_int(xml, F_ATTRD_HOST_ID, (int*)&v->nodeid) == 0) { /* Create the name/id association */ crm_node_t *peer = crm_get_peer(v->nodeid, host); crm_trace("We know %s's node id now: %s", peer->uname, peer->uuid); if(election_state(writer) == election_won) { write_attributes(FALSE, TRUE); return; } } } if(changed) { if(a->timer) { crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, a->id); mainloop_timer_start(a->timer); } else { write_or_elect_attribute(a); } } } void write_or_elect_attribute(attribute_t *a) { enum election_result rc = election_state(writer); if(rc == election_won) { write_attribute(a); } else if(rc == election_in_progress) { crm_trace("Election in progress to determine who will write out %s", a->id); } else if(peer_writer == NULL) { crm_info("Starting an election to determine who will write out %s", a->id); election_vote(writer); } else { crm_trace("%s will write out %s, we are in state %d", peer_writer, a->id, rc); } } gboolean attrd_election_cb(gpointer user_data) { crm_trace("Election complete"); free(peer_writer); peer_writer = strdup(attrd_cluster->uname); /* Update the peers after an election */ attrd_peer_sync(NULL, NULL); /* Update the CIB after an election */ write_attributes(TRUE, FALSE); return FALSE; } void attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) { if(election_state(writer) == election_won && kind == crm_status_nstate && safe_str_eq(peer->state, CRM_NODE_MEMBER)) { attrd_peer_sync(peer, NULL); } else if(kind == crm_status_nstate && safe_str_neq(peer->state, CRM_NODE_MEMBER)) { attrd_peer_remove(peer->uname, __FUNCTION__); if(peer_writer && safe_str_eq(peer->uname, peer_writer)) { free(peer_writer); peer_writer = NULL; crm_notice("Lost attribute writer %s", peer->uname); } } else if(kind == crm_status_processes) { if(is_set(peer->processes, crm_proc_cpg)) { crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_MEMBER, 0); } else { crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_LOST, 0); } } } static void attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { int level = LOG_ERR; GHashTableIter iter; const char *peer = NULL; attribute_value_t *v = NULL; char *name = user_data; attribute_t *a = g_hash_table_lookup(attributes, name); if(a == NULL) { crm_info("Attribute %s no longer exists", name); goto done; } a->update = 0; if (rc == pcmk_ok && call_id < 0) { rc = call_id; } switch (rc) { case pcmk_ok: level = LOG_INFO; last_cib_op_done = call_id; break; case -pcmk_err_diff_failed: /* When an attr changes while the CIB is syncing */ case -ETIME: /* When an attr changes while there is a DC election */ case -ENXIO: /* When an attr changes while the CIB is syncing a * newer config from a node that just came up */ level = LOG_WARNING; break; } do_crm_log(level, "Update %d for %s: %s (%d)", call_id, name, pcmk_strerror(rc), rc); g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) { crm_notice("Update %d for %s[%s]=%s: %s (%d)", call_id, a->id, peer, v->requested, pcmk_strerror(rc), rc); if(rc == pcmk_ok) { free(v->stored); v->stored = v->requested; v->requested = NULL; } else { free(v->requested); v->requested = NULL; a->changed = TRUE; /* Attempt write out again */ } } done: free(name); if(a && a->changed && election_state(writer) == election_won) { write_attribute(a); } } void write_attributes(bool all, bool peer_discovered) { GHashTableIter iter; 
attribute_t *a = NULL; g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { if (peer_discovered && a->unknown_peer_uuids) { /* a new peer uuid has been discovered, try writing this attribute again. */ a->changed = TRUE; } if(all || a->changed) { write_attribute(a); } else { crm_debug("Skipping unchanged attribute %s", a->id); } } } static void build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value) { char *set = NULL; char *uuid = NULL; xmlNode *xml_obj = NULL; if(a->set) { set = g_strdup(a->set); } else { set = g_strdup_printf("%s-%s", XML_CIB_TAG_STATUS, nodeid); } if(a->uuid) { uuid = g_strdup(a->uuid); } else { int lpc; uuid = g_strdup_printf("%s-%s", set, a->id); /* Minimal attempt at sanitizing automatic IDs */ for (lpc = 0; uuid[lpc] != 0; lpc++) { switch (uuid[lpc]) { case ':': uuid[lpc] = '.'; } } } xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); crm_xml_add(xml_obj, XML_ATTR_ID, set); xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_obj, XML_ATTR_ID, uuid); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id); if(value) { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value); } else { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, ""); crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE); } g_free(uuid); g_free(set); } void write_attribute(attribute_t *a) { int updates = 0; xmlNode *xml_top = NULL; attribute_value_t *v = NULL; GHashTableIter iter; enum cib_call_options flags = cib_quorum_override; if (a == NULL) { return; } else if (the_cib == NULL) { crm_info("Write out of '%s' delayed: cib not connected", a->id); return; } else if(a->update && a->update < last_cib_op_done) { crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update); } else if(a->update) { crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update); return; } else if(mainloop_timer_running(a->timer)) { crm_info("Write out of '%s' delayed: timer is running", a->id); return; } a->changed = FALSE; a->unknown_peer_uuids = FALSE; xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS); g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) { crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_REMOTE|CRM_GET_PEER_CLUSTER); if(peer && peer->id && v->nodeid == 0) { crm_trace("Updating value's nodeid"); v->nodeid = peer->id; } if (peer == NULL) { /* If the user is trying to set an attribute on an unknown peer, ignore it. */ crm_notice("Update error (peer not found): %s[%s]=%s failed (host=%p)", v->nodename, a->id, v->current, peer); } else if (peer->uuid == NULL) { /* peer is found, but we don't know the uuid yet. 
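The unknown_peer_uuids flag set below lets write_attributes() retry the attribute once the uuid has been discovered.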
Wait until we discover a new uuid before attempting to write */ a->unknown_peer_uuids = TRUE; crm_notice("Update error (unknown peer uuid, retry will be attempted once uuid is discovered): %s[%s]=%s failed (host=%p)", v->nodename, a->id, v->current, peer); } else { crm_debug("Update: %s[%s]=%s (%s %u %u %s)", v->nodename, a->id, v->current, peer->uuid, peer->id, v->nodeid, peer->uname); build_update_element(xml_top, a, peer->uuid, v->current); updates++; free(v->requested); v->requested = NULL; if(v->current) { v->requested = strdup(v->current); } else { /* Older versions don't know about the cib_mixed_update flag * Make sure it goes to the local cib which does */ flags |= cib_mixed_update|cib_scope_local; } } } if(updates) { crm_log_xml_trace(xml_top, __FUNCTION__); a->update = cib_internal_op(the_cib, CIB_OP_MODIFY, NULL, XML_CIB_TAG_STATUS, xml_top, NULL, flags, a->user); crm_notice("Sent update %d with %d changes for %s, id=%s, set=%s", a->update, updates, a->id, a->uuid ? a->uuid : "", a->set); the_cib->cmds->register_callback( the_cib, a->update, 120, FALSE, strdup(a->id), "attrd_cib_callback", attrd_cib_callback); } free_xml(xml_top); } diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c index d677c18b11..9518b1157f 100644 --- a/crmd/lrm_state.c +++ b/crmd/lrm_state.c @@ -1,749 +1,749 @@ /* * Copyright (C) 2012 David Vossel * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include GHashTable *lrm_state_table = NULL; GHashTable *proxy_table = NULL; int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); typedef struct remote_proxy_s { char *node_name; char *session_id; gboolean is_local; crm_ipc_t *ipc; mainloop_io_t *source; } remote_proxy_t; static void history_cache_destroy(gpointer data) { rsc_history_t *entry = data; if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } free(entry->rsc.type); free(entry->rsc.class); free(entry->rsc.provider); lrmd_free_event(entry->failed); lrmd_free_event(entry->last); free(entry->id); free(entry); } static void free_rsc_info(gpointer value) { lrmd_rsc_info_t *rsc_info = value; lrmd_free_rsc_info(rsc_info); } static void free_deletion_op(gpointer value) { struct pending_deletion_op_s *op = value; free(op->rsc); delete_ha_msg_input(op->input); free(op); } static void free_recurring_op(gpointer value) { struct recurring_op_s *op = (struct recurring_op_s *)value; free(op->rsc_id); free(op->op_type); free(op->op_key); free(op); } lrm_state_t * lrm_state_create(const char *node_name) { lrm_state_t *state = NULL; if (!node_name) { crm_err("No node name given for lrm state object"); return NULL; } state = calloc(1, sizeof(lrm_state_t)); if (!state) { return NULL; } state->node_name = strdup(node_name); state->rsc_info_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_rsc_info); state->deletion_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free_deletion_op); state->pending_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free_recurring_op); state->resource_history = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, history_cache_destroy); g_hash_table_insert(lrm_state_table, (char *)state->node_name, state); return state; } void lrm_state_destroy(const char *node_name) { g_hash_table_remove(lrm_state_table, node_name); } static gboolean remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) { remote_proxy_t *proxy = value; const char *node_name = user_data; if (safe_str_eq(node_name, proxy->node_name)) { return TRUE; } return FALSE; } static void internal_lrm_state_destroy(gpointer data) { lrm_state_t *lrm_state = data; if (!lrm_state) { return; } crm_trace("Destroying proxy table with %d members", g_hash_table_size(proxy_table)); g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name); remote_ra_cleanup(lrm_state); lrmd_api_delete(lrm_state->conn); if (lrm_state->rsc_info_cache) { crm_trace("Destroying rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_destroy(lrm_state->rsc_info_cache); } if (lrm_state->resource_history) { crm_trace("Destroying history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_destroy(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Destroying deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_destroy(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Destroying pending op 
cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_destroy(lrm_state->pending_ops); } free((char *)lrm_state->node_name); free(lrm_state); } void lrm_state_reset_tables(lrm_state_t * lrm_state) { if (lrm_state->resource_history) { crm_trace("Re-setting history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_remove_all(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Re-setting deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_remove_all(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Re-setting pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_remove_all(lrm_state->pending_ops); } if (lrm_state->rsc_info_cache) { crm_trace("Re-setting rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_remove_all(lrm_state->rsc_info_cache); } } static void remote_proxy_free(gpointer data) { remote_proxy_t *proxy = data; crm_debug("Signing out of the IPC Service"); if (proxy->source != NULL) { mainloop_del_ipc_client(proxy->source); } free(proxy->node_name); free(proxy->session_id); } gboolean lrm_state_init_local(void) { if (lrm_state_table) { return TRUE; } lrm_state_table = - g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, internal_lrm_state_destroy); + g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, internal_lrm_state_destroy); if (!lrm_state_table) { return FALSE; } proxy_table = - g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, remote_proxy_free); + g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, remote_proxy_free); if (!proxy_table) { g_hash_table_destroy(lrm_state_table); return FALSE; } return TRUE; } void lrm_state_destroy_all(void) { if (lrm_state_table) { crm_trace("Destroying state table with %d members", g_hash_table_size(lrm_state_table)); g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; } if(proxy_table) { crm_trace("Destroying proxy table with %d members", g_hash_table_size(proxy_table)); g_hash_table_destroy(proxy_table); proxy_table = NULL; } } lrm_state_t * lrm_state_find(const char *node_name) { if (!node_name) { return NULL; } return g_hash_table_lookup(lrm_state_table, node_name); } lrm_state_t * lrm_state_find_or_create(const char *node_name) { lrm_state_t *lrm_state; lrm_state = g_hash_table_lookup(lrm_state_table, node_name); if (!lrm_state) { lrm_state = lrm_state_create(node_name); } return lrm_state; } GList * lrm_state_get_list(void) { return g_hash_table_get_values(lrm_state_table); } void lrm_state_disconnect(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return; } ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn); lrmd_api_delete(lrm_state->conn); lrm_state->conn = NULL; } int lrm_state_is_connected(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return FALSE; } return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn); } int lrm_state_poke_connection(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return -1; } return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn); } int lrm_state_ipc_connect(lrm_state_t * lrm_state) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_api_new(); ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, lrm_op_callback); } ret = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn, CRM_SYSTEM_CRMD, NULL); if (ret != pcmk_ok) { 
lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } static void remote_proxy_notify_destroy(lrmd_t *lrmd, const char *session_id) { /* sending to the remote node that an ipc connection has been destroyed */ xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, "destroy"); crm_xml_add(msg, F_LRMD_IPC_SESSION, session_id); lrmd_internal_proxy_send(lrmd, msg); free_xml(msg); } static void remote_proxy_relay_event(lrmd_t *lrmd, const char *session_id, xmlNode *msg) { /* sending to the remote node an event msg. */ xmlNode *event = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(event, F_LRMD_IPC_OP, "event"); crm_xml_add(event, F_LRMD_IPC_SESSION, session_id); add_message_xml(event, F_LRMD_IPC_MSG, msg); crm_log_xml_explicit(event, "EventForProxy"); lrmd_internal_proxy_send(lrmd, event); free_xml(event); } static void remote_proxy_relay_response(lrmd_t *lrmd, const char *session_id, xmlNode *msg, int msg_id) { /* sending to the remote node a response msg. */ xmlNode *response = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(response, F_LRMD_IPC_OP, "response"); crm_xml_add(response, F_LRMD_IPC_SESSION, session_id); crm_xml_add_int(response, F_LRMD_IPC_MSG_ID, msg_id); add_message_xml(response, F_LRMD_IPC_MSG, msg); lrmd_internal_proxy_send(lrmd, response); free_xml(response); } static int remote_proxy_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { /* Async responses from cib and friends back to clients via pacemaker_remoted */ xmlNode *xml = NULL; remote_proxy_t *proxy = userdata; lrm_state_t *lrm_state = lrm_state_find(proxy->node_name); if (lrm_state == NULL) { return 0; } xml = string2xml(buffer); if (xml == NULL) { crm_warn("Received a NULL msg from IPC service."); return 1; } crm_trace("Passing event back to %.8s on %s: %.200s", proxy->session_id, proxy->node_name, buffer); remote_proxy_relay_event(lrm_state->conn, proxy->session_id, xml); free_xml(xml); return 1; } static void remote_proxy_disconnected(void *userdata) { remote_proxy_t *proxy = userdata; lrm_state_t *lrm_state = lrm_state_find(proxy->node_name); crm_trace("destroying %p", userdata); proxy->source = NULL; proxy->ipc = NULL; if (lrm_state && lrm_state->conn) { remote_proxy_notify_destroy(lrm_state->conn, proxy->session_id); } g_hash_table_remove(proxy_table, proxy->session_id); } static remote_proxy_t * remote_proxy_new(const char *node_name, const char *session_id, const char *channel) { static struct ipc_client_callbacks proxy_callbacks = { .dispatch = remote_proxy_dispatch_internal, .destroy = remote_proxy_disconnected }; remote_proxy_t *proxy = calloc(1, sizeof(remote_proxy_t)); proxy->node_name = strdup(node_name); proxy->session_id = strdup(session_id); if (safe_str_eq(channel, CRM_SYSTEM_CRMD)) { proxy->is_local = TRUE; } else { proxy->source = mainloop_add_ipc_client(channel, G_PRIORITY_LOW, 0, proxy, &proxy_callbacks); proxy->ipc = mainloop_get_ipc_client(proxy->source); if (proxy->source == NULL) { remote_proxy_free(proxy); return NULL; } } g_hash_table_insert(proxy_table, proxy->session_id, proxy); return proxy; } gboolean crmd_is_proxy_session(const char *session) { return g_hash_table_lookup(proxy_table, session) ? 
TRUE : FALSE; } void crmd_proxy_send(const char *session, xmlNode *msg) { remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); lrm_state_t *lrm_state = NULL; if (!proxy) { return; } crm_log_xml_trace(msg, "to-proxy"); lrm_state = lrm_state_find(proxy->node_name); if (lrm_state) { crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name); remote_proxy_relay_event(lrm_state->conn, session, msg); } } static void crmd_proxy_dispatch(const char *session, xmlNode *msg) { crm_log_xml_trace(msg, "CRMd-PROXY[inbound]"); crm_xml_add(msg, F_CRM_SYS_FROM, session); if (crmd_authorize_message(msg, NULL, session)) { route_message(C_IPC_MESSAGE, msg); } trigger_fsa(fsa_source); } static void remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { lrm_state_t *lrm_state = userdata; const char *op = crm_element_value(msg, F_LRMD_IPC_OP); const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); int msg_id = 0; /* sessions are raw ipc connections to IPC, * all we do is proxy requests/responses exactly * like they are given to us at the ipc level. */ CRM_CHECK(op != NULL, return); CRM_CHECK(session != NULL, return); crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); /* This is msg from remote ipc client going to real ipc server */ if (safe_str_eq(op, "new")) { const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); CRM_CHECK(channel != NULL, return); if (remote_proxy_new(lrm_state->node_name, session, channel) == NULL) { remote_proxy_notify_destroy(lrmd, session); } crm_info("new remote proxy client established to %s, session id %s", channel, session); } else if (safe_str_eq(op, "destroy")) { g_hash_table_remove(proxy_table, session); } else if (safe_str_eq(op, "request")) { int flags = 0; xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); CRM_CHECK(request != NULL, return); if (proxy == NULL) { /* proxy connection no longer exists */ remote_proxy_notify_destroy(lrmd, session); return; } else if ((proxy->is_local == FALSE) && (crm_ipc_connected(proxy->ipc) == FALSE)) { g_hash_table_remove(proxy_table, session); return; } crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); #if ENABLE_ACL CRM_ASSERT(lrm_state->node_name); crm_acl_get_set_user(request, F_LRMD_IPC_USER, lrm_state->node_name); #endif if (proxy->is_local) { /* this is for the crmd, which we are, so don't try * and connect/send to ourselves over ipc. instead * do it directly. 
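* When the client expects a response, a minimal "ack" reply is synthesized below and relayed back through the proxy session.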
*/ crmd_proxy_dispatch(session, request); if (flags & crm_ipc_client_response) { xmlNode *op_reply = create_xml_node(NULL, "ack"); crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } } else if(is_set(flags, crm_ipc_proxied)) { int rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); if(rc < 0) { xmlNode *op_reply = create_xml_node(NULL, "nack"); crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); /* Send a n'ack so the caller doesn't block */ crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); crm_xml_add_int(op_reply, "rc", rc); remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); } } else { int rc = pcmk_ok; xmlNode *op_reply = NULL; /* For backwards compatibility with pacemaker_remoted <= 1.1.10 */ crm_trace("Relaying %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); rc = crm_ipc_send(proxy->ipc, request, flags, 10000, &op_reply); if(rc < 0) { crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); } if(op_reply) { remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } } } else { crm_err("Unknown proxy operation: %s", op); } } int lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port, int timeout_ms) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_remote_api_new(lrm_state->node_name, server, port); if (!lrm_state->conn) { return -1; } ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, remote_lrm_op_callback); lrmd_internal_set_proxy_callback(lrm_state->conn, lrm_state, remote_proxy_cb); } crm_trace("initiating remote connection to %s at %d with timeout %d", server, port, timeout_ms); ret = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn, lrm_state->node_name, timeout_ms); if (ret != pcmk_ok) { lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } int lrm_state_get_metadata(lrm_state_t * lrm_state, const char *class, const char *provider, const char *agent, char **output, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* Optimize this... only retrieve metadata from local lrmd connection. Perhaps consider * caching result. */ return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata(lrm_state->conn, class, provider, agent, output, options); } int lrm_state_cancel(lrm_state_t * lrm_state, const char *rsc_id, const char *action, int interval) { if (!lrm_state->conn) { return -ENOTCONN; } /* Optimize this, cancel requires a synced request/response to the server. * Figure out a way to make this async. 
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_cancel(lrm_state, rsc_id, action, interval); } return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id, action, interval); } lrmd_rsc_info_t * lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc = NULL; if (!lrm_state->conn) { return NULL; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_get_rsc_info(lrm_state, rsc_id); } rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id); if (rsc == NULL) { /* only contact the lrmd if we don't already have a cached rsc info */ rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options); if (rsc == NULL) { return NULL; } /* cache the result */ g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc); } return lrmd_copy_rsc_info(rsc); } int lrm_state_exec(lrm_state_t * lrm_state, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ lrmd_key_value_t * params) { if (!lrm_state->conn) { lrmd_key_value_freeall(params); return -ENOTCONN; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_exec(lrm_state, rsc_id, action, userdata, interval, timeout, start_delay, params); } return ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id, action, userdata, interval, timeout, start_delay, lrmd_opt_notify_changes_only, params); } int lrm_state_register_rsc(lrm_state_t * lrm_state, const char *rsc_id, const char *class, const char *provider, const char *agent, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* optimize this... this function is a synced round trip from client to daemon. * The crmd/lrm.c code path should be re-factored to allow the register of resources * to be performed async. The lrmd client api needs to make an async version * of register available. */ if (is_remote_lrmd_ra(agent, provider, NULL)) { return lrm_state_find_or_create(rsc_id) ? pcmk_ok : -1; } return ((lrmd_t *) lrm_state->conn)->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider, agent, options); } int lrm_state_unregister_rsc(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* optimize this... this function is a synced round trip from client to daemon. * The crmd/lrm.c code path that uses this function should always treat it as an * async operation. The lrmd client api needs to make an async version unreg available. 
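* For the remote connection resource itself nothing is registered with the lrmd, so the branch below only removes our lrm_state entry.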
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { lrm_state_destroy(rsc_id); return pcmk_ok; } g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id); return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options); } diff --git a/doc/pcs-crmsh-quick-ref.md b/doc/pcs-crmsh-quick-ref.md index b8c8598a16..814a9f0d02 100644 --- a/doc/pcs-crmsh-quick-ref.md +++ b/doc/pcs-crmsh-quick-ref.md @@ -1,161 +1,267 @@ + + + +**Table of Contents** + +- [General Operations](#general-operations) + - [Display the configuration](#display-the-configuration) + - [Display the current status](#display-the-current-status) + - [Node standby](#node-standby) + - [Set cluster property](#set-cluster-property) +- [Resource manipulation](#resource-manipulation) + - [List Resource Agent (RA) classes](#list-resource-agent-ra-classes) + - [List available RAs](#list-available-ras) + - [List RA info](#list-ra-info) + - [Create a resource](#create-a-resource) + - [Display a resource](#display-a-resource) + - [Display fencing resources](#display-fencing-resources) + - [Display Stonith RA info](#display-stonith-ra-info) + - [Start a resource](#start-a-resource) + - [Stop a resource](#stop-a-resource) + - [Remove a resource](#remove-a-resource) + - [Modify a resource](#modify-a-resource) + - [Delete parameters for a given resource](#delete-parameters-for-a-given-resource) + - [List the current resource defaults](#list-the-current-resource-defaults) + - [Set resource defaults](#set-resource-defaults) + - [List the current operation defaults](#list-the-current-operation-defaults) + - [Set operation defaults](#set-operation-defaults) + - [Set Colocation](#set-colocation) + - [Set ordering](#set-ordering) + - [Set preferred location](#set-preferred-location) + - [Move resources](#move-resources) + - [Create a clone](#create-a-clone) + - [Create a master/slave clone](#create-a-masterslave-clone) +- [Other operations](#other-operations) + - [Batch changes](#batch-changes) + + + +# General Operations + ## Display the configuration - crmsh # crm configure show + crmsh # crm configure show xml pcs # pcs cluster cib + +crmsh can show a simplified (non-xml) syntax as well + + crmsh # crm configure show ## Display the current status crmsh # crm status pcs # pcs status +also + + # crm_mon -1 + ## Node standby - crmsh # crm node standby +Put node in standby + + crmsh # crm node standby pcmk-1 pcs # pcs cluster standby pcmk-1 - crmsh # crm node online +Remove node from standby + + crmsh # crm node online pcmk-1 pcs # pcs cluster unstandby pcmk-1 -## Setting configuration options +crm has the ability to set the status on reboot or forever. +pcs can apply the change to all the nodes. 
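+For example, set standby for one reboot with crmsh, or put every node in standby with pcs (lifetime and --all support assumed for reasonably recent crmsh and pcs):
+
+    crmsh # crm node standby pcmk-1 reboot
+    pcs # pcs cluster standby --all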
+ +## Set cluster property crmsh # crm configure property stonith-enabled=false pcs # pcs property set stonith-enabled=false -## Listing available resources +# Resource manipulation + +## List Resource Agent (RA) classes crmsh # crm ra classes pcs # pcs resource standards +## List available RAs + + crmsh # crm ra list ocf + crmsh # crm ra list lsb + crmsh # crm ra list service + crmsh # crm ra list stonith + pcs # pcs resource agents ocf + pcs # pcs resource agents lsb + pcs # pcs resource agents service + pcs # pcs resource agents stonith + pcs # pcs resource agents + +You can also filter by provider: + crmsh # crm ra list ocf pacemaker pcs # pcs resource agents ocf:pacemaker -## Creating a resource +## List RA info + + crmsh # crm ra meta IPaddr2 + pcs # pcs resource describe IPaddr2 + +Use any RA name (like IPaddr2) from the list displayed with the previous command. +You can also use the full class:provider:RA format if multiple RAs with the same name are available: + + crmsh # crm ra meta ocf:heartbeat:IPaddr2 + pcs # pcs resource describe ocf:heartbeat:IPaddr2 + +## Create a resource crmsh # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip=192.168.122.120 cidr_netmask=32 \ op monitor interval=30s pcs # pcs resource create ClusterIP IPaddr2 ip=192.168.0.120 cidr_netmask=32 The standard and provider (`ocf:heartbeat`) are determined automatically since `IPaddr2` is unique. The monitor operation is automatically created based on the agent's metadata. ## Display a resource + crmsh # crm configure show + pcs # pcs resource show + +crmsh also displays fencing resources. +The result can be filtered by supplying a resource name (e.g. `ClusterIP`): + crmsh # crm configure show ClusterIP + pcs # pcs resource show ClusterIP + +## Display fencing resources + + crmsh # crm resource show + pcs # pcs stonith show + +pcs treats STONITH devices separately. 
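+Because of this, a fencing device is created with `pcs stonith create` rather than `pcs resource create`; for example (the fence_ipmilan parameters below are purely illustrative):
+
+    crmsh # crm configure primitive ipmi-fencing stonith:fence_ipmilan \
+      params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123
+    pcs # pcs stonith create ipmi-fencing fence_ipmilan pcmk_host_list="pcmk-1 pcmk-2" \
+      ipaddr=10.0.0.1 login=testuser passwd=abc123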
+ +## Display Stonith RA info + + crmsh # crm ra meta stonith:fence_ipmilan + pcs # pcs stonith describe fence_ipmilan + ## Start a resource crmsh # crm resource start ClusterIP pcs # pcs resource enable ClusterIP ## Stop a resource crmsh # crm resource stop ClusterIP pcs # pcs resource disable ClusterIP ## Remove a resource crmsh # crm configure delete ClusterIP pcs # pcs resource delete ClusterIP -## Update a resource +## Modify a resource crmsh # crm resource param ClusterIP set clusterip_hash=sourceip pcs # pcs resource update ClusterIP clusterip_hash=sourceip -## Resource defaults +## Delete parameters for a given resource - crmsh # crm configure rsc_defaults resource-stickiness=100 - pcs # pcs resource rsc defaults resource-stickiness=100 - -Listing the current defaults: + crmsh # crm resource param ClusterIP delete nic + pcs # pcs resource update ClusterIP ip=192.168.0.98 nic= + +## List the current resource defaults crmsh # crm configure show type:rsc_defaults pcs # pcs resource rsc defaults -## Operation defaults - - crmsh # crm configure op_defaults timeout=240s - pcs # pcs resource op defaults timeout=240s +## Set resource defaults -Listing the current defaults: + crmsh # crm configure rsc_defaults resource-stickiness=100 + pcs # pcs resource rsc defaults resource-stickiness=100 + +## List the current operation defaults crmsh # crm configure show type:op_defaults pcs # pcs resource op defaults -## Colocation +## Set operation defaults + + crmsh # crm configure op_defaults timeout=240s + pcs # pcs resource op defaults timeout=240s + +## Set Colocation crmsh # crm configure colocation website-with-ip INFINITY: WebSite ClusterIP pcs # pcs constraint colocation add ClusterIP with WebSite INFINITY With roles: - crmsh # + crmsh # crm configure colocation another-ip-with-website inf: AnotherIP WebSite:Master pcs # pcs constraint colocation add Started AnotherIP with Master WebSite INFINITY -## Start/stop ordering +## Set ordering crmsh # crm configure order apache-after-ip mandatory: ClusterIP WebSite pcs # pcs constraint order ClusterIP then WebSite With roles: - crmsh # + crmsh # crm configure order ip-after-website Mandatory: WebSite:Master AnotherIP pcs # pcs constraint order promote WebSite then start AnotherIP -## Preferred locations +## Set preferred location crmsh # crm configure location prefer-pcmk-1 WebSite 50: pcmk-1 pcs # pcs constraint location WebSite prefers pcmk-1=50 With roles: - crmsh # + crmsh # crm configure location prefer-pcmk-1 WebSite rule role=Master 50: \#uname eq pcmk-1 pcs # pcs constraint location WebSite rule role=master 50 \#uname eq pcmk-1 -## Moving resources +## Move resources crmsh # crm resource move WebSite pcmk-1 pcs # pcs resource move WebSite pcmk-1 crmsh # crm resource unmove WebSite pcs # pcs resource unmove WebSite - -## Creating a clone - crmsh # crm configure clone WebIP ClusterIP meta globally-unique="true" clone-max="2" clone-node-max="2" +Remember that moving a resource sets a stickiness of -INF on it until it is unmoved. + +## Create a clone + + crmsh # crm configure clone WebIP ClusterIP meta globally-unique=true clone-max=2 clone-node-max=2 pcs # pcs resource clone ClusterIP globally-unique=true clone-max=2 clone-node-max=2 -## Creating a master/slave clone +## Create a master/slave clone crmsh # crm configure ms WebDataClone WebData \ meta master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true pcs # pcs resource master WebDataClone WebData \ master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 \ notify=true -## ... 
- - crmsh # - pcs # +# Other operations ## Batch changes crmsh # crm crmsh # cib new drbd_cfg crmsh # configure primitive WebData ocf:linbit:drbd params drbd_resource=wwwdata \ op monitor interval=60s crmsh # configure ms WebDataClone WebData meta master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true crmsh # cib commit drbd_cfg crmsh # quit . pcs # pcs cluster cib drbd_cfg pcs # pcs -f drbd_cfg resource create WebData ocf:linbit:drbd drbd_resource=wwwdata \ op monitor interval=60s pcs # pcs -f drbd_cfg resource master WebDataClone WebData master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true pcs # pcs cluster push cib drbd_cfg diff --git a/extra/resources/controld b/extra/resources/controld index 9b26f77a04..2cd85dcbc2 100644 --- a/extra/resources/controld +++ b/extra/resources/controld @@ -1,301 +1,301 @@ #!/bin/sh # # Resource Agent for managing the DLM controld process. # # Copyright (c) 2009 Novell, Inc # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} . ${OCF_FUNCTIONS} : ${__OCF_ACTION=$1} ####################################################################### if [ -e "$OCF_ROOT/resource.d/heartbeat/controld" ]; then ocf_log info "Using heartbeat controld agent" $OCF_ROOT/resource.d/heartbeat/controld $1 exit $? fi meta_data() { cat < 1.0 This Resource Agent can control the dlm_controld services needed by cluster-aware file systems. It assumes that dlm_controld is in your default PATH. In most cases, it should be run as an anonymous clone. DLM Agent for cluster file systems Any additional options to start the dlm_controld service with DLM Options - + The location where configfs is or should be mounted Location of configfs The daemon to start - supports gfs_controld(.pcmk) and dlm_controld(.pcmk) The daemon to start Allow DLM start-up even if STONITH/fencing is disabled in the cluster. Setting this option to true will cause cluster malfunction and hangs on fail-over for DLM clients that require fencing (such as GFS2, OCFS2, and cLVM2). This option is advanced use only. Allow start-up even without STONITH/fencing END } ####################################################################### DLM_SYSFS_DIR="/sys/kernel/dlm" controld_usage() { cat <&1) if [ $? -eq 0 ]; then if [ -n "$tmp" ]; then ocf_log err "Uncontrolled lockspace exists, system must reboot. 
Executing suicide fencing" stonith_admin --reboot=$(crm_node -n) --tag controld exit $OCF_ERR_GENERIC fi fi } controld_start() { controld_monitor; rc=$? case $rc in $OCF_SUCCESS) return $OCF_SUCCESS;; $OCF_NOT_RUNNING) ;; *) return $OCF_ERR_GENERIC;; esac if [ ! -e $OCF_RESKEY_configdir ]; then modprobe configfs if [ ! -e $OCF_RESKEY_configdir ]; then ocf_log err "$OCF_RESKEY_configdir not available" return $OCF_NOT_INSTALLED fi fi mount | grep "type configfs" > /dev/null if [ $? != 0 ]; then mount -t configfs none $OCF_RESKEY_configdir fi if [ ! -e $OCF_RESKEY_configdir/dlm ]; then modprobe dlm if [ ! -e $OCF_RESKEY_configdir/dlm ]; then ocf_log err "$OCF_RESKEY_configdir/dlm not available" return $OCF_NOT_INSTALLED fi fi if ! ocf_is_true "$OCF_RESKEY_allow_stonith_disabled" && \ ! ocf_is_true "`crm_attribute --type=crm_config --name=stonith-enabled --query --quiet --default=true`"; then ocf_log err "The cluster property stonith-enabled may not be deactivated to use the DLM" return $OCF_ERR_CONFIGURED fi ${OCF_RESKEY_daemon} $OCF_RESKEY_args while true do sleep 1 controld_monitor; rc=$? case $rc in $OCF_SUCCESS) local addr_list=$(cat /sys/kernel/config/dlm/cluster/comms/*/addr_list 2>/dev/null) if [ $? -eq 0 ] && [ -n "$addr_list" ]; then return $OCF_SUCCESS fi ;; $OCF_NOT_RUNNING) return $OCF_NOT_RUNNING ;; *) return $OCF_ERR_GENERIC ;; esac ocf_log debug "Waiting for ${OCF_RESKEY_daemon} to be ready" done } controld_stop() { controld_monitor; rc=$? if [ $rc = $OCF_NOT_RUNNING ]; then return $OCF_SUCCESS fi killall -TERM ${OCF_RESKEY_daemon}; rc=$? if [ $rc != 0 ]; then return $OCF_ERR_GENERIC fi rc=$OCF_SUCCESS while [ $rc = $OCF_SUCCESS ]; do controld_monitor; rc=$? sleep 1 done if [ $rc = $OCF_NOT_RUNNING ]; then rc=$OCF_SUCCESS fi return $rc } controld_monitor() { local rc killall -0 ${OCF_RESKEY_daemon} >/dev/null 2>&1 ; rc=$? case $rc in 0) rc=$OCF_SUCCESS;; 1) rc=$OCF_NOT_RUNNING;; *) rc=$OCF_ERR_GENERIC;; esac # if the dlm is not successfully running, but # dlm lockspace bits are left over, we self must fence. if [ $rc -ne $OCF_SUCCESS ]; then check_uncontrolled_locks fi return $rc } controld_validate() { check_binary killall check_binary ${OCF_RESKEY_daemon} case ${OCF_RESKEY_CRM_meta_globally_unique} in yes|Yes|true|True|1) ocf_log err "$OCF_RESOURCE_INSTANCE must be configured with the globally_unique=false meta attribute" exit $OCF_ERR_CONFIGURED ;; esac [ -d /var/run/cluster ] || mkdir /var/run/cluster return $OCF_SUCCESS } : ${OCF_RESKEY_sctp=false} : ${OCF_RESKEY_configdir=/sys/kernel/config} : ${OCF_RESKEY_CRM_meta_globally_unique:="false"} case "$HA_quorum_type" in pcmk) daemon_ext=".pcmk";; *) daemon_ext="";; esac case "$OCF_RESOURCE_INSTANCE" in *[gG][fF][sS]*) : ${OCF_RESKEY_args=-g 0} : ${OCF_RESKEY_daemon=gfs_controld${daemon_ext}} ;; *[dD][lL][mM]*) - : ${OCF_RESKEY_args=-q 0 -s 0} + : ${OCF_RESKEY_args=-s 0} : ${OCF_RESKEY_daemon=dlm_controld${daemon_ext}} ;; *) - : ${OCF_RESKEY_args=-q 0 -s 0} + : ${OCF_RESKEY_args=-s 0} : ${OCF_RESKEY_daemon=dlm_controld${daemon_ext}} esac case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; start) controld_validate; controld_start;; stop) controld_stop;; monitor) controld_validate; controld_monitor;; validate-all) controld_validate;; usage|help) controld_usage exit $OCF_SUCCESS ;; *) controld_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? 
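# propagate the exit status of the dispatched action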
exit $rc diff --git a/fencing/commands.c b/fencing/commands.c index 60ef1697e7..1885c144d9 100644 --- a/fencing/commands.c +++ b/fencing/commands.c @@ -1,2066 +1,2066 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if SUPPORT_CIBSECRETS # include #endif #include GHashTable *device_list = NULL; GHashTable *topology = NULL; GList *cmd_list = NULL; static int active_children = 0; struct device_search_s { char *host; char *action; int per_device_timeout; int replies_needed; int replies_received; void *user_data; void (*callback) (GList * devices, void *user_data); GListPtr capable; }; static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data); static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer, const char *client_id); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); typedef struct async_command_s { int id; int pid; int fd_stdout; int options; int default_timeout; int timeout; char *op; char *origin; char *client; char *client_name; char *remote_op_id; char *victim; uint32_t victim_nodeid; char *action; char *device; char *mode; GListPtr device_list; GListPtr device_next; void *internal_user_data; void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data); guint timer_sigterm; guint timer_sigkill; /*! 
If the operation timed out, this is the last signal * we sent to the process to get it to terminate */ int last_timeout_signo; } async_command_t; static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc); static int get_action_timeout(stonith_device_t * device, const char *action, int default_timeout) { char buffer[512] = { 0, }; char *value = NULL; CRM_CHECK(action != NULL, return default_timeout); if (!device->params) { return default_timeout; } snprintf(buffer, sizeof(buffer) - 1, "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (!value) { return default_timeout; } return atoi(value); } static void free_async_command(async_command_t * cmd) { if (!cmd) { return; } cmd_list = g_list_remove(cmd_list, cmd); g_list_free_full(cmd->device_list, free); free(cmd->device); free(cmd->action); free(cmd->victim); free(cmd->remote_op_id); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->mode); free(cmd->op); free(cmd); } static async_command_t * create_async_command(xmlNode * msg) { async_command_t *cmd = NULL; xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR); const char *action = crm_element_value(op, F_STONITH_ACTION); CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL); crm_log_xml_trace(msg, "Command"); cmd = calloc(1, sizeof(async_command_t)); crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id)); crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options)); crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; cmd->origin = crm_element_value_copy(msg, F_ORIG); cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID); cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID); cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME); cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION); cmd->action = strdup(action); cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET); cmd->mode = crm_element_value_copy(op, F_STONITH_MODE); cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE); CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL); CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient")); cmd->done_cb = st_child_done; cmd_list = g_list_append(cmd_list, cmd); return cmd; } static gboolean stonith_device_execute(stonith_device_t * device) { int exec_rc = 0; async_command_t *cmd = NULL; stonith_action_t *action = NULL; CRM_CHECK(device != NULL, return FALSE); if (device->active_pid) { crm_trace("%s is still active with pid %u", device->id, device->active_pid); return TRUE; } if (device->pending_ops) { GList *first = device->pending_ops; device->pending_ops = g_list_remove_link(device->pending_ops, first); cmd = first->data; g_list_free_1(first); } if (cmd == NULL) { crm_trace("Nothing further to do for %s", device->id); return TRUE; } #if SUPPORT_CIBSECRETS if (replace_secret_params(device->id, device->params) < 0) { /* replacing secrets failed! */ if (safe_str_eq(cmd->action,"stop")) { /* don't fail on stop! 
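The stop must still succeed so the device can be removed cleanly, so we log and carry on without the secret parameters.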
*/ crm_info("proceeding with the stop operation for %s", device->id); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", device->id); exec_rc = PCMK_OCF_NOT_CONFIGURED; cmd->done_cb(0, exec_rc, NULL, cmd); return TRUE; } } #endif action = stonith_action_create(device->agent, cmd->action, cmd->victim, cmd->victim_nodeid, cmd->timeout, device->params, device->aliases); /* for async exec, exec_rc is pid if positive and error code if negative/zero */ exec_rc = stonith_action_execute_async(action, (void *)cmd, cmd->done_cb); if (exec_rc > 0) { crm_debug("Operation %s%s%s on %s now running with pid=%d, timeout=%ds", cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "", device->id, exec_rc, cmd->timeout); device->active_pid = exec_rc; } else { crm_warn("Operation %s%s%s on %s failed: %s (%d)", cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "", device->id, pcmk_strerror(exec_rc), exec_rc); cmd->done_cb(0, exec_rc, NULL, cmd); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static void schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) { CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } if (device->include_nodeid && cmd->victim) { crm_node_t *node = crm_get_peer(0, cmd->victim); cmd->victim_nodeid = node->id; } cmd->device = strdup(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { crm_debug("Scheduling %s on %s for remote peer %s with op id (%s) (timeout=%ds)", cmd->action, device->id, cmd->origin, cmd->remote_op_id, cmd->timeout); } else { crm_debug("Scheduling %s on %s for %s (timeout=%ds)", cmd->action, device->id, cmd->client, cmd->timeout); } device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); } void free_device(gpointer data) { GListPtr gIter = NULL; stonith_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation %s", device->id, cmd->action); cmd->done_cb(0, -ENODEV, NULL, cmd); free_async_command(cmd); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); mainloop_destroy_trigger(device->work); free_xml(device->agent_metadata); free(device->namespace); free(device->on_target_actions); free(device->agent); free(device->id); free(device); } static GHashTable * build_port_aliases(const char *hostmap, GListPtr * targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = - g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); + g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, g_hash_destroy_str, g_hash_destroy_str); if (hostmap == NULL) { return aliases; } max = strlen(hostmap); for (; lpc <= max; lpc++) { switch (hostmap[lpc]) { /* Assignment chars */ case '=': case ':': if (lpc > last) { free(name); name = calloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimiter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if (name) { char *value = NULL; value = calloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last);
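/* At this point "name" holds the node name and "value" the port parsed from a name=value (or name:value) pair; e.g. pcmk_host_map="node1:1;node2:2" maps node1 -> "1" and node2 -> "2", and each port is also appended to the targets list below. */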
crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { *targets = g_list_append(*targets, strdup(value)); } value = NULL; name = NULL; added++; } else if (lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last); } last = lpc + 1; break; } if (hostmap[lpc] == 0) { break; } } if (added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } static void parse_host_line(const char *line, int max, GListPtr * output) { int lpc = 0; int last = 0; if (max <= 0) { return; } /* Check for any complaints about additional parameters that the device doesn't understand */ if (strstr(line, "invalid") || strstr(line, "variable")) { crm_debug("Skipping: %s", line); return; } crm_trace("Processing %d bytes: [%s]", max, line); /* Skip initial whitespace */ for (lpc = 0; lpc <= max && isspace(line[lpc]); lpc++) { last = lpc + 1; } /* Now the actual content */ for (lpc = 0; lpc <= max; lpc++) { gboolean a_space = isspace(line[lpc]); if (a_space && lpc < max && isspace(line[lpc + 1])) { /* fast-forward to the end of the spaces */ } else if (a_space || line[lpc] == ',' || line[lpc] == 0) { int rc = 1; char *entry = NULL; if (lpc != last) { entry = calloc(1, 1 + lpc - last); rc = sscanf(line + last, "%[a-zA-Z0-9_-.]", entry); } if (entry == NULL) { /* Skip */ } else if (rc != 1) { crm_warn("Could not parse (%d %d): %s", last, lpc, line + last); } else if (safe_str_neq(entry, "on") && safe_str_neq(entry, "off")) { crm_trace("Adding '%s'", entry); *output = g_list_append(*output, entry); entry = NULL; } free(entry); last = lpc + 1; } } } static GListPtr parse_host_list(const char *hosts) { int lpc = 0; int max = 0; int last = 0; GListPtr output = NULL; if (hosts == NULL) { return output; } max = strlen(hosts); for (lpc = 0; lpc <= max; lpc++) { if (hosts[lpc] == '\n' || hosts[lpc] == 0) { char *line = NULL; int len = lpc - last; if(len > 1) { line = malloc(1 + len); } if(line) { snprintf(line, 1 + len, "%s", hosts + last); line[len] = 0; /* Because it might be '\n' */ parse_host_line(line, len, &output); free(line); } last = lpc + 1; } } crm_trace("Parsed %d entries from '%s'", g_list_length(output), hosts); return output; } static xmlNode * get_agent_metadata(const char *agent) { stonith_t *st = stonith_api_new(); xmlNode *xml = NULL; char *buffer = NULL; int rc = 0; rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); if (rc || !buffer) { crm_err("Could not retrieve metadata for fencing agent %s", agent); free(buffer); stonith_api_delete(st); return NULL; } xml = string2xml(buffer); free(buffer); stonith_api_delete(st); return xml; } static gboolean is_nodeid_required(xmlNode * xml) { xmlXPathObjectPtr xpath = NULL; if (stand_alone) { return FALSE; } if (!xml) { return FALSE; } xpath = xpath_search(xml, "//parameter[@name='nodeid']"); if (numXpathResults(xpath) <= 0) { freeXpathObject(xpath); return FALSE; } freeXpathObject(xpath); return TRUE; } static char * get_on_target_actions(xmlNode * xml) { char *actions = NULL; xmlXPathObjectPtr xpath = NULL; int max = 0; int lpc = 0; if (!xml) { return NULL; } xpath = xpath_search(xml, "//action"); max = numXpathResults(xpath); if (max <= 0) { freeXpathObject(xpath); return NULL; } actions = calloc(1, 512); for (lpc = 0; lpc < max; lpc++) { const char *on_target = NULL; const char *action = NULL; xmlNode *match = getXpathResult(xpath, lpc); CRM_CHECK(match != NULL, continue); on_target = crm_element_value(match, "on_target"); action = crm_element_value(match,
"name"); if (action && crm_is_true(on_target)) { if (strlen(actions)) { g_strlcat(actions, " ", 512); } g_strlcat(actions, action, 512); } } freeXpathObject(xpath); if (!strlen(actions)) { free(actions); actions = NULL; } return actions; } static stonith_device_t * build_device_from_xml(xmlNode * msg) { const char *value = NULL; xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR); stonith_device_t *device = NULL; device = calloc(1, sizeof(stonith_device_t)); device->id = crm_element_value_copy(dev, XML_ATTR_ID); device->agent = crm_element_value_copy(dev, "agent"); device->namespace = crm_element_value_copy(dev, "namespace"); device->params = xml2list(dev); value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST); if (value) { device->targets = parse_host_list(value); } value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP); device->aliases = build_port_aliases(value, &(device->targets)); device->agent_metadata = get_agent_metadata(device->agent); device->on_target_actions = get_on_target_actions(device->agent_metadata); value = g_hash_table_lookup(device->params, "nodeid"); if (!value) { device->include_nodeid = is_nodeid_required(device->agent_metadata); } if (device->on_target_actions) { crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node", device->id, device->on_target_actions); } device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); /* TODO: Hook up priority */ return device; } static const char * target_list_type(stonith_device_t * dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK); if (check_type == NULL) { if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) { check_type = "static-list"; } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) { check_type = "static-list"; } else { check_type = "dynamic-list"; } } return check_type; } void schedule_internal_command(const char *origin, stonith_device_t * device, const char *action, const char *victim, int timeout, void *internal_user_data, void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data)) { async_command_t *cmd = NULL; cmd = calloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; cmd->action = strdup(action); cmd->victim = victim ? strdup(victim) : NULL; cmd->device = strdup(device->id); cmd->origin = strdup(origin); cmd->client = strdup(crm_system_name); cmd->client_name = strdup(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ schedule_stonith_command(cmd, device); } gboolean string_in_list(GListPtr list, const char *item) { int lpc = 0; int max = g_list_length(list); for (lpc = 0; lpc < max; lpc++) { const char *value = g_list_nth_data(list, lpc); if (safe_str_eq(item, value)) { return TRUE; } else { crm_trace("%d: '%s' != '%s'", lpc, item, value); } } return FALSE; } static void status_search_cb(GPid pid, int rc, const char *output, gpointer user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd->device ? 
g_hash_table_lookup(device_list, cmd->device) : NULL; gboolean can = FALSE; free_async_command(cmd); if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } dev->active_pid = 0; mainloop_set_trigger(dev->work); if (rc == 1 /* unknown */ ) { crm_trace("Host %s is not known by %s", search->host, dev->id); } else if (rc == 0 /* active */ || rc == 2 /* inactive */ ) { can = TRUE; } else { crm_notice("Unknown result when testing if %s can fence %s: rc=%d", dev->id, search->host, rc); } search_devices_record_result(search, dev->id, can); } static void dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL; gboolean can_fence = FALSE; free_async_command(cmd); /* Host/alias must be in the list output to be eligible to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } dev->active_pid = 0; mainloop_set_trigger(dev->work); /* If we successfully got the targets earlier, don't disable. */ if (rc != 0 && !dev->targets) { crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output); /* Fall back to status */ g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status")); g_list_free_full(dev->targets, free); dev->targets = NULL; } else if (!rc) { crm_info("Refreshing port list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = parse_host_list(output); dev->targets_age = time(NULL); } if (dev->targets) { const char *alias = g_hash_table_lookup(dev->aliases, search->host); if (!alias) { alias = search->host; } if (string_in_list(dev->targets, alias)) { can_fence = TRUE; } } search_devices_record_result(search, dev->id, can_fence); } /*! * \internal * \brief Checks to see if an identical device already exists in the device_list */ static stonith_device_t * device_has_duplicate(stonith_device_t * device) { char *key = NULL; char *value = NULL; GHashTableIter gIter; stonith_device_t *dup = g_hash_table_lookup(device_list, device->id); if (!dup) { crm_trace("No match for %s", device->id); return NULL; } else if (safe_str_neq(dup->agent, device->agent)) { crm_trace("Different agent: %s != %s", dup->agent, device->agent); return NULL; } /* Use calculate_operation_digest() here?
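* For now the loop below compares every non-meta parameter field by field; any missing or differing value means the entries are not duplicates.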
*/ g_hash_table_iter_init(&gIter, device->params); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) { if(strstr(key, "CRM_meta") == key) { continue; } else if(strcmp(key, "crm_feature_set") == 0) { continue; } else { char *other_value = g_hash_table_lookup(dup->params, key); if (!other_value || safe_str_neq(other_value, value)) { crm_trace("Different value for %s: %s != %s", key, other_value, value); return NULL; } } } crm_trace("Match"); return dup; } int stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib) { stonith_device_t *dup = NULL; stonith_device_t *device = build_device_from_xml(msg); dup = device_has_duplicate(device); if (dup) { crm_notice("Device '%s' already existed in device list (%d active devices)", device->id, g_hash_table_size(device_list)); free_device(device); device = dup; } else { stonith_device_t *old = g_hash_table_lookup(device_list, device->id); if (from_cib && old && old->api_registered) { /* If the cib is writing over an entry that is shared with a stonith client, * copy any pending ops that currently exist on the old entry to the new one. * Otherwise the pending ops will be reported as failures */ crm_trace("Overwriting an existing entry for %s from the cib", device->id); device->pending_ops = old->pending_ops; device->api_registered = TRUE; old->pending_ops = NULL; if (device->pending_ops) { mainloop_set_trigger(device->work); } } g_hash_table_replace(device_list, device->id, device); crm_notice("Added '%s' to the device list (%d active devices)", device->id, g_hash_table_size(device_list)); } if (desc) { *desc = device->id; } if (from_cib) { device->cib_registered = TRUE; } else { device->api_registered = TRUE; } return pcmk_ok; } int stonith_device_remove(const char *id, gboolean from_cib) { stonith_device_t *device = g_hash_table_lookup(device_list, id); if (!device) { crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list)); return pcmk_ok; } if (from_cib) { device->cib_registered = FALSE; } else { device->verified = FALSE; device->api_registered = FALSE; } if (!device->cib_registered && !device->api_registered) { g_hash_table_remove(device_list, id); crm_info("Removed '%s' from the device list (%d active devices)", id, g_hash_table_size(device_list)); } return pcmk_ok; } static int count_active_levels(stonith_topology_t * tp) { int lpc = 0; int count = 0; for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if (tp->levels[lpc] != NULL) { count++; } } return count; } void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if (tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->node); free(tp); } int stonith_level_register(xmlNode * msg, char **desc) { int id = 0; int rc = pcmk_ok; xmlNode *child = NULL; xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR); const char *node = crm_element_value(level, F_STONITH_TARGET); stonith_topology_t *tp = g_hash_table_lookup(topology, node); crm_element_value_int(level, XML_ATTR_ID, &id); if (desc) { *desc = g_strdup_printf("%s[%d]", node, id); } if (id <= 0 || id >= ST_LEVEL_MAX) { return -EINVAL; } if (tp == NULL) { tp = calloc(1, sizeof(stonith_topology_t)); tp->node = strdup(node); g_hash_table_replace(topology, tp->node, tp); crm_trace("Added %s to the topology (%d active entries)", node, g_hash_table_size(topology)); } if (tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry (%d active entries)", 
node, id, count_active_levels(tp)); } for (child = __xml_first_child(level); child != NULL; child = __xml_next(child)) { const char *device = ID(child); crm_trace("Adding device '%s' for %s (%d)", device, node, id); tp->levels[id] = g_list_append(tp->levels[id], strdup(device)); } crm_info("Node %s has %d active fencing levels", node, count_active_levels(tp)); return rc; } int stonith_level_remove(xmlNode * msg, char **desc) { int id = 0; xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR); const char *node = crm_element_value(level, F_STONITH_TARGET); stonith_topology_t *tp = g_hash_table_lookup(topology, node); crm_element_value_int(level, XML_ATTR_ID, &id); if (desc) { *desc = g_strdup_printf("%s[%d]", node, id); } if (tp == NULL) { crm_info("Node %s not found (%d active entries)", node, g_hash_table_size(topology)); return pcmk_ok; } else if (id < 0 || id >= ST_LEVEL_MAX) { return -EINVAL; } if (id == 0 && g_hash_table_remove(topology, node)) { crm_info("Removed all %s related entries from the topology (%d active entries)", node, g_hash_table_size(topology)); } else if (id > 0 && tp->levels[id] != NULL) { g_list_free_full(tp->levels[id], free); tp->levels[id] = NULL; crm_info("Removed entry '%d' from %s's topology (%d active entries remaining)", id, node, count_active_levels(tp)); } return pcmk_ok; } static int stonith_device_action(xmlNode * msg, char **output) { int rc = pcmk_ok; xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR); const char *id = crm_element_value(dev, F_STONITH_DEVICE); async_command_t *cmd = NULL; stonith_device_t *device = NULL; if (id) { crm_trace("Looking for '%s'", id); device = g_hash_table_lookup(device_list, id); } if (device) { cmd = create_async_command(msg); if (cmd == NULL) { return -EPROTO; } schedule_stonith_command(cmd, device); rc = -EINPROGRESS; } else { crm_info("Device %s not found", id ? id : ""); rc = -ENODEV; } return rc; } static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence) { search->replies_received++; if (can_fence && device) { search->capable = g_list_append(search->capable, strdup(device)); } if (search->replies_needed == search->replies_received) { crm_debug("Finished search. %d devices can perform action (%s) on node %s", g_list_length(search->capable), search->action ? search->action : "", search->host ?
search->host : ""); search->callback(search->capable, search->user_data); free(search->host); free(search->action); free(search); } } static void can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search) { gboolean can = FALSE; const char *check_type = NULL; const char *host = search->host; const char *alias = NULL; CRM_LOG_ASSERT(dev != NULL); if (dev == NULL) { goto search_report_results; } else if (host == NULL) { can = TRUE; goto search_report_results; } if (dev->on_target_actions && search->action && strstr(dev->on_target_actions, search->action) && safe_str_neq(host, stonith_our_uname)) { /* this device can only execute this action on the target node */ goto search_report_results; } alias = g_hash_table_lookup(dev->aliases, host); if (alias == NULL) { alias = host; } check_type = target_list_type(dev); if (safe_str_eq(check_type, "none")) { can = TRUE; } else if (safe_str_eq(check_type, "static-list")) { /* Presence in the hostmap is sufficient * Only use if all hosts on which the device can be active can always fence all listed hosts */ if (string_in_list(dev->targets, host)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP) && g_hash_table_lookup(dev->aliases, host)) { can = TRUE; } } else if (safe_str_eq(check_type, "dynamic-list")) { time_t now = time(NULL); if (dev->targets == NULL || dev->targets_age + 60 < now) { schedule_internal_command(__FUNCTION__, dev, "list", NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ return; } if (string_in_list(dev->targets, alias)) { can = TRUE; } } else if (safe_str_eq(check_type, "status")) { schedule_internal_command(__FUNCTION__, dev, "status", search->host, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; } else { crm_err("Unknown check type: %s", check_type); } if (safe_str_eq(host, alias)) { crm_notice("%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type); } else { crm_notice("%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias, check_type); } search_report_results: search_devices_record_result(search, dev ? dev->id : NULL, can); } static void search_devices(gpointer key, gpointer value, gpointer user_data) { stonith_device_t *dev = value; struct device_search_s *search = user_data; can_fence_host_with_device(dev, search); } #define DEFAULT_QUERY_TIMEOUT 20 static void get_capable_devices(const char *host, const char *action, int timeout, void *user_data, void (*callback) (GList * devices, void *user_data)) { struct device_search_s *search; int per_device_timeout = DEFAULT_QUERY_TIMEOUT; int devices_needing_async_query = 0; char *key = NULL; const char *check_type = NULL; GHashTableIter gIter; stonith_device_t *device = NULL; if (!g_hash_table_size(device_list)) { callback(NULL, user_data); return; } search = calloc(1, sizeof(struct device_search_s)); if (!search) { callback(NULL, user_data); return; } g_hash_table_iter_init(&gIter, device_list); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) { check_type = target_list_type(device); if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) { devices_needing_async_query++; } } /* If we have devices that require an async event in order to know what * nodes they can fence, we have to give the events a timeout. The total * query timeout is divided among those events. 
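* For example (illustrative): a total timeout of 20s across 4 devices needing async queries yields a 5s per-device timeout; if the division truncates to 0, the code falls back to DEFAULT_QUERY_TIMEOUT and logs an error.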
*/ if (devices_needing_async_query) { per_device_timeout = timeout / devices_needing_async_query; if (!per_device_timeout) { crm_err("stonith-timeout duration %d is too low, raise the duration to %d seconds", timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query); per_device_timeout = DEFAULT_QUERY_TIMEOUT; } else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) { crm_notice ("stonith-timeout duration %d is low for the current configuration. Consider raising it to %d seconds", timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query); } } search->host = host ? strdup(host) : NULL; search->action = action ? strdup(action) : NULL; search->per_device_timeout = per_device_timeout; /* We are guaranteed this many replies. Even if a device gets * unregistered somehow during the async search, we will get * the correct number of replies. */ search->replies_needed = g_hash_table_size(device_list); search->callback = callback; search->user_data = user_data; /* kick off the search */ crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s", search->replies_needed, search->action ? search->action : "", search->host ? search->host : ""); g_hash_table_foreach(device_list, search_devices, search); } struct st_query_data { xmlNode *reply; char *remote_peer; char *client_id; char *target; char *action; int call_options; }; static void stonith_query_capable_device_cb(GList * devices, void *user_data) { struct st_query_data *query = user_data; int available_devices = 0; xmlNode *dev = NULL; xmlNode *list = NULL; GListPtr lpc = NULL; /* Pack the results into data */ list = create_xml_node(NULL, __FUNCTION__); crm_xml_add(list, F_STONITH_TARGET, query->target); for (lpc = devices; lpc != NULL; lpc = lpc->next) { stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data); int action_specific_timeout; if (!device) { /* It is possible the device got unregistered while * determining who can fence the target */ continue; } available_devices++; action_specific_timeout = get_action_timeout(device, query->action, 0); dev = create_xml_node(list, F_STONITH_DEVICE); crm_xml_add(dev, XML_ATTR_ID, device->id); crm_xml_add(dev, "namespace", device->namespace); crm_xml_add(dev, "agent", device->agent); crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified); if (action_specific_timeout) { crm_xml_add_int(dev, F_STONITH_ACTION_TIMEOUT, action_specific_timeout); } if (query->target == NULL) { xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS); g_hash_table_foreach(device->params, hash2field, attrs); } } crm_xml_add_int(list, "st-available-devices", available_devices); if (query->target) { crm_debug("Found %d matching devices for '%s'", available_devices, query->target); } else { crm_debug("%d devices installed", available_devices); } if (list != NULL) { crm_trace("Attaching query list output"); add_message_xml(query->reply, F_STONITH_CALLDATA, list); } stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id); free_xml(query->reply); free(query->remote_peer); free(query->client_id); free(query->target); free(query->action); free(query); free_xml(list); g_list_free_full(devices, free); } static void stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options) { struct st_query_data *query = NULL; const char *action = NULL; const char *target = NULL; int timeout = 0; xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_DEBUG_3); crm_element_value_int(msg, F_STONITH_TIMEOUT,
&timeout); if (dev) { const char *device = crm_element_value(dev, F_STONITH_DEVICE); target = crm_element_value(dev, F_STONITH_TARGET); action = crm_element_value(dev, F_STONITH_ACTION); if (device && safe_str_eq(device, "manual_ack")) { /* No query or reply necessary */ return; } } crm_log_xml_debug(msg, "Query"); query = calloc(1, sizeof(struct st_query_data)); query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok); query->remote_peer = remote_peer ? strdup(remote_peer) : NULL; query->client_id = client_id ? strdup(client_id) : NULL; query->target = target ? strdup(target) : NULL; query->action = action ? strdup(action) : NULL; query->call_options = call_options; get_capable_devices(target, action, timeout, query, stonith_query_capable_device_cb); } #define ST_LOG_OUTPUT_MAX 512 static void log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output) { if (rc == 0) { next = NULL; } if (cmd->victim != NULL) { do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR, "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s", cmd->action, pid, cmd->id, cmd->client_name, cmd->victim, cmd->device, rc, pcmk_strerror(rc), next ? ". Trying: " : "", next ? next : ""); } else { do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE, "Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s", cmd->action, pid, cmd->device, rc, pcmk_strerror(rc), next ? ". Trying: " : "", next ? next : ""); } if (output) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = g_strdup_printf("%s:%d", cmd->device, pid); crm_log_output(rc == 0 ? LOG_DEBUG : LOG_WARNING, prefix, output); g_free(prefix); } } static void stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid) { xmlNode *reply = NULL; gboolean bcast = FALSE; reply = stonith_construct_async_reply(cmd, output, NULL, rc); if (safe_str_eq(cmd->action, "metadata")) { /* Too verbose to log */ crm_trace("Metadata query for %s", cmd->device); output = NULL; } else if (crm_str_eq(cmd->action, "monitor", TRUE) || crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) { crm_trace("Never broadcast %s replies", cmd->action); } else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim) && safe_str_neq(cmd->action, "on")) { crm_trace("Broadcast %s reply for %s", cmd->action, cmd->victim); crm_xml_add(reply, F_SUBTYPE, "broadcast"); bcast = TRUE; } log_operation(cmd, rc, pid, NULL, output); crm_log_xml_trace(reply, "Reply"); if (bcast) { crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY); send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE); } else if (cmd->origin) { crm_trace("Directed reply to %s", cmd->origin); send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE); } else { crm_trace("Directed local %ssync reply to %s", (cmd->options & st_opt_sync_call) ? 
"" : "a-", cmd->client_name); do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE); } if (stand_alone) { /* Do notification with a clean data object */ xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE); crm_xml_add_int(notify_data, F_STONITH_RC, rc); crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim); crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op); crm_xml_add(notify_data, F_STONITH_DELEGATE, cmd->device); crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id); crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client); do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data); } free_xml(reply); } void unfence_cb(GPid pid, int rc, const char *output, gpointer user_data) { async_command_t * cmd = user_data; stonith_device_t *dev = g_hash_table_lookup(device_list, cmd->device); log_operation(cmd, rc, pid, NULL, output); if(dev) { dev->active_pid = 0; mainloop_set_trigger(dev->work); } else { crm_trace("Device %s does not exist", cmd->device); } if(rc != 0) { crm_exit(DAEMON_RESPAWN_STOP); } } static void cancel_stonith_command(async_command_t * cmd) { stonith_device_t *device; CRM_CHECK(cmd != NULL, return); if (!cmd->device) { return; } device = g_hash_table_lookup(device_list, cmd->device); if (device) { crm_trace("Cancel scheduled %s on %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } #define READ_MAX 500 static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data) { stonith_device_t *device = NULL; async_command_t *cmd = user_data; GListPtr gIter = NULL; GListPtr gIterNext = NULL; CRM_CHECK(cmd != NULL, return); active_children--; /* The device is ready to do something else now */ device = g_hash_table_lookup(device_list, cmd->device); if (device) { device->active_pid = 0; if (rc == pcmk_ok && (safe_str_eq(cmd->action, "list") || safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) { device->verified = TRUE; } mainloop_set_trigger(device->work); } crm_trace("Operation %s on %s completed with rc=%d (%d remaining)", cmd->action, cmd->device, rc, g_list_length(cmd->device_next)); if (rc != 0 && cmd->device_next) { stonith_device_t *dev = g_hash_table_lookup(device_list, cmd->device_next->data); if (dev) { log_operation(cmd, rc, pid, dev->id, output); cmd->device_next = cmd->device_next->next; schedule_stonith_command(cmd, dev); /* Prevent cmd from being freed */ cmd = NULL; goto done; } } if (rc > 0) { /* Try to provide _something_ useful */ if(output == NULL) { rc = -ENODATA; } else if(strstr(output, "imed out")) { rc = -ETIMEDOUT; } else if(strstr(output, "Unrecognised action")) { rc = -EOPNOTSUPP; } else { rc = -pcmk_err_generic; } } stonith_send_async_reply(cmd, output, rc, pid); if (rc != 0) { goto done; } /* Check to see if any operations are scheduled to do the exact * same thing that just completed. If so, rather than * performing the same fencing operation twice, return the result * of this operation for all pending commands it matches. */ for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd_other = gIter->data; gIterNext = gIter->next; if (cmd == cmd_other) { continue; } /* A pending scheduled command matches the command that just finished if. * 1. The client connections are different. * 2. The node victim is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. 
*/ if (safe_str_eq(cmd->client, cmd_other->client) || safe_str_neq(cmd->victim, cmd_other->victim) || safe_str_neq(cmd->action, cmd_other->action) || safe_str_neq(cmd->device, cmd_other->device)) { continue; } crm_notice ("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s", cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name); cmd_list = g_list_remove_link(cmd_list, gIter); stonith_send_async_reply(cmd_other, output, rc, pid); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(gIter); } done: free_async_command(cmd); } static gint sort_device_priority(gconstpointer a, gconstpointer b) { const stonith_device_t *dev_a = a; const stonith_device_t *dev_b = b; if (dev_a->priority > dev_b->priority) { return -1; } else if (dev_a->priority < dev_b->priority) { return 1; } return 0; } static void stonith_fence_get_devices_cb(GList * devices, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim); if (g_list_length(devices) > 0) { /* Order based on priority */ devices = g_list_sort(devices, sort_device_priority); device = g_hash_table_lookup(device_list, devices->data); if (device) { cmd->device_list = devices; cmd->device_next = devices->next; devices = NULL; /* list owned by cmd now */ } } /* we have a device, schedule it for fencing. */ if (device) { schedule_stonith_command(cmd, device); /* in progress */ return; } /* no device found! */ stonith_send_async_reply(cmd, NULL, -ENODEV, 0); free_async_command(cmd); g_list_free_full(devices, free); } static int stonith_fence(xmlNode * msg) { const char *device_id = NULL; stonith_device_t *device = NULL; async_command_t *cmd = create_async_command(msg); xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR); if (cmd == NULL) { return -EPROTO; } device_id = crm_element_value(dev, F_STONITH_DEVICE); if (device_id) { device = g_hash_table_lookup(device_list, device_id); if (device == NULL) { crm_err("Requested device '%s' is not available", device_id); return -ENODEV; } schedule_stonith_command(cmd, device); } else { const char *host = crm_element_value(dev, F_STONITH_TARGET); if (cmd->options & st_opt_cs_nodeid) { int nodeid = crm_atoi(host, NULL); crm_node_t *node = crm_get_peer(nodeid, NULL); if (node) { host = node->uname; } } get_capable_devices(host, cmd->action, cmd->default_timeout, cmd, stonith_fence_get_devices_cb); } return -EINPROGRESS; } xmlNode * stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc) { int lpc = 0; xmlNode *reply = NULL; const char *name = NULL; const char *value = NULL; const char *names[] = { F_STONITH_OPERATION, F_STONITH_CALLID, F_STONITH_CLIENTID, F_STONITH_CLIENTNAME, F_STONITH_REMOTE_OP_ID, F_STONITH_CALLOPTS }; crm_trace("Creating a basic reply"); reply = create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, "st_output", output); crm_xml_add_int(reply, F_STONITH_RC, rc); CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply); for (lpc = 0; lpc < DIMOF(names); lpc++) { name = names[lpc]; value = crm_element_value(request, name); crm_xml_add(reply, name, value); } if (data != NULL) { crm_trace("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } static xmlNode * 
stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc) { xmlNode *reply = NULL; crm_trace("Creating a basic reply"); reply = create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, F_STONITH_OPERATION, cmd->op); crm_xml_add(reply, F_STONITH_DEVICE, cmd->device); crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id); crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client); crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name); crm_xml_add(reply, F_STONITH_TARGET, cmd->victim); crm_xml_add(reply, F_STONITH_ACTION, cmd->op); crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin); crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id); crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options); crm_xml_add_int(reply, F_STONITH_RC, rc); crm_xml_add(reply, "st_output", output); if (data != NULL) { crm_info("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } bool fencing_peer_active(crm_node_t *peer) { if (peer == NULL) { return FALSE; } else if (peer->uname == NULL) { return FALSE; } else if(peer->processes & (crm_proc_plugin | crm_proc_heartbeat | crm_proc_cpg)) { return TRUE; } return FALSE; } /*! * \internal * \brief Determine if we need to use an alternate node to * fence the target. If so return that node's uname * * \retval NULL, no alternate host * \retval uname, uname of alternate host to use */ static const char * check_alternate_host(const char *target) { const char *alternate_host = NULL; if (g_hash_table_lookup(topology, target) && safe_str_eq(target, stonith_our_uname)) { GHashTableIter gIter; crm_node_t *entry = NULL; g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target); if (fencing_peer_active(entry) && safe_str_neq(entry->uname, target)) { alternate_host = entry->uname; break; } } if (alternate_host == NULL) { crm_err("No alternate host available to handle complex self fencing request"); g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_notice("Peer[%d] %s", entry->id, entry->uname); } } } return alternate_host; } static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer, const char *client_id) { if (remote_peer) { send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE); } else { do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL); } } static int handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request, const char *remote_peer) { int call_options = 0; int rc = -EOPNOTSUPP; xmlNode *data = NULL; xmlNode *reply = NULL; char *output = NULL; const char *op = crm_element_value(request, F_STONITH_OPERATION); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { xmlNode *reply = create_xml_node(NULL, "reply"); CRM_ASSERT(client); crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_STONITH_CLIENTID, client->id); crm_ipcs_send(client, id, reply, flags); client->request_id = 0; free_xml(reply); return 0; } else if (crm_str_eq(op, 
STONITH_OP_EXEC, TRUE)) { rc = stonith_device_action(request, &output); } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) { const char *call_id = crm_element_value(request, F_STONITH_CALLID); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); int op_timeout = 0; crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout); do_stonith_async_timeout_update(client_id, call_id, op_timeout); return 0; } else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { if (remote_peer) { create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */ } stonith_query(request, remote_peer, client_id, call_options); return 0; } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { const char *flag_name = NULL; CRM_ASSERT(client); flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE); if (flag_name) { crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id); client->options |= get_stonith_flag(flag_name); } flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE); if (flag_name) { crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id); client->options &= ~get_stonith_flag(flag_name); } if (flags & crm_ipc_client_response) { crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__); } return 0; } else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) { xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s", stonith_our_uname, client ? client->name : remote_peer, crm_element_value(dev, F_STONITH_ACTION), crm_element_value(dev, F_STONITH_TARGET)); if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) { rc = -EINPROGRESS; } } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { if (remote_peer || stand_alone) { rc = stonith_fence(request); } else if (call_options & st_opt_manual_ack) { remote_fencing_op_t *rop = NULL; xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); const char *target = crm_element_value(dev, F_STONITH_TARGET); crm_notice("Received manual confirmation that %s is fenced", target); rop = initiate_remote_stonith_op(client, request, TRUE); rc = stonith_manual_ack(request, rop); } else { const char *alternate_host = NULL; xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE); const char *target = crm_element_value(dev, F_STONITH_TARGET); const char *action = crm_element_value(dev, F_STONITH_ACTION); const char *device = crm_element_value(dev, F_STONITH_DEVICE); if (client) { int tolerance = 0; crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'", client->name, client->id, action, target, device ? device : "(any)"); crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance); if (stonith_check_fence_tolerance(tolerance, target, action)) { rc = 0; goto done; } } else { crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'", remote_peer, action, target, device ?
device : "(any)"); } alternate_host = check_alternate_host(target); if (alternate_host && client) { const char *client_id = NULL; crm_notice("Forwarding complex self fencing request to peer %s", alternate_host); if (client) { client_id = client->id; } else { client_id = crm_element_value(request, F_STONITH_CLIENTID); } /* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */ create_remote_stonith_op(client_id, request, FALSE); crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY); crm_xml_add(request, F_STONITH_CLIENTID, client->id); send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request, FALSE); rc = -EINPROGRESS; } else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) { rc = -EINPROGRESS; } } } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) { rc = stonith_fence_history(request, &data); } else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) { const char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_device_register(request, &id, FALSE); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list)); do_stonith_notify(call_options, op, rc, notify_data); free_xml(notify_data); } else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) { xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR); const char *id = crm_element_value(dev, XML_ATTR_ID); xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_device_remove(id, FALSE); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list)); do_stonith_notify(call_options, op, rc, notify_data); free_xml(notify_data); } else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) { char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_level_register(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(call_options, op, rc, notify_data); free_xml(notify_data); } else if (crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) { char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_level_remove(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(call_options, op, rc, notify_data); free_xml(notify_data); } else if (crm_str_eq(op, STONITH_OP_CONFIRM, TRUE)) { async_command_t *cmd = create_async_command(request); xmlNode *reply = stonith_construct_async_reply(cmd, NULL, NULL, 0); crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY); crm_notice("Broadcasting manual fencing confirmation for node %s", cmd->victim); send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE); free_async_command(cmd); free_xml(reply); } else { crm_err("Unknown %s from %s", op, client ? client->name : remote_peer); crm_log_xml_warn(request, "UnknownOp"); } done: /* Always reply unless the request is still in progress.
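* (-EINPROGRESS is the sentinel rc used below to mark a deferred reply.)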
* If in progress, a reply will happen async after the request * processing is finished */ if (rc != -EINPROGRESS) { crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0, id, is_set(call_options, st_opt_sync_call), call_options, crm_element_value(request, F_STONITH_CALLOPTS)); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } reply = stonith_construct_reply(request, output, data, rc); stonith_send_reply(reply, call_options, remote_peer, client_id); } free(output); free_xml(data); free_xml(reply); return rc; } static void handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer) { const char *op = crm_element_value(request, F_STONITH_OPERATION); if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { process_remote_stonith_query(request); } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { process_remote_stonith_exec(request); } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { /* Reply to a complex fencing op */ process_remote_stonith_exec(request); } else { crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer); crm_log_xml_warn(request, "UnknownOp"); } } void stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request, const char *remote_peer) { int call_options = 0; int rc = 0; gboolean is_reply = FALSE; char *op = crm_element_value_copy(request, F_STONITH_OPERATION); /* F_STONITH_OPERATION can be overwritten in remote_op_done() with crm_xml_add() * * by 0x4C2E934: crm_xml_add (xml.c:377) * by 0x40C5E9: remote_op_done (remote.c:178) * by 0x40F1D3: process_remote_stonith_exec (remote.c:1084) * by 0x40AD4F: stonith_command (commands.c:1891) * */ if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_DEBUG_3)) { is_reply = TRUE; } crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "", id, client ? client->name : remote_peer, call_options); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } if (is_reply) { handle_reply(client, request, remote_peer); } else { rc = handle_request(client, id, flags, request, remote_peer); } crm_debug("Processed %s%s from %s: %s (%d)", op, is_reply ? " reply" : "", client ? client->name : remote_peer, rc > 0 ? "" : pcmk_strerror(rc), rc); free(op); } diff --git a/include/crm/common/util.h b/include/crm/common/util.h index 859c8b4644..47f37bbf75 100644 --- a/include/crm/common/util.h +++ b/include/crm/common/util.h @@ -1,127 +1,136 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_COMMON_UTIL__H # define CRM_COMMON_UTIL__H /** * \file * \brief Utility functions * \ingroup core */ # include # include # include # include # include # include # if SUPPORT_HEARTBEAT # include # else # define NORMALNODE "normal" # define ACTIVESTATUS "active"/* fully functional, and all links are up */ # define DEADSTATUS "dead" /* Status of non-working link or machine */ # define PINGSTATUS "ping" /* Status of a working ping node */ # define JOINSTATUS "join" /* Status when an api client joins */ # define LEAVESTATUS "leave" /* Status when an api client leaves */ # define ONLINESTATUS "online"/* Status of an online client */ # define OFFLINESTATUS "offline" /* Status of an offline client */ # endif char *crm_itoa_stack(int an_int, char *buf, size_t len); char *crm_itoa(int an_int); gboolean crm_is_true(const char *s); int crm_str_to_boolean(const char *s, int *ret); int crm_parse_int(const char *text, const char *default_text); long long crm_get_msec(const char *input); unsigned long long crm_get_interval(const char *input); int char2score(const char *score); char *score2char(int score); char *score2char_stack(int score, char *buf, size_t len); int compare_version(const char *version1, const char *version2); gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval); gboolean decode_transition_key(const char *key, char **uuid, int *action, int *transition_id, int *target_rc); gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc); char * crm_strip_trailing_newline(char *str); # define safe_str_eq(a, b) crm_str_eq(a, b, FALSE) + gboolean crm_str_eq(const char *a, const char *b, gboolean use_case); + +/* used with hash tables where case does not matter */ +static inline gboolean +crm_strcase_equal(gconstpointer a, gconstpointer b) +{ + return crm_str_eq((const char *) a, (const char *) b, FALSE); +} + gboolean safe_str_neq(const char *a, const char *b); # define crm_atoi(text, default_text) crm_parse_int(text, default_text) /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *condition, gboolean do_core, gboolean do_fork); static inline gboolean is_not_set(long long word, long long bit) { return ((word & bit) == 0); } static inline gboolean is_set(long long word, long long bit) { return ((word & bit) == bit); } static inline gboolean is_set_any(long long word, long long bit) { return ((word & bit) != 0); } static inline guint crm_hash_table_size(GHashTable * hashtable) { if (hashtable == NULL) { return 0; } return g_hash_table_size(hashtable); } char *crm_meta_name(const char *field); const char *crm_meta_value(GHashTable * hash, const char *field); int rsc_op_expected_rc(lrmd_event_data_t * event); gboolean did_rsc_op_fail(lrmd_event_data_t * event, int target_rc); char *crm_md5sum(const char *buffer); char *crm_generate_uuid(void); void crm_build_path(const char *path_c, mode_t mode); int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid); int crm_exit(int rc); bool pcmk_acl_required(const char *user); #endif diff --git a/include/crm/crm.h b/include/crm/crm.h index b763cc0659..78118f46f9 100644 --- a/include/crm/crm.h +++ b/include/crm/crm.h @@ -1,201 +1,202 @@ /* * Copyright (C) 2004 Andrew Beekhof * 
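* An illustrative sketch (not part of this patch): crm_strcase_equal()
* (added to crm/common/util.h above) and crm_strcase_hash() (declared
* below) make a GHashTable match string keys case-insensitively, which
* is what the hash-table conversions throughout this patch rely on:
*
*   GHashTable *cache = g_hash_table_new_full(crm_strcase_hash,
*                                             crm_strcase_equal,
*                                             free, free);
*   g_hash_table_replace(cache, strdup("Node1"), strdup("member"));
*   g_hash_table_lookup(cache, "NODE1");
*
* The lookup finds the entry stored under "Node1", where the previous
* crm_str_hash/g_str_equal pair would have missed it.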
* This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM__H # define CRM__H /** * \file * \brief A dumping ground * \ingroup core */ # include # include # include # include # undef MIN # undef MAX # include # include # define CRM_FEATURE_SET "3.0.9" # define MINIMUM_SCHEMA_VERSION "pacemaker-1.0" # define LATEST_SCHEMA_VERSION "pacemaker-"CRM_DTD_VERSION # define EOS '\0' # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) # ifndef MAX_NAME # define MAX_NAME 256 # endif # ifndef __GNUC__ # define __builtin_expect(expr, result) (expr) # endif /* Some handy macros used by the Linux kernel */ # define __likely(expr) __builtin_expect(expr, 1) # define __unlikely(expr) __builtin_expect(expr, 0) # define CRM_META "CRM_meta" extern char *crm_system_name; /* *INDENT-OFF* */ /* Clean these up at some point, some probably should be runtime options */ # define SOCKET_LEN 1024 # define APPNAME_LEN 256 # define MAX_IPC_FAIL 5 # define MAX_IPC_DELAY 120 # define DAEMON_RESPAWN_STOP 100 # define MSG_LOG 1 # define DOT_FSA_ACTIONS 1 # define DOT_ALL_FSA_INPUTS 1 /* #define FSA_TRACE 1 */ # define INFINITY_S "INFINITY" # define MINUS_INFINITY_S "-INFINITY" # define INFINITY 1000000 /* Sub-systems */ # define CRM_SYSTEM_DC "dc" # define CRM_SYSTEM_DCIB "dcib" /* The master CIB */ # define CRM_SYSTEM_CIB "cib" # define CRM_SYSTEM_CRMD "crmd" # define CRM_SYSTEM_LRMD "lrmd" # define CRM_SYSTEM_PENGINE "pengine" # define CRM_SYSTEM_TENGINE "tengine" # define CRM_SYSTEM_STONITHD "stonithd" # define CRM_SYSTEM_MCP "pacemakerd" /* Valid operations */ # define CRM_OP_NOOP "noop" # define CRM_OP_JOIN_ANNOUNCE "join_announce" # define CRM_OP_JOIN_OFFER "join_offer" # define CRM_OP_JOIN_REQUEST "join_request" # define CRM_OP_JOIN_ACKNAK "join_ack_nack" # define CRM_OP_JOIN_CONFIRM "join_confirm" # define CRM_OP_DIE "die_no_respawn" # define CRM_OP_RETRIVE_CIB "retrieve_cib" # define CRM_OP_PING "ping" # define CRM_OP_THROTTLE "throttle" # define CRM_OP_VOTE "vote" # define CRM_OP_NOVOTE "no-vote" # define CRM_OP_HELLO "hello" # define CRM_OP_HBEAT "dc_beat" # define CRM_OP_PECALC "pe_calc" # define CRM_OP_ABORT "abort" # define CRM_OP_QUIT "quit" # define CRM_OP_LOCAL_SHUTDOWN "start_shutdown" # define CRM_OP_SHUTDOWN_REQ "req_shutdown" # define CRM_OP_SHUTDOWN "do_shutdown" # define CRM_OP_FENCE "stonith" # define CRM_OP_EVENTCC "event_cc" # define CRM_OP_TEABORT "te_abort" # define CRM_OP_TEABORTED "te_abort_confirmed" /* we asked */ # define CRM_OP_TE_HALT "te_halt" # define CRM_OP_TECOMPLETE "te_complete" # define CRM_OP_TETIMEOUT "te_timeout" # define CRM_OP_TRANSITION "transition" # define CRM_OP_REGISTER "register" # define CRM_OP_IPC_FWD "ipc_fwd" # define CRM_OP_DEBUG_UP "debug_inc" # define CRM_OP_DEBUG_DOWN "debug_dec" # define CRM_OP_INVOKE_LRM "lrm_invoke" # define CRM_OP_LRM_REFRESH "lrm_refresh" /* Deprecated */ # define CRM_OP_LRM_QUERY "lrm_query" # define 
CRM_OP_LRM_DELETE "lrm_delete" # define CRM_OP_LRM_FAIL "lrm_fail" # define CRM_OP_PROBED "probe_complete" # define CRM_OP_NODES_PROBED "probe_nodes_complete" # define CRM_OP_REPROBE "probe_again" # define CRM_OP_CLEAR_FAILCOUNT "clear_failcount" # define CRM_OP_RELAXED_SET "one-or-more" # define CRM_OP_RM_NODE_CACHE "rm_node_cache" # define CRMD_JOINSTATE_DOWN "down" # define CRMD_JOINSTATE_PENDING "pending" # define CRMD_JOINSTATE_MEMBER "member" # define CRMD_JOINSTATE_NACK "banned" # define CRMD_ACTION_DELETE "delete" # define CRMD_ACTION_CANCEL "cancel" # define CRMD_ACTION_MIGRATE "migrate_to" # define CRMD_ACTION_MIGRATED "migrate_from" # define CRMD_ACTION_START "start" # define CRMD_ACTION_STARTED "running" # define CRMD_ACTION_STOP "stop" # define CRMD_ACTION_STOPPED "stopped" # define CRMD_ACTION_PROMOTE "promote" # define CRMD_ACTION_PROMOTED "promoted" # define CRMD_ACTION_DEMOTE "demote" # define CRMD_ACTION_DEMOTED "demoted" # define CRMD_ACTION_NOTIFY "notify" # define CRMD_ACTION_NOTIFIED "notified" # define CRMD_ACTION_STATUS "monitor" /* short names */ # define RSC_DELETE CRMD_ACTION_DELETE # define RSC_CANCEL CRMD_ACTION_CANCEL # define RSC_MIGRATE CRMD_ACTION_MIGRATE # define RSC_MIGRATED CRMD_ACTION_MIGRATED # define RSC_START CRMD_ACTION_START # define RSC_STARTED CRMD_ACTION_STARTED # define RSC_STOP CRMD_ACTION_STOP # define RSC_STOPPED CRMD_ACTION_STOPPED # define RSC_PROMOTE CRMD_ACTION_PROMOTE # define RSC_PROMOTED CRMD_ACTION_PROMOTED # define RSC_DEMOTE CRMD_ACTION_DEMOTE # define RSC_DEMOTED CRMD_ACTION_DEMOTED # define RSC_NOTIFY CRMD_ACTION_NOTIFY # define RSC_NOTIFIED CRMD_ACTION_NOTIFIED # define RSC_STATUS CRMD_ACTION_STATUS /* *INDENT-ON* */ typedef GList *GListPtr; # include # include # include # define crm_str_hash g_str_hash_traditional +guint crm_strcase_hash(gconstpointer v); guint g_str_hash_traditional(gconstpointer v); #endif diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c index d2bf780953..0981e4551b 100644 --- a/lib/cluster/membership.c +++ b/lib/cluster/membership.c @@ -1,674 +1,674 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include GHashTable *crm_peer_cache = NULL; GHashTable *crm_remote_peer_cache = NULL; unsigned long long crm_peer_seq = 0; gboolean crm_have_quorum = FALSE; int crm_remote_peer_cache_size(void) { if (crm_remote_peer_cache == NULL) { return 0; } return g_hash_table_size(crm_remote_peer_cache); } void crm_remote_peer_cache_add(const char *node_name) { crm_node_t *node = g_hash_table_lookup(crm_remote_peer_cache, node_name); if (node == NULL) { crm_trace("added %s to remote cache", node_name); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); /* check the allocation before touching any field */ node->flags = crm_remote_node; node->uname = strdup(node_name); node->uuid = strdup(node_name); node->state = strdup(CRM_NODE_MEMBER); g_hash_table_replace(crm_remote_peer_cache, node->uname, node); } } void crm_remote_peer_cache_remove(const char *node_name) { g_hash_table_remove(crm_remote_peer_cache, node_name); } static void remote_cache_refresh_helper(xmlNode *cib, const char *xpath, const char *field, int flags) { const char *remote = NULL; crm_node_t *node = NULL; xmlXPathObjectPtr xpathObj = NULL; int max = 0; int lpc = 0; xpathObj = xpath_search(cib, xpath); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *xml = getXpathResult(xpathObj, lpc); CRM_CHECK(xml != NULL, continue); remote = crm_element_value(xml, field); if (remote) { crm_trace("added %s to remote cache", remote); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); /* as above, assert before the first dereference */ node->flags = flags; node->uname = strdup(remote); node->uuid = strdup(remote); node->state = strdup(CRM_NODE_MEMBER); g_hash_table_replace(crm_remote_peer_cache, node->uname, node); } } freeXpathObject(xpathObj); } void crm_remote_peer_cache_refresh(xmlNode *cib) { const char *xpath = NULL; g_hash_table_remove_all(crm_remote_peer_cache); /* remote nodes associated with a cluster resource */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR "[@name='remote-node']"; remote_cache_refresh_helper(cib, xpath, "value", crm_remote_node | crm_remote_container); /* baremetal nodes defined by connection resources */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE "[@type='remote'][@provider='pacemaker']"; remote_cache_refresh_helper(cib, xpath, "id", crm_remote_node | crm_remote_baremetal); /* baremetal nodes we have seen in the config that may or may not have connection * resources associated with them anymore */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE "[@remote_node='true']"; remote_cache_refresh_helper(cib, xpath, "id", crm_remote_node | crm_remote_baremetal); } gboolean crm_is_peer_active(const crm_node_t * node) { if(node == NULL) { return FALSE; } if (is_set(node->flags, crm_remote_node)) { /* remote nodes are never considered active members.
This * guarantees they will never be considered for DC membership.*/ return FALSE; } #if SUPPORT_COROSYNC if (is_openais_cluster()) { return crm_is_corosync_peer_active(node); } #endif #if SUPPORT_HEARTBEAT if (is_heartbeat_cluster()) { return crm_is_heartbeat_peer_active(node); } #endif crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type())); return FALSE; } static gboolean crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; crm_node_t *search = user_data; if (search == NULL) { return FALSE; } else if (search->id && node->id != search->id) { return FALSE; } else if (search->id == 0 && safe_str_neq(node->uname, search->uname)) { return FALSE; } else if (crm_is_peer_active(value) == FALSE) { crm_notice("Removing %s/%u from the membership list", node->uname, node->id); return TRUE; } return FALSE; } guint reap_crm_member(uint32_t id, const char *name) { int matches = 0; crm_node_t search; if (crm_peer_cache == NULL) { crm_trace("Nothing to do, cache not initialized"); return 0; } search.id = id; search.uname = name ? strdup(name) : NULL; matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search); if(matches) { crm_notice("Purged %d peers with id=%u and/or uname=%s from the membership cache", matches, id, name); } else { crm_info("No peers with id=%u and/or uname=%s exist", id, name); } free(search.uname); return matches; } static void crm_count_peer(gpointer key, gpointer value, gpointer user_data) { guint *count = user_data; crm_node_t *node = value; if (crm_is_peer_active(node)) { *count = *count + 1; } } guint crm_active_peers(void) { guint count = 0; if (crm_peer_cache) { g_hash_table_foreach(crm_peer_cache, crm_count_peer, &count); } return count; } static void destroy_crm_node(gpointer data) { crm_node_t *node = data; crm_trace("Destroying entry for node %u: %s", node->id, node->uname); free(node->addr); free(node->uname); free(node->state); free(node->uuid); free(node->expected); free(node); } void crm_peer_init(void) { if (crm_peer_cache == NULL) { - crm_peer_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_crm_node); + crm_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, free, destroy_crm_node); } if (crm_remote_peer_cache == NULL) { - crm_remote_peer_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, destroy_crm_node); + crm_remote_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, destroy_crm_node); } } void crm_peer_destroy(void) { if (crm_peer_cache != NULL) { crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache)); g_hash_table_destroy(crm_peer_cache); crm_peer_cache = NULL; } if (crm_remote_peer_cache != NULL) { crm_trace("Destroying remote peer cache with %d members", g_hash_table_size(crm_remote_peer_cache)); g_hash_table_destroy(crm_remote_peer_cache); crm_remote_peer_cache = NULL; } } void (*crm_status_callback) (enum crm_status_type, crm_node_t *, const void *) = NULL; void crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *)) { crm_status_callback = dispatch; } static void crm_dump_peer_hash(int level, const char *caller) { GHashTableIter iter; const char *id = NULL; crm_node_t *node = NULL; g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) { do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id); } } static gboolean 
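/* The crm_peer_init() hunk above switches both caches to
 * crm_strcase_hash()/crm_strcase_equal() so that hash lookups agree with
 * the strcasecmp() comparisons used by crm_find_peer() below. A minimal
 * sketch of such a hash, mirroring GLib's traditional string hash with
 * each byte folded to lower case first (illustrative only; the shipped
 * implementation may differ in detail):
 *
 *   guint
 *   crm_strcase_hash(gconstpointer v)
 *   {
 *       const signed char *p;
 *       guint32 h = 0;
 *
 *       for (p = v; *p != '\0'; p++) {
 *           h = (h << 5) - h + (guint) g_ascii_tolower(*p);
 *       }
 *       return h;
 *   }
 *
 * Whatever the exact formula, the pairing must honor the hash/equal
 * contract: any two names that crm_strcase_equal() treats as equal
 * ("Node1" and "node1") must hash to the same bucket, or lookups miss
 * entries that are logically present.
 */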
crm_hash_find_by_data(gpointer key, gpointer value, gpointer user_data) { if(value == user_data) { return TRUE; } return FALSE; } crm_node_t * crm_find_peer_full(unsigned int id, const char *uname, int flags) { crm_node_t *node = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (flags & CRM_GET_PEER_REMOTE) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); } if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) { node = crm_find_peer(id, uname); } return node; } crm_node_t * crm_get_peer_full(unsigned int id, const char *uname, int flags) { crm_node_t *node = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (flags & CRM_GET_PEER_REMOTE) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); } if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) { node = crm_get_peer(id, uname); } return node; } crm_node_t * crm_find_peer(unsigned int id, const char *uname) { GHashTableIter iter; crm_node_t *node = NULL; crm_node_t *by_id = NULL; crm_node_t *by_name = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (uname != NULL) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->uname && strcasecmp(node->uname, uname) == 0) { crm_trace("Name match: %s = %p", node->uname, node); by_name = node; break; } } } if (id > 0) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->id == id) { crm_trace("ID match: %u = %p", node->id, node); by_id = node; break; } } } node = by_id; /* Good default */ if(by_id == by_name) { /* Nothing to do if they match (both being NULL also counts) */ crm_trace("Consistent: %p for %u/%s", by_id, id, uname); } else if(by_id == NULL && by_name) { crm_trace("Only one: %p for %u/%s", by_name, id, uname); if(id && by_name->id) { crm_dump_peer_hash(LOG_WARNING, __FUNCTION__); crm_crit("Node %u and %u share the same name '%s'", id, by_name->id, uname); node = NULL; /* Create a new one */ } else { node = by_name; } } else if(by_name == NULL && by_id) { crm_trace("Only one: %p for %u/%s", by_id, id, uname); if(uname && by_id->uname) { crm_dump_peer_hash(LOG_WARNING, __FUNCTION__); crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct", uname, by_id->uname, id, uname); } } else if(uname && by_id->uname) { crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u", by_id->uname, by_name->uname, id); } else if(id && by_name->id) { crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname); } else { /* Simple merge */ /* Only corosync-based clusters use nodeids * * The functions that call crm_update_peer_state() only know the nodeid, * so 'by_id' is authoritative when merging * * Same for crm_update_peer_proc() */ crm_dump_peer_hash(LOG_DEBUG, __FUNCTION__); crm_info("Merging %p into %p", by_name, by_id); g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name); } return node; } /* coverity[-alloc] Memory is referenced in one or both hashtables */ crm_node_t * crm_get_peer(unsigned int id, const char *uname) { crm_node_t *node = NULL; char *uname_lookup = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); node = crm_find_peer(id, uname); if (node == NULL) { char *uniqueid = crm_generate_uuid(); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); crm_info("Created entry %s/%p for node %s/%u (%d total)", uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache)); g_hash_table_replace(crm_peer_cache, uniqueid, node); }
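/* For reference, the by_id/by_name reconciliation in crm_find_peer()
 * above reduces to this table:
 *
 *   by_id   by_name    result
 *   ------  -------    ------
 *   NULL    NULL       no entry (the caller may create one)
 *   NULL    found      by_name, unless both ids are set and differ
 *                      (then NULL, forcing a fresh entry)
 *   found   NULL       by_id, with a critical warning if the unames
 *                      conflict
 *   same    same       that entry
 *   A       B          warn if both sides carry a uname or an id;
 *                      otherwise merge B into A, since only
 *                      corosync-based callers know the nodeid
 */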
if(id && uname == NULL && node->uname == NULL) { uname_lookup = get_node_name(id); uname = uname_lookup; crm_trace("Inferred a name of '%s' for node %u", uname, id); } if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) { crm_info("Node %u is now known as %s", id, uname); } if(id > 0 && node->id == 0) { node->id = id; } if(uname && node->uname == NULL) { int lpc, len = strlen(uname); for (lpc = 0; lpc < len; lpc++) { if (uname[lpc] >= 'A' && uname[lpc] <= 'Z') { crm_warn("Node names with capitals are discouraged, consider changing '%s' to something else", uname); break; } } node->uname = strdup(uname); if (crm_status_callback) { crm_status_callback(crm_status_uname, node, NULL); } } if(node->uuid == NULL) { const char *uuid = crm_peer_uuid(node); if (uuid) { crm_info("Node %u has uuid %s", id, uuid); } else { crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname); } } free(uname_lookup); return node; } crm_node_t * crm_update_peer(const char *source, unsigned int id, uint64_t born, uint64_t seen, int32_t votes, uint32_t children, const char *uuid, const char *uname, const char *addr, const char *state) { #if SUPPORT_PLUGIN gboolean addr_changed = FALSE; gboolean votes_changed = FALSE; #endif crm_node_t *node = NULL; id = get_corosync_id(id, uuid); node = crm_get_peer(id, uname); CRM_ASSERT(node != NULL); if (node->uuid == NULL) { if (is_openais_cluster()) { /* Yes, overrule whatever was passed in */ crm_peer_uuid(node); } else if (uuid != NULL) { node->uuid = strdup(uuid); } } if (children > 0) { crm_update_peer_proc(source, node, children, state); } if (state != NULL) { crm_update_peer_state(source, node, state, seen); } #if SUPPORT_HEARTBEAT if (born != 0) { node->born = born; } #endif #if SUPPORT_PLUGIN /* These were only used by the plugin */ if (born != 0) { node->born = born; } if (votes > 0 && node->votes != votes) { votes_changed = TRUE; node->votes = votes; } if (addr != NULL) { if (node->addr == NULL || crm_str_eq(node->addr, addr, FALSE) == FALSE) { addr_changed = TRUE; free(node->addr); node->addr = strdup(addr); } } if (addr_changed || votes_changed) { crm_info("%s: Node %s: id=%u state=%s addr=%s%s votes=%d%s born=" U64T " seen=" U64T " proc=%.32x", source, node->uname, node->id, node->state, node->addr, addr_changed ? " (new)" : "", node->votes, votes_changed ? 
" (new)" : "", node->born, node->last_seen, node->processes); } #endif return node; } void crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const char *status) { uint32_t last = 0; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL", source, peer2text(flag), status); return); last = node->processes; if (status == NULL) { node->processes = flag; if (node->processes != last) { changed = TRUE; } } else if (safe_str_eq(status, ONLINESTATUS)) { if ((node->processes & flag) == 0) { set_bit(node->processes, flag); changed = TRUE; } #if SUPPORT_PLUGIN } else if (safe_str_eq(status, CRM_NODE_MEMBER)) { if (flag > 0 && node->processes != flag) { node->processes = flag; changed = TRUE; } #endif } else if (node->processes & flag) { clear_bit(node->processes, flag); changed = TRUE; } if (changed) { if (status == NULL && flag <= crm_proc_none) { crm_info("%s: Node %s[%u] - all processes are now offline", source, node->uname, node->id); } else { crm_info("%s: Node %s[%u] - %s is now %s", source, node->uname, node->id, peer2text(flag), status); } if (crm_status_callback) { crm_status_callback(crm_status_processes, node, &last); } } else { crm_trace("%s: Node %s[%u] - %s is unchanged (%s)", source, node->uname, node->id, peer2text(flag), status); } } void crm_update_peer_expected(const char *source, crm_node_t * node, const char *expected) { char *last = NULL; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected); return); last = node->expected; if (expected != NULL && safe_str_neq(node->expected, expected)) { node->expected = strdup(expected); changed = TRUE; } if (changed) { crm_info("%s: Node %s[%u] - expected state is now %s (was %s)", source, node->uname, node->id, expected, last); free(last); } else { crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)", source, node->uname, node->id, expected); } } void crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership) { char *last = NULL; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set 'state' to %s", source, state); return); last = node->state; if (state != NULL && safe_str_neq(node->state, state)) { node->state = strdup(state); changed = TRUE; } if (membership != 0 && safe_str_eq(node->state, CRM_NODE_MEMBER)) { node->last_seen = membership; } if (changed) { crm_notice("%s: Node %s[%u] - state is now %s (was %s)", source, node->uname, node->id, state, last); if (crm_status_callback) { enum crm_status_type status_type = crm_status_nstate; if (is_set(node->flags, crm_remote_node)) { status_type = crm_status_rstate; } crm_status_callback(status_type, node, last); } free(last); } else { crm_trace("%s: Node %s[%u] - state is unchanged (%s)", source, node->uname, node->id, state); } } int crm_terminate_member(int nodeid, const char *uname, void *unused) { /* Always use the synchronous, non-mainloop version */ return stonith_api_kick(nodeid, uname, 120, TRUE); } int crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection) { return stonith_api_kick(nodeid, uname, 120, TRUE); } diff --git a/lib/common/utils.c b/lib/common/utils.c index b31a60e304..133af9e877 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -1,2569 +1,2581 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free 
Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MAXLINE # define MAXLINE 512 #endif #ifdef HAVE_GETOPT_H # include #endif #ifndef PW_BUFFER_LEN # define PW_BUFFER_LEN 500 #endif CRM_TRACE_INIT_DATA(common); gboolean crm_config_error = FALSE; gboolean crm_config_warning = FALSE; char *crm_system_name = NULL; int node_score_red = 0; int node_score_green = 0; int node_score_yellow = 0; int node_score_infinity = INFINITY; static struct crm_option *crm_long_options = NULL; static const char *crm_app_description = NULL; static char *crm_short_options = NULL; static const char *crm_app_usage = NULL; int crm_exit(int rc) { mainloop_cleanup(); #if HAVE_LIBXML2 crm_trace("cleaning up libxml"); crm_xml_cleanup(); #endif crm_trace("exit %d", rc); qb_log_fini(); free(crm_short_options); free(crm_system_name); exit(ABS(rc)); /* Always exit with a positive value so that it can be passed to crm_error * * Otherwise the system wraps it around and people * have to jump through hoops figuring out what the * error was */ return rc; /* Can never happen, but allows return crm_exit(rc) * where "return rc" was used previously - which * keeps compilers happy. 
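* * A concrete example of the positive-exit-status mapping above (a
* sketch, assuming Linux errno numbering): crm_exit(-ENOTCONN)
* terminates the process with status 107, i.e. ABS(-ENOTCONN), and a
* wrapper can map that status back through pcmk_strerror(-status) to
* recover "Transport endpoint is not connected".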
*/ } gboolean check_time(const char *value) { if (crm_get_msec(value) < 5000) { return FALSE; } return TRUE; } gboolean check_timer(const char *value) { if (crm_get_msec(value) < 0) { return FALSE; } return TRUE; } gboolean check_boolean(const char *value) { int tmp = FALSE; if (crm_str_to_boolean(value, &tmp) != 1) { return FALSE; } return TRUE; } gboolean check_number(const char *value) { errno = 0; if (value == NULL) { return FALSE; } else if (safe_str_eq(value, MINUS_INFINITY_S)) { } else if (safe_str_eq(value, INFINITY_S)) { } else { crm_int_helper(value, NULL); } if (errno != 0) { return FALSE; } return TRUE; } gboolean check_utilization(const char *value) { char *end = NULL; long number = strtol(value, &end, 10); if(end && end[0] != '%') { return FALSE; } else if(number < 0) { return FALSE; } return TRUE; } int char2score(const char *score) { int score_f = 0; if (score == NULL) { } else if (safe_str_eq(score, MINUS_INFINITY_S)) { score_f = -node_score_infinity; } else if (safe_str_eq(score, INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "+" INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "red")) { score_f = node_score_red; } else if (safe_str_eq(score, "yellow")) { score_f = node_score_yellow; } else if (safe_str_eq(score, "green")) { score_f = node_score_green; } else { score_f = crm_parse_int(score, NULL); if (score_f > 0 && score_f > node_score_infinity) { score_f = node_score_infinity; } else if (score_f < 0 && score_f < -node_score_infinity) { score_f = -node_score_infinity; } } return score_f; } char * score2char_stack(int score, char *buf, size_t len) { if (score >= node_score_infinity) { strncpy(buf, INFINITY_S, 9); } else if (score <= -node_score_infinity) { strncpy(buf, MINUS_INFINITY_S , 10); } else { return crm_itoa_stack(score, buf, len); } return buf; } char * score2char(int score) { if (score >= node_score_infinity) { return strdup(INFINITY_S); } else if (score <= -node_score_infinity) { return strdup("-" INFINITY_S); } return crm_itoa(score); } const char * cluster_option(GHashTable * options, gboolean(*validate) (const char *), const char *name, const char *old_name, const char *def_value) { const char *value = NULL; CRM_ASSERT(name != NULL); if (options != NULL) { value = g_hash_table_lookup(options, name); } if (value == NULL && old_name && options != NULL) { value = g_hash_table_lookup(options, old_name); if (value != NULL) { crm_config_warn("Using deprecated name '%s' for" " cluster option '%s'", old_name, name); g_hash_table_insert(options, strdup(name), strdup(value)); value = g_hash_table_lookup(options, old_name); } } if (value == NULL) { crm_trace("Using default value '%s' for cluster option '%s'", def_value, name); if (options == NULL) { return def_value; } g_hash_table_insert(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } if (validate && validate(value) == FALSE) { crm_config_err("Value '%s' for cluster option '%s' is invalid." 
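/* A note on the score helpers above: char2score() saturates, so
 * char2score("2000000") and char2score("INFINITY") both come back as
 * node_score_infinity (1000000 by default), "red"/"yellow"/"green" map
 * to the configurable node-health scores, and anything else goes
 * through crm_parse_int(). score2char_stack() writes up to 10 bytes
 * ("-INFINITY" plus the terminator), so callers should supply a buffer
 * with len >= 10.
 */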
" Defaulting to %s", value, name, def_value); g_hash_table_replace(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } return value; } const char * get_cluster_pref(GHashTable * options, pe_cluster_option * option_list, int len, const char *name) { int lpc = 0; const char *value = NULL; gboolean found = FALSE; for (lpc = 0; lpc < len; lpc++) { if (safe_str_eq(name, option_list[lpc].name)) { found = TRUE; value = cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } CRM_CHECK(found, crm_err("No option named: %s", name)); CRM_ASSERT(value != NULL); return value; } void config_metadata(const char *name, const char *version, const char *desc_short, const char *desc_long, pe_cluster_option * option_list, int len) { int lpc = 0; fprintf(stdout, "" "\n" "\n" " %s\n" " %s\n" " %s\n" " \n", name, version, desc_long, desc_short); for (lpc = 0; lpc < len; lpc++) { if (option_list[lpc].description_long == NULL && option_list[lpc].description_short == NULL) { continue; } fprintf(stdout, " \n" " %s\n" " \n" " %s%s%s\n" " \n", option_list[lpc].name, option_list[lpc].description_short, option_list[lpc].type, option_list[lpc].default_value, option_list[lpc].description_long ? option_list[lpc]. description_long : option_list[lpc].description_short, option_list[lpc].values ? " Allowed values: " : "", option_list[lpc].values ? option_list[lpc].values : ""); } fprintf(stdout, " \n\n"); } void verify_all_options(GHashTable * options, pe_cluster_option * option_list, int len) { int lpc = 0; for (lpc = 0; lpc < len; lpc++) { cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } char * crm_concat(const char *prefix, const char *suffix, char join) { int len = 0; char *new_str = NULL; CRM_ASSERT(prefix != NULL); CRM_ASSERT(suffix != NULL); len = strlen(prefix) + strlen(suffix) + 2; new_str = malloc(len); if(new_str) { sprintf(new_str, "%s%c%s", prefix, join, suffix); new_str[len - 1] = 0; } return new_str; } char * generate_hash_key(const char *crm_msg_reference, const char *sys) { char *hash_key = crm_concat(sys ? 
sys : "none", crm_msg_reference, '_'); crm_trace("created hash key: (%s)", hash_key); return hash_key; } char * crm_itoa_stack(int an_int, char *buffer, size_t len) { if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } char * crm_itoa(int an_int) { int len = 32; char *buffer = NULL; buffer = malloc(len + 1); if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } void crm_build_path(const char *path_c, mode_t mode) { int offset = 1, len = 0; char *path = strdup(path_c); CRM_CHECK(path != NULL, return); for (len = strlen(path); offset < len; offset++) { if (path[offset] == '/') { path[offset] = 0; if (mkdir(path, mode) < 0 && errno != EEXIST) { crm_perror(LOG_ERR, "Could not create directory '%s'", path); break; } path[offset] = '/'; } } if (mkdir(path, mode) < 0 && errno != EEXIST) { crm_perror(LOG_ERR, "Could not create directory '%s'", path); } free(path); } int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid) { int rc = -1; char *buffer = NULL; struct passwd pwd; struct passwd *pwentry = NULL; buffer = calloc(1, PW_BUFFER_LEN); getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry); if (pwentry) { rc = 0; if (uid) { *uid = pwentry->pw_uid; } if (gid) { *gid = pwentry->pw_gid; } crm_trace("Cluster user %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid); } else { crm_err("Cluster user %s does not exist", name); } free(buffer); return rc; } static int crm_version_helper(const char *text, char **end_text) { int atoi_result = -1; CRM_ASSERT(end_text != NULL); errno = 0; if (text != NULL && text[0] != 0) { atoi_result = (int)strtol(text, end_text, 10); if (errno == EINVAL) { crm_err("Conversion of '%s' %c failed", text, text[0]); atoi_result = -1; } } return atoi_result; } /* * version1 < version2 : -1 * version1 = version2 : 0 * version1 > version2 : 1 */ int compare_version(const char *version1, const char *version2) { int rc = 0; int lpc = 0; char *ver1_copy = NULL, *ver2_copy = NULL; char *rest1 = NULL, *rest2 = NULL; if (version1 == NULL && version2 == NULL) { return 0; } else if (version1 == NULL) { return -1; } else if (version2 == NULL) { return 1; } ver1_copy = strdup(version1); ver2_copy = strdup(version2); rest1 = ver1_copy; rest2 = ver2_copy; while (1) { int digit1 = 0; int digit2 = 0; lpc++; if (rest1 == rest2) { break; } if (rest1 != NULL) { digit1 = crm_version_helper(rest1, &rest1); } if (rest2 != NULL) { digit2 = crm_version_helper(rest2, &rest2); } if (digit1 < digit2) { rc = -1; break; } else if (digit1 > digit2) { rc = 1; break; } if (rest1 != NULL && rest1[0] == '.') { rest1++; } if (rest1 != NULL && rest1[0] == 0) { rest1 = NULL; } if (rest2 != NULL && rest2[0] == '.') { rest2++; } if (rest2 != NULL && rest2[0] == 0) { rest2 = NULL; } } free(ver1_copy); free(ver2_copy); if (rc == 0) { crm_trace("%s == %s (%d)", version1, version2, lpc); } else if (rc < 0) { crm_trace("%s < %s (%d)", version1, version2, lpc); } else if (rc > 0) { crm_trace("%s > %s (%d)", version1, version2, lpc); } return rc; } gboolean do_stderr = FALSE; void g_hash_destroy_str(gpointer data) { free(data); } #include /* #include */ /* #include */ long long crm_int_helper(const char *text, char **end_text) { long long result = -1; char *local_end_text = NULL; int saved_errno = 0; errno = 0; if (text != NULL) { #ifdef ANSI_ONLY if (end_text != NULL) { result = strtol(text, end_text, 10); } else { result = strtol(text, &local_end_text, 10); } #else if (end_text != NULL) { result = strtoll(text, end_text, 10); } else { result = 
strtoll(text, &local_end_text, 10); } #endif saved_errno = errno; /* CRM_CHECK(errno != EINVAL); */ if (errno == EINVAL) { crm_err("Conversion of %s failed", text); result = -1; } else if (errno == ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); } else if (errno != 0) { crm_perror(LOG_ERR, "Conversion of %s failed:", text); } if (local_end_text != NULL && local_end_text[0] != '\0') { crm_err("Characters left over after parsing '%s': '%s'", text, local_end_text); } errno = saved_errno; } return result; } int crm_parse_int(const char *text, const char *default_text) { int atoi_result = -1; if (text != NULL) { atoi_result = crm_int_helper(text, NULL); if (errno == 0) { return atoi_result; } } if (default_text != NULL) { atoi_result = crm_int_helper(default_text, NULL); if (errno == 0) { return atoi_result; } } else { crm_err("No default conversion value supplied"); } return -1; } gboolean safe_str_neq(const char *a, const char *b) { if (a == b) { return FALSE; } else if (a == NULL || b == NULL) { return TRUE; } else if (strcasecmp(a, b) == 0) { return FALSE; } return TRUE; } gboolean crm_is_true(const char *s) { gboolean ret = FALSE; if (s != NULL) { crm_str_to_boolean(s, &ret); } return ret; } int crm_str_to_boolean(const char *s, int *ret) { if (s == NULL) { return -1; } else if (strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) { *ret = TRUE; return 1; } else if (strcasecmp(s, "false") == 0 || strcasecmp(s, "off") == 0 || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) { *ret = FALSE; return 1; } return -1; } #ifndef NUMCHARS # define NUMCHARS "0123456789." #endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif unsigned long long crm_get_interval(const char *input) { unsigned long long msec = 0; if (input == NULL) { return msec; } else if (input[0] != 'P') { long long tmp = crm_get_msec(input); if(tmp > 0) { msec = tmp; } } else { crm_time_t *interval = crm_time_parse_duration(input); msec = 1000 * crm_time_get_seconds(interval); crm_time_free(interval); } return msec; } long long crm_get_msec(const char *input) { const char *cp = input; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = -1; char *end_text = NULL; /* double dret; */ if (input == NULL) { return msec; } cp += strspn(cp, WHITESPACE); units = cp + strspn(cp, NUMCHARS); units += strspn(units, WHITESPACE); if (strchr(NUMCHARS, *cp) == NULL) { return msec; } if (strncasecmp(units, "ms", 2) == 0 || strncasecmp(units, "msec", 4) == 0) { multiplier = 1; divisor = 1; } else if (strncasecmp(units, "us", 2) == 0 || strncasecmp(units, "usec", 4) == 0) { multiplier = 1; divisor = 1000; } else if (strncasecmp(units, "s", 1) == 0 || strncasecmp(units, "sec", 3) == 0) { multiplier = 1000; divisor = 1; } else if (strncasecmp(units, "m", 1) == 0 || strncasecmp(units, "min", 3) == 0) { multiplier = 60 * 1000; divisor = 1; } else if (strncasecmp(units, "h", 1) == 0 || strncasecmp(units, "hr", 2) == 0) { multiplier = 60 * 60 * 1000; divisor = 1; } else if (*units != EOS && *units != '\n' && *units != '\r') { return msec; } msec = crm_int_helper(cp, &end_text); msec *= multiplier; msec /= divisor; /* dret += 0.5; */ /* msec = (long long)dret; */ return msec; } char * generate_op_key(const char *rsc_id, const char *op_type, int interval) { int len = 35; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); len += 
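/* crm_get_msec() above accepts an optional unit suffix, and a bare
 * number is read as seconds (the default multiplier is 1000):
 *
 *   crm_get_msec("500ms")  == 500
 *   crm_get_msec("10s")    == 10000
 *   crm_get_msec("1m")     == 60000
 *   crm_get_msec("30")     == 30000
 *   crm_get_msec("bogus")  == -1
 *
 * crm_get_interval() additionally accepts ISO 8601 durations that
 * begin with 'P', e.g. "PT1M" also yields 60000.
 */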
strlen(op_type); len += strlen(rsc_id); op_id = malloc(len); CRM_CHECK(op_id != NULL, return NULL); sprintf(op_id, "%s_%s_%d", rsc_id, op_type, interval); return op_id; } gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval) { char *notify = NULL; char *mutable_key = NULL; char *mutable_key_ptr = NULL; int len = 0, offset = 0, ch = 0; CRM_CHECK(key != NULL, return FALSE); *interval = 0; len = strlen(key); offset = len - 1; crm_trace("Source: %s", key); while (offset > 0 && isdigit(key[offset])) { int digits = len - offset; ch = key[offset] - '0'; CRM_CHECK(ch < 10, return FALSE); CRM_CHECK(ch >= 0, return FALSE); while (digits > 1) { digits--; ch = ch * 10; } *interval += ch; offset--; } crm_trace(" Interval: %d", *interval); CRM_CHECK(key[offset] == '_', return FALSE); mutable_key = strdup(key); mutable_key[offset] = 0; offset--; while (offset > 0 && key[offset] != '_') { offset--; } CRM_CHECK(key[offset] == '_', free(mutable_key); return FALSE); mutable_key_ptr = mutable_key + offset + 1; crm_trace(" Action: %s", mutable_key_ptr); *op_type = strdup(mutable_key_ptr); mutable_key[offset] = 0; offset--; CRM_CHECK(mutable_key != mutable_key_ptr, free(mutable_key); return FALSE); notify = strstr(mutable_key, "_post_notify"); if (notify && safe_str_eq(notify, "_post_notify")) { notify[0] = 0; } notify = strstr(mutable_key, "_pre_notify"); if (notify && safe_str_eq(notify, "_pre_notify")) { notify[0] = 0; } crm_trace(" Resource: %s", mutable_key); *rsc_id = mutable_key; return TRUE; } char * generate_notify_key(const char *rsc_id, const char *notify_type, const char *op_type) { int len = 12; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); CRM_CHECK(notify_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); len += strlen(notify_type); if(len > 0) { op_id = malloc(len); } if (op_id != NULL) { sprintf(op_id, "%s_%s_notify_%s_0", rsc_id, notify_type, op_type); } return op_id; } char * generate_transition_magic_v202(const char *transition_key, int op_status) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%s", op_status, transition_key); } return fail_state; } char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d;%s", op_status, op_rc, transition_key); } return fail_state; } gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc) { int res = 0; char *key = NULL; gboolean result = TRUE; CRM_CHECK(magic != NULL, return FALSE); CRM_CHECK(op_rc != NULL, return FALSE); CRM_CHECK(op_status != NULL, return FALSE); key = calloc(1, strlen(magic) + 1); res = sscanf(magic, "%d:%d;%s", op_status, op_rc, key); if (res != 3) { crm_warn("Only found %d items in: '%s'", res, magic); free(key); return FALSE; } CRM_CHECK(decode_transition_key(key, uuid, transition_id, action_id, target_rc), result = FALSE); free(key); return result; } char * generate_transition_key(int transition_id, int action_id, int target_rc, const char *node) { int len = 40; char *fail_state = NULL; CRM_CHECK(node != NULL, return NULL); len += strlen(node); fail_state = 
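/* The key/magic helpers in this block define the string formats that
 * the rest of the stack parses back:
 *
 *   op key:           <rsc>_<task>_<interval>, e.g. "vip_monitor_10000"
 *   notify key:       <rsc>_<type>_notify_<op>_0
 *   transition key:   <action>:<transition>:<target_rc>:<uuid>
 *   transition magic: <op_status>:<op_rc>;<transition key>
 *
 * parse_op_key() inverts generate_op_key(), including stripping a
 * trailing "_pre_notify"/"_post_notify" from the resource portion.
 */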
malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d:%d:%-*s", action_id, transition_id, target_rc, 36, node); } return fail_state; } gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, int *target_rc) { int res = 0; gboolean done = FALSE; CRM_CHECK(uuid != NULL, return FALSE); CRM_CHECK(target_rc != NULL, return FALSE); CRM_CHECK(action_id != NULL, return FALSE); CRM_CHECK(transition_id != NULL, return FALSE); *uuid = calloc(1, 37); res = sscanf(key, "%d:%d:%d:%36s", action_id, transition_id, target_rc, *uuid); switch (res) { case 4: /* Post Pacemaker 0.6 */ done = TRUE; break; case 3: case 2: /* this can be tricky - the UUID might start with an integer */ /* Until Pacemaker 0.6 */ done = TRUE; *target_rc = -1; res = sscanf(key, "%d:%d:%36s", action_id, transition_id, *uuid); if (res == 2) { *action_id = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); } else if (res != 3) { CRM_CHECK(res == 3, done = FALSE); } break; case 1: /* Prior to Heartbeat 2.0.8 */ done = TRUE; *action_id = -1; *target_rc = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); break; default: crm_crit("Unhandled sscanf result (%d) for %s", res, key); } if (strlen(*uuid) != 36) { crm_warn("Bad UUID (%s) in sscanf result (%d) for %s", *uuid, res, key); } if (done == FALSE) { crm_err("Cannot decode '%s' rc=%d", key, res); free(*uuid); *uuid = NULL; *target_rc = -1; *action_id = -1; *transition_id = -1; } return done; } void filter_action_parameters(xmlNode * param_set, const char *version) { char *key = NULL; char *timeout = NULL; char *interval = NULL; const char *attr_filter[] = { XML_ATTR_ID, XML_ATTR_CRM_VERSION, XML_LRM_ATTR_OP_DIGEST, }; gboolean do_delete = FALSE; int lpc = 0; static int meta_len = 0; if (meta_len == 0) { meta_len = strlen(CRM_META); } if (param_set == NULL) { return; } for (lpc = 0; lpc < DIMOF(attr_filter); lpc++) { xml_remove_prop(param_set, attr_filter[lpc]); } key = crm_meta_name(XML_LRM_ATTR_INTERVAL); interval = crm_element_value_copy(param_set, key); free(key); key = crm_meta_name(XML_ATTR_TIMEOUT); timeout = crm_element_value_copy(param_set, key); if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; do_delete = FALSE; if (strncasecmp(prop_name, CRM_META, meta_len) == 0) { do_delete = TRUE; } if (do_delete) { xml_remove_prop(param_set, prop_name); } } } if (crm_get_msec(interval) > 0 && compare_version(version, "1.0.8") > 0) { /* Re-instate the operation's timeout value */ if (timeout != NULL) { crm_xml_add(param_set, key, timeout); } } free(interval); free(timeout); free(key); } void filter_reload_parameters(xmlNode * param_set, const char *restart_string) { int len = 0; char *name = NULL; char *match = NULL; if (param_set == NULL) { return; } if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; name = NULL; len = strlen(prop_name) + 3; name = malloc(len); if(name) { sprintf(name, " %s ", prop_name); name[len - 1] = 0; match = strstr(restart_string, name); } if (match == NULL) { crm_trace("%s not found in %s", prop_name, restart_string); xml_remove_prop(param_set, prop_name); } free(name); } } } extern bool crm_is_daemon; /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *assert_condition, gboolean do_core, 
gboolean do_fork) { int rc = 0; int pid = 0; int status = 0; /* Implied by the parent's error logging below */ /* crm_write_blackbox(0); */ if(crm_is_daemon == FALSE) { /* This is a command line tool - do not fork */ /* crm_add_logfile(NULL); * Record it to a file? */ crm_enable_stderr(TRUE); /* Make sure stderr is enabled so we can tell the caller */ do_fork = FALSE; /* Just crash if needed */ } if (do_core == FALSE) { crm_err("%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition); return; } else if (do_fork) { pid = fork(); } else { crm_err("%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition); } if (pid == -1) { crm_crit("%s: Cannot create core for non-fatal assert at %s:%d : %s", function, file, line, assert_condition); return; } else if(pid == 0) { /* Child process */ abort(); return; } /* Parent process */ crm_err("%s: Forked child %d to record non-fatal assert at %s:%d : %s", function, pid, file, line, assert_condition); crm_write_blackbox(SIGTRAP, NULL); do { rc = waitpid(pid, &status, 0); if(rc == pid) { return; /* Job done */ } } while(errno == EINTR); if (errno == ECHILD) { /* crm_mon does this */ crm_trace("Cannot wait on forked child %d - SIGCHLD is probably set to SIG_IGN", pid); return; } crm_perror(LOG_ERR, "Cannot wait on forked child %d", pid); } char * generate_series_filename(const char *directory, const char *series, int sequence, gboolean bzip) { int len = 40; char *filename = NULL; const char *ext = "raw"; CRM_CHECK(directory != NULL, return NULL); CRM_CHECK(series != NULL, return NULL); #if !HAVE_BZLIB_H bzip = FALSE; #endif len += strlen(directory); len += strlen(series); filename = malloc(len); CRM_CHECK(filename != NULL, return NULL); if (bzip) { ext = "bz2"; } sprintf(filename, "%s/%s-%d.%s", directory, series, sequence, ext); return filename; } int get_last_sequence(const char *directory, const char *series) { FILE *file_strm = NULL; int start = 0, length = 0, read_len = 0; char *series_file = NULL; char *buffer = NULL; int seq = 0; int len = 36; CRM_CHECK(directory != NULL, return 0); CRM_CHECK(series != NULL, return 0); len += strlen(directory); len += strlen(series); series_file = malloc(len); CRM_CHECK(series_file != NULL, return 0); sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "r"); if (file_strm == NULL) { crm_debug("Series file %s does not exist", series_file); free(series_file); return 0; } /* see how big the file is */ start = ftell(file_strm); fseek(file_strm, 0L, SEEK_END); length = ftell(file_strm); fseek(file_strm, 0L, start); CRM_ASSERT(length >= 0); CRM_ASSERT(start == ftell(file_strm)); if (length <= 0) { crm_info("%s was not valid", series_file); free(buffer); buffer = NULL; } else { crm_trace("Reading %d bytes from file", length); buffer = calloc(1, (length + 1)); read_len = fread(buffer, 1, length, file_strm); if (read_len != length) { crm_err("Calculated and read bytes differ: %d vs. 
%d", length, read_len); free(buffer); buffer = NULL; } } seq = crm_parse_int(buffer, "0"); fclose(file_strm); crm_trace("Found %d in %s", seq, series_file); free(series_file); free(buffer); return seq; } void write_last_sequence(const char *directory, const char *series, int sequence, int max) { int rc = 0; int len = 36; FILE *file_strm = NULL; char *series_file = NULL; CRM_CHECK(directory != NULL, return); CRM_CHECK(series != NULL, return); if (max == 0) { return; } if (max > 0 && sequence >= max) { sequence = 0; } len += strlen(directory); len += strlen(series); series_file = malloc(len); if(series_file) { sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "w"); } if (file_strm != NULL) { rc = fprintf(file_strm, "%d", sequence); if (rc < 0) { crm_perror(LOG_ERR, "Cannot write to series file %s", series_file); } } else { crm_err("Cannot open series file %s for writing", series_file); } if (file_strm != NULL) { fflush(file_strm); fclose(file_strm); } crm_trace("Wrote %d to %s", sequence, series_file); free(series_file); } #define LOCKSTRLEN 11 int crm_pid_active(long pid) { if (pid <= 0) { return -1; } else if (kill(pid, 0) < 0 && errno == ESRCH) { return 0; } #ifndef HAVE_PROC_PID return 1; #else { int rc = 0; int running = 0; char proc_path[PATH_MAX], exe_path[PATH_MAX], myexe_path[PATH_MAX]; /* check to make sure pid hasn't been reused by another process */ snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", pid); rc = readlink(proc_path, exe_path, PATH_MAX - 1); if (rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } exe_path[rc] = 0; snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", (long unsigned int)getpid()); rc = readlink(proc_path, myexe_path, PATH_MAX - 1); if (rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } myexe_path[rc] = 0; if (strcmp(exe_path, myexe_path) == 0) { running = 1; } } bail: return running; #endif } static int crm_read_pidfile(const char *filename) { int fd; long pid = -1; char buf[LOCKSTRLEN + 1]; if ((fd = open(filename, O_RDONLY)) < 0) { goto bail; } if (read(fd, buf, sizeof(buf)) < 1) { goto bail; } if (sscanf(buf, "%lu", &pid) > 0) { if (pid <= 0) { pid = -ESRCH; } } bail: if (fd >= 0) { close(fd); } return pid; } static int crm_pidfile_inuse(const char *filename, long mypid) { long pid = 0; struct stat sbuf; char buf[LOCKSTRLEN + 1]; int rc = -ENOENT, fd = 0; if ((fd = open(filename, O_RDONLY)) >= 0) { if (fstat(fd, &sbuf) >= 0 && sbuf.st_size < LOCKSTRLEN) { sleep(2); /* if someone was about to create one, * give'm a sec to do so */ } if (read(fd, buf, sizeof(buf)) > 0) { if (sscanf(buf, "%lu", &pid) > 0) { crm_trace("Got pid %lu from %s\n", pid, filename); if (pid <= 1) { /* Invalid pid */ rc = -ENOENT; unlink(filename); } else if (mypid && pid == mypid) { /* In use by us */ rc = pcmk_ok; } else if (crm_pid_active(pid) == FALSE) { /* Contains a stale value */ unlink(filename); rc = -ENOENT; } else if (mypid && pid != mypid) { /* locked by existing process - give up */ rc = -EEXIST; } } } close(fd); } return rc; } static int crm_lock_pidfile(const char *filename) { long mypid = 0; int fd = 0, rc = 0; char buf[LOCKSTRLEN + 1]; mypid = (unsigned long)getpid(); rc = crm_pidfile_inuse(filename, 0); if (rc == -ENOENT) { /* exists but the process is not active */ } else if (rc != pcmk_ok) { /* locked by existing process - give up */ return rc; } if ((fd = open(filename, O_CREAT | O_WRONLY | O_EXCL, 0644)) < 0) { /* Hmmh, why did we fail? 
Anyway, nothing we can do about it */ return -errno; } snprintf(buf, sizeof(buf), "%*lu\n", LOCKSTRLEN - 1, mypid); rc = write(fd, buf, LOCKSTRLEN); close(fd); if (rc != LOCKSTRLEN) { crm_perror(LOG_ERR, "Incomplete write to %s", filename); return -errno; } return crm_pidfile_inuse(filename, mypid); } void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile) { int rc; long pid; const char *devnull = "/dev/null"; if (daemonize == FALSE) { return; } /* Check before we even try... */ rc = crm_pidfile_inuse(pidfile, 1); if(rc < pcmk_ok && rc != -ENOENT) { pid = crm_read_pidfile(pidfile); crm_err("%s: already running [pid %ld in %s]", name, pid, pidfile); printf("%s: already running [pid %ld in %s]\n", name, pid, pidfile); crm_exit(rc); } pid = fork(); if (pid < 0) { fprintf(stderr, "%s: could not start daemon\n", name); crm_perror(LOG_ERR, "fork"); crm_exit(EINVAL); } else if (pid > 0) { crm_exit(pcmk_ok); } rc = crm_lock_pidfile(pidfile); if(rc < pcmk_ok) { crm_err("Could not lock '%s' for %s: %s (%d)", pidfile, name, pcmk_strerror(rc), rc); printf("Could not lock '%s' for %s: %s (%d)\n", pidfile, name, pcmk_strerror(rc), rc); crm_exit(rc); } umask(S_IWGRP | S_IWOTH | S_IROTH); close(STDIN_FILENO); (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ close(STDOUT_FILENO); (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ close(STDERR_FILENO); (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ } gboolean crm_is_writable(const char *dir, const char *file, const char *user, const char *group, gboolean need_both) { int s_res = -1; struct stat buf; char *full_file = NULL; const char *target = NULL; gboolean pass = TRUE; gboolean readwritable = FALSE; CRM_ASSERT(dir != NULL); if (file != NULL) { full_file = crm_concat(dir, file, '/'); target = full_file; s_res = stat(full_file, &buf); if (s_res == 0 && S_ISREG(buf.st_mode) == FALSE) { crm_err("%s must be a regular file", target); pass = FALSE; goto out; } } if (s_res != 0) { target = dir; s_res = stat(dir, &buf); if (s_res != 0) { crm_err("%s must exist and be a directory", dir); pass = FALSE; goto out; } else if (S_ISDIR(buf.st_mode) == FALSE) { crm_err("%s must be a directory", dir); pass = FALSE; } } if (user) { struct passwd *sys_user = NULL; sys_user = getpwnam(user); readwritable = (sys_user != NULL && buf.st_uid == sys_user->pw_uid && (buf.st_mode & (S_IRUSR | S_IWUSR))); if (readwritable == FALSE) { crm_err("%s must be owned and r/w by user %s", target, user); if (need_both) { pass = FALSE; } } } if (group) { struct group *sys_grp = getgrnam(group); readwritable = (sys_grp != NULL && buf.st_gid == sys_grp->gr_gid && (buf.st_mode & (S_IRGRP | S_IWGRP))); if (readwritable == FALSE) { if (need_both || user == NULL) { pass = FALSE; crm_err("%s must be owned and r/w by group %s", target, group); } else { crm_warn("%s should be owned and r/w by group %s", target, group); } } } out: free(full_file); return pass; } char * crm_strip_trailing_newline(char *str) { int len; if (str == NULL) { return str; } for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; len--) { str[len] = '\0'; } return str; } gboolean crm_str_eq(const char *a, const char *b, gboolean use_case) { if (use_case) { return g_strcmp0(a, b) == 0; /* TODO - Figure out which calls, if any, really need to be case independant */ } else if (a == b) { return TRUE; } else if (a == NULL || b == NULL) { /* shouldn't be comparing NULLs */ return FALSE; } else if (strcasecmp(a, b) == 0) { return TRUE; } return FALSE; } char * crm_meta_name(const char *field) { int lpc = 0; 
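/* crm_meta_name() joins CRM_META to the field with '_' and then
 * rewrites '-' to '_' so the result is usable as a shell variable
 * name: crm_meta_name("migration-threshold") yields
 * "CRM_meta_migration_threshold", and crm_meta_value() uses the same
 * key to look the value up again.
 */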
int max = 0; char *crm_name = NULL; CRM_CHECK(field != NULL, return NULL); crm_name = crm_concat(CRM_META, field, '_'); /* Massage the names so they can be used as shell variables */ max = strlen(crm_name); for (; lpc < max; lpc++) { switch (crm_name[lpc]) { case '-': crm_name[lpc] = '_'; break; } } return crm_name; } const char * crm_meta_value(GHashTable * hash, const char *field) { char *key = NULL; const char *value = NULL; key = crm_meta_name(field); if (key) { value = g_hash_table_lookup(hash, key); free(key); } return value; } static struct option * crm_create_long_opts(struct crm_option *long_options) { struct option *long_opts = NULL; #ifdef HAVE_GETOPT_H int index = 0, lpc = 0; /* * A previous, possibly poor, choice of '?' as the short form of --help * means that getopt_long() returns '?' for both --help and for "unknown option" * * This dummy entry allows us to differentiate between the two in crm_get_option() * and exit with the correct error code */ long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = "__dummmy__"; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = '_'; index++; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].name[0] == '-') { continue; } long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); /*fprintf(stderr, "Creating %d %s = %c\n", index, * long_options[lpc].name, long_options[lpc].val); */ long_opts[index].name = long_options[lpc].name; long_opts[index].has_arg = long_options[lpc].has_arg; long_opts[index].flag = long_options[lpc].flag; long_opts[index].val = long_options[lpc].val; index++; } /* Now create the list terminator */ long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = NULL; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = 0; #endif return long_opts; } void crm_set_options(const char *short_options, const char *app_usage, struct crm_option *long_options, const char *app_desc) { if (short_options) { crm_short_options = strdup(short_options); } else if (long_options) { int lpc = 0; int opt_string_len = 0; char *local_short_options = NULL; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].val && long_options[lpc].val != '-' && long_options[lpc].val < UCHAR_MAX) { local_short_options = realloc(local_short_options, opt_string_len + 4); local_short_options[opt_string_len++] = long_options[lpc].val; /* getopt(3) says: Two colons mean an option takes an optional arg; */ if (long_options[lpc].has_arg == optional_argument) { local_short_options[opt_string_len++] = ':'; } if (long_options[lpc].has_arg >= required_argument) { local_short_options[opt_string_len++] = ':'; } local_short_options[opt_string_len] = 0; } } crm_short_options = local_short_options; crm_trace("Generated short option string: '%s'", local_short_options); } if (long_options) { crm_long_options = long_options; } if (app_desc) { crm_app_description = app_desc; } if (app_usage) { crm_app_usage = app_usage; } } int crm_get_option(int argc, char **argv, int *index) { return crm_get_option_long(argc, argv, index, NULL); } int crm_get_option_long(int argc, char **argv, int *index, const char **longname) { #ifdef HAVE_GETOPT_H static struct option *long_opts = NULL; if (long_opts == NULL && crm_long_options) { long_opts = crm_create_long_opts(crm_long_options); } if (long_opts) { int flag = getopt_long(argc, argv, crm_short_options, long_opts, index); switch (flag) { case 0: if 
(long_opts[*index].val) { return long_opts[*index].val; } else if (longname) { *longname = long_opts[*index].name; } else { crm_notice("Unhandled option --%s", long_opts[*index].name); return flag; } case -1: /* End of option processing */ break; case ':': crm_trace("Missing argument"); crm_help('?', 1); break; case '?': crm_help('?', *index ? 0 : 1); break; } return flag; } #endif if (crm_short_options) { return getopt(argc, argv, crm_short_options); } return -1; } void crm_help(char cmd, int exit_code) { int i = 0; FILE *stream = (exit_code ? stderr : stdout); if (cmd == 'v' || cmd == '$') { fprintf(stream, "Pacemaker %s\n", VERSION); fprintf(stream, "Written by Andrew Beekhof\n"); goto out; } if (cmd == '!') { fprintf(stream, "Pacemaker %s (Build: %s): %s\n", VERSION, BUILD_VERSION, CRM_FEATURES); goto out; } fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description); if (crm_app_usage) { fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage); } if (crm_long_options) { fprintf(stream, "Options:\n"); for (i = 0; crm_long_options[i].name != NULL; i++) { if (crm_long_options[i].flags & pcmk_option_hidden) { } else if (crm_long_options[i].flags & pcmk_option_paragraph) { fprintf(stream, "%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].flags & pcmk_option_example) { fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].val == '-' && crm_long_options[i].desc) { fprintf(stream, "%s\n", crm_long_options[i].desc); } else { /* is val printable as char ? */ if (crm_long_options[i].val && crm_long_options[i].val <= UCHAR_MAX) { fprintf(stream, " -%c,", crm_long_options[i].val); } else { fputs(" ", stream); } fprintf(stream, " --%s%s\t%s\n", crm_long_options[i].name, crm_long_options[i].has_arg == optional_argument ? "[=value]" : crm_long_options[i].has_arg == required_argument ? "=value" : "", crm_long_options[i].desc ? crm_long_options[i].desc : ""); } } } else if (crm_short_options) { fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description); for (i = 0; crm_short_options[i] != 0; i++) { int has_arg = no_argument /* 0 */; if (crm_short_options[i + 1] == ':') { if (crm_short_options[i + 2] == ':') has_arg = optional_argument /* 2 */; else has_arg = required_argument /* 1 */; } fprintf(stream, " -%c %s\n", crm_short_options[i], has_arg == optional_argument ? "[value]" : has_arg == required_argument ? 
"{value}" : ""); i += has_arg; } } fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT); out: if (exit_code >= 0) { crm_exit(exit_code); } } void cib_ipc_servers_init(qb_ipcs_service_t **ipcs_ro, qb_ipcs_service_t **ipcs_rw, qb_ipcs_service_t **ipcs_shm, struct qb_ipcs_service_handlers *ro_cb, struct qb_ipcs_service_handlers *rw_cb) { *ipcs_ro = mainloop_add_ipc_server(cib_channel_ro, QB_IPC_NATIVE, ro_cb); *ipcs_rw = mainloop_add_ipc_server(cib_channel_rw, QB_IPC_NATIVE, rw_cb); *ipcs_shm = mainloop_add_ipc_server(cib_channel_shm, QB_IPC_SHM, rw_cb); if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { crm_err("Failed to create cib servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void cib_ipc_servers_destroy(qb_ipcs_service_t *ipcs_ro, qb_ipcs_service_t *ipcs_rw, qb_ipcs_service_t *ipcs_shm) { qb_ipcs_destroy(ipcs_ro); qb_ipcs_destroy(ipcs_rw); qb_ipcs_destroy(ipcs_shm); } qb_ipcs_service_t * crmd_ipc_server_init(struct qb_ipcs_service_handlers *cb) { return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); } void attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create attrd servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create stonith-ng servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } int attrd_update_delegate(crm_ipc_t * ipc, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen, const char *user_name, gboolean is_remote) { int rc = -ENOTCONN; int max = 5; enum crm_ipc_flags flags = crm_ipc_flags_none; xmlNode *update = create_xml_node(NULL, __FUNCTION__); static gboolean connected = TRUE; static crm_ipc_t *local_ipc = NULL; if (ipc == NULL && local_ipc == NULL) { local_ipc = crm_ipc_new(T_ATTRD, 0); flags |= crm_ipc_client_response; connected = FALSE; } if (ipc == NULL) { ipc = local_ipc; } /* remap common aliases */ if (safe_str_eq(section, "reboot")) { section = XML_CIB_TAG_STATUS; } else if (safe_str_eq(section, "forever")) { section = XML_CIB_TAG_NODES; } crm_xml_add(update, F_TYPE, T_ATTRD); crm_xml_add(update, F_ORIG, crm_system_name); if (name == NULL && command == 'U') { command = 'R'; } switch (command) { case 'D': case 'U': case 'v': crm_xml_add(update, F_ATTRD_TASK, "update"); crm_xml_add(update, F_ATTRD_ATTRIBUTE, name); break; case 'R': crm_xml_add(update, F_ATTRD_TASK, "refresh"); break; case 'q': crm_xml_add(update, F_ATTRD_TASK, "query"); break; case 'C': crm_xml_add(update, F_ATTRD_TASK, "peer-remove"); break; } crm_xml_add(update, F_ATTRD_VALUE, value); crm_xml_add(update, F_ATTRD_DAMPEN, dampen); crm_xml_add(update, F_ATTRD_SECTION, section); crm_xml_add(update, F_ATTRD_HOST, host); crm_xml_add(update, F_ATTRD_SET, set); crm_xml_add_int(update, F_ATTRD_IS_REMOTE, is_remote); #if ENABLE_ACL if (user_name) { crm_xml_add(update, F_ATTRD_USER, user_name); } #endif while (max > 0) { if (connected == FALSE) { crm_info("Connecting to 
cluster... %d retries remaining", max); connected = crm_ipc_connect(ipc); } if (connected) { rc = crm_ipc_send(ipc, update, flags, 0, NULL); } if (ipc != local_ipc) { break; } else if (rc > 0) { break; } else if (rc == -EAGAIN || rc == -EALREADY) { sleep(5 - max); max--; } else { crm_ipc_close(ipc); connected = FALSE; sleep(5 - max); max--; } } free_xml(update); if (rc > 0) { crm_debug("Sent update: %s=%s for %s", name, value, host ? host : "localhost"); rc = pcmk_ok; } else { crm_debug("Could not send update %s=%s for %s: %s (%d)", name, value, host ? host : "localhost", pcmk_strerror(rc), rc); } return rc; } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" static void append_digest(lrmd_event_data_t * op, xmlNode * update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); filter_action_parameters(args_xml, version); digest = calculate_operation_digest(args_xml, version); #if 0 if (level < get_crm_log_level() && op->interval == 0 && crm_str_eq(op->op_type, CRMD_ACTION_START, TRUE)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). Source: %s\n", digest, ID(update), magic, digest_source); free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } int rsc_op_expected_rc(lrmd_event_data_t * op) { int rc = 0; if (op && op->user_data) { int dummy = 0; char *uuid = NULL; decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &rc); free(uuid); } return rc; } gboolean did_rsc_op_fail(lrmd_event_data_t * op, int target_rc) { switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: case PCMK_LRM_OP_PENDING: return FALSE; break; case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_ERROR: return TRUE; break; default: if (target_rc != op->rc) { return TRUE; } } return FALSE; } xmlNode * create_operation_update(xmlNode * parent, lrmd_event_data_t * op, const char *caller_version, int target_rc, const char *origin, int level) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *local_user_data = NULL; xmlNode *xml_op = NULL; const char *task = NULL; gboolean dc_munges_migrate_ops = (compare_version(caller_version, "3.0.3") < 0); gboolean dc_needs_unique_ops = (compare_version(caller_version, "3.0.6") < 0); CRM_CHECK(op != NULL, return NULL); do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%d)", origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status), op->interval); crm_trace("DC version: %s", caller_version); task = op->op_type; /* remap the task name under various scenarios * this makes life easier for the PE when it's trying to determine the current state */ if (crm_str_eq(task, "reload", TRUE)) { if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && crm_str_eq(task, CRMD_ACTION_MIGRATE, TRUE)) { /* if the migrate_from fails it will have enough info to do the right thing */ if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_STOP; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && op->op_status == PCMK_LRM_OP_DONE && crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { task =
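/*
 * [Editor's aside, illustrative only] The send loop in attrd_update_delegate()
 * above retries a failed IPC send up to five times, sleeping progressively
 * longer between attempts: sleep(5 - max) yields 0s, 1s, 2s, ... as max counts
 * down. The same bounded-retry shape in isolation, with try_send() as a
 * hypothetical stand-in for crm_ipc_send():
 */
#include <stdbool.h>
#include <unistd.h>

extern bool try_send(void);          /* assumed stand-in for the real send */

static bool
send_with_retry(void)
{
    int max = 5;

    while (max > 0) {
        if (try_send()) {
            return true;
        }
        sleep(5 - max);              /* back off: 0s, 1s, 2s, 3s, 4s */
        max--;
    }
    return false;                    /* give up after five attempts */
}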
CRMD_ACTION_START; } key = generate_op_key(op->rsc_id, task, op->interval); if (dc_needs_unique_ops && op->interval > 0) { op_id = strdup(key); } else if (crm_str_eq(task, CRMD_ACTION_NOTIFY, TRUE)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = generate_notify_key(op->rsc_id, n_type, n_task); /* these are not yet allowed to fail */ op->op_status = PCMK_LRM_OP_DONE; op->rc = 0; } else if (did_rsc_op_fail(op, target_rc)) { op_id = generate_op_key(op->rsc_id, "last_failure", 0); } else if (op->interval > 0) { op_id = strdup(key); } else { op_id = generate_op_key(op->rsc_id, "last", 0); } xml_op = find_entity(parent, XML_LRM_TAG_RSC_OP, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for:" " %s_%s_%d %d from %s", op->rsc_id, op->op_type, op->interval, op->call_id, origin); local_user_data = generate_transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } magic = generate_transition_magic(op->user_data, op->op_status, op->rc); crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_int(xml_op, XML_LRM_ATTR_INTERVAL, op->interval); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (%s_%s_%d): last=%lu change=%lu exec=%lu queue=%lu", op->rsc_id, op->op_type, op->interval, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if (op->interval == 0) { /* The values are the same for non-recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_RUN, op->t_run); crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } else if(op->t_rcchange) { /* last-run is not accurate for recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_rcchange); } else { /* ...but is better than nothing otherwise */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (crm_str_eq(op->op_type, CRMD_ACTION_MIGRATE, TRUE) || crm_str_eq(op->op_type, CRMD_ACTION_MIGRATED, TRUE)) { /* * Record migrate_source and migrate_target always for migrate ops. 
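/*
 * [Editor's aside, sketch only] create_operation_update() above chooses the
 * CIB history entry id so recurring ops each keep their own entry while
 * non-recurring results collapse into "<rsc>_last_0" and failures into
 * "<rsc>_last_failure_0". A simplified standalone version of that ladder (the
 * notify special case is omitted, and generate_op_key() is approximated with
 * snprintf):
 */
#include <stdbool.h>
#include <stdio.h>

static void
history_entry_id(char *buf, size_t len, const char *rsc,
                 const char *task, int interval_ms, bool failed)
{
    if (failed) {
        snprintf(buf, len, "%s_last_failure_0", rsc);  /* single failure slot */
    } else if (interval_ms > 0) {
        snprintf(buf, len, "%s_%s_%d", rsc, task, interval_ms); /* per-op id */
    } else {
        snprintf(buf, len, "%s_last_0", rsc);          /* most recent result */
    }
}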
*/ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } bool pcmk_acl_required(const char *user) { #if ENABLE_ACL if(user == NULL || strlen(user) == 0) { crm_trace("no user set"); return FALSE; } else if (strcmp(user, CRM_DAEMON_USER) == 0) { return FALSE; } else if (strcmp(user, "root") == 0) { return FALSE; } crm_trace("acls required for %s", user); return TRUE; #else crm_trace("acls not supported"); return FALSE; #endif } #if ENABLE_ACL char * uid2username(uid_t uid) { struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get password entry of uid: %d", uid); return NULL; } else { return strdup(pwent->pw_name); } } const char * crm_acl_get_set_user(xmlNode * request, const char *field, const char *peer_user) { /* field is only checked for backwards compatibility */ static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if(effective_user == NULL) { effective_user = uid2username(geteuid()); } requested_user = crm_element_value(request, XML_ACL_TAG_USER); if(requested_user == NULL) { requested_user = crm_element_value(request, field); } if (is_privileged(effective_user) == FALSE) { /* We're not running as a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = effective_user; } else if(peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is set for the request */ user = effective_user; } else if(peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (is_privileged(peer_user) == FALSE) { /* The peer is not a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } /* Yes, pointer comparison */ if(user != crm_element_value(request, XML_ACL_TAG_USER)) { crm_xml_add(request, XML_ACL_TAG_USER, user); } if(field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } void determine_request_user(const char *user, xmlNode * request, const char *field) { /* Get our internal validation out of the way first */ CRM_CHECK(user != NULL && request != NULL && field != NULL, return); /* If our peer is a privileged user, we might be doing something on behalf of someone else */ if (is_privileged(user) == FALSE) { /* We're not a privileged user, set or overwrite any existing value for $field */ crm_xml_replace(request, field, user); } else if (crm_element_value(request, field) == NULL) { /* Even if we're privileged, make sure there is always a value set */ crm_xml_replace(request, field, user); /* } else { Legal delegation */ } crm_trace("Processing msg as user '%s'", crm_element_value(request, field)); } #endif /* * This re-implements g_str_hash as it was prior to glib2-2.28: * * http://git.gnome.org/browse/glib/commit/?id=354d655ba8a54b754cb5a3efb42767327775696c * * Note that the new g_str_hash is presumably a *better* hash (it's actually * a correct
implementation of DJB's hash), but we need to preserve existing * behaviour, because the hash key ultimately determines the "sort" order * when iterating through GHashTables, which affects allocation of scores to * clone instances when iterating through rsc->allowed_nodes. It (somehow) * also appears to have some minor impact on the ordering of a few * pseudo_event IDs in the transition graph. */ guint g_str_hash_traditional(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + *p; return h; } +guint +crm_strcase_hash(gconstpointer v) +{ + const signed char *p; + guint32 h = 0; + + for (p = v; *p != '\0'; p++) + h = (h << 5) - h + g_ascii_tolower(*p); + + return h; +} + void * find_library_function(void **handle, const char *lib, const char *fn, gboolean fatal) { char *error; void *a_function; if (*handle == NULL) { *handle = dlopen(lib, RTLD_LAZY); } if (!(*handle)) { crm_err("%sCould not open %s: %s", fatal ? "Fatal: " : "", lib, dlerror()); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } return NULL; } a_function = dlsym(*handle, fn); if ((error = dlerror()) != NULL) { crm_err("%sCould not find %s in %s: %s", fatal ? "Fatal: " : "", fn, lib, error); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } } return a_function; } char * add_list_element(char *list, const char *value) { int len = 0; int last = 0; if (value == NULL) { return list; } if (list) { last = strlen(list); } len = last + 2; /* +1 space, +1 EOS */ len += strlen(value); list = realloc(list, len); sprintf(list + last, " %s", value); return list; } void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } #ifdef HAVE_UUID_UUID_H # include #endif char * crm_generate_uuid(void) { unsigned char uuid[16]; char *buffer = malloc(37); /* Including NUL byte */ uuid_generate(uuid); uuid_unparse(uuid, buffer); return buffer; } #include char * crm_md5sum(const char *buffer) { int lpc = 0, len = 0; char *digest = NULL; unsigned char raw_digest[MD5_DIGEST_SIZE]; if(buffer != NULL) { len = strlen(buffer); } crm_trace("Beginning digest of %d bytes", len); digest = malloc(2 * MD5_DIGEST_SIZE + 1); if(digest) { md5_buffer(buffer, len, raw_digest); for (lpc = 0; lpc < MD5_DIGEST_SIZE; lpc++) { sprintf(digest + (2 * lpc), "%02x", raw_digest[lpc]); } digest[(2 * MD5_DIGEST_SIZE)] = 0; crm_trace("Digest %s.", digest); } else { crm_err("Could not create digest"); } return digest; } #include #include bool crm_compress_string(const char *data, int length, int max, char **result, unsigned int *result_len) { int rc; char *compressed = NULL; char *uncompressed = strdup(data); struct timespec after_t; struct timespec before_t; if(max == 0) { max = (length * 1.1) + 600; /* recommended size */ } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &before_t); #endif /* coverity[returned_null] Ignore */ compressed = malloc(max); *result_len = max; rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length, CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK); free(uncompressed); if (rc != BZ_OK) { crm_err("Compression of %d bytes failed: %s (%d)", length, bz2_strerror(rc), rc); free(compressed); return FALSE; } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &after_t); crm_info("Compressed %d bytes into %d (ratio %d:1) in %dms", length, *result_len, length / (*result_len), (after_t.tv_sec - before_t.tv_sec) * 1000 + (after_t.tv_nsec - before_t.tv_nsec) / 1000000); #else crm_info("Compressed %d bytes into %d (ratio %d:1)", length, *result_len, length / (*result_len)); #endif
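/*
 * [Editor's aside, glib-free illustration of crm_strcase_hash() above]
 * Pairing the case-insensitive hash with a case-insensitive equality function
 * lets a GHashTable treat "Node-1" and "node-1" as the same key, which is why
 * the attrd value tables in this patch switched to
 * crm_strcase_hash/crm_strcase_equal. The same DJB-style step without glib
 * types (tolower() from <ctype.h> in place of g_ascii_tolower()):
 */
#include <ctype.h>
#include <stdio.h>

static unsigned int
strcase_hash(const char *v)
{
    unsigned int h = 0;
    const char *p;

    for (p = v; *p != '\0'; p++) {
        /* classic DJB step: h * 31 + byte, lowercased first */
        h = (h << 5) - h + (unsigned int)tolower((unsigned char)*p);
    }
    return h;
}

int
main(void)
{
    /* both spellings hash identically, so they land in the same bucket */
    printf("%u == %u\n", strcase_hash("Node-1"), strcase_hash("node-1"));
    return 0;
}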
*result = compressed; return TRUE; } diff --git a/lib/services/services.c b/lib/services/services.c index b9b30f56f9..3fbd374abb 100644 --- a/lib/services/services.c +++ b/lib/services/services.c @@ -1,641 +1,639 @@ /* * Copyright (C) 2010 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include "services_private.h" #if SUPPORT_UPSTART # include #endif #if SUPPORT_SYSTEMD # include #endif /* TODO: Develop a rollover strategy */ static int operations = 0; GHashTable *recurring_actions = NULL; svc_action_t * services_action_create(const char *name, const char *action, int interval, int timeout) { return resources_action_create(name, "lsb", NULL, name, action, interval, timeout, NULL); } const char * resources_find_service_class(const char *agent) { /* Priority is: * - lsb * - systemd * - upstart */ int rc = 0; struct stat st; char *path = NULL; #ifdef LSB_ROOT_DIR rc = asprintf(&path, "%s/%s", LSB_ROOT_DIR, agent); if (rc > 0 && stat(path, &st) == 0) { free(path); return "lsb"; } free(path); #endif #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { return "systemd"; } #endif #if SUPPORT_UPSTART if (upstart_job_exists(agent)) { return "upstart"; } #endif return NULL; } svc_action_t * resources_action_create(const char *name, const char *standard, const char *provider, const char *agent, const char *action, int interval, int timeout, GHashTable * params) { svc_action_t *op = NULL; /* * Do some up front sanity checks before we go off and * build the svc_action_t instance. */ if (crm_strlen_zero(name)) { crm_err("A service or resource action must have a name."); goto return_error; } if (crm_strlen_zero(standard)) { crm_err("A service action must have a valid standard."); goto return_error; } if (!strcasecmp(standard, "ocf") && crm_strlen_zero(provider)) { crm_err("An OCF resource action must have a provider."); goto return_error; } if (crm_strlen_zero(agent)) { crm_err("A service or resource action must have an agent."); goto return_error; } if (crm_strlen_zero(action)) { crm_err("A service or resource action must specify an action."); goto return_error; } if (safe_str_eq(action, "monitor") && (safe_str_eq(standard, "lsb") || safe_str_eq(standard, "service"))) { action = "status"; } /* * Sanity checks passed, proceed! 
*/ op = calloc(1, sizeof(svc_action_t)); op->opaque = calloc(1, sizeof(svc_action_private_t)); op->rsc = strdup(name); op->action = strdup(action); op->interval = interval; op->timeout = timeout; op->standard = strdup(standard); op->agent = strdup(agent); op->sequence = ++operations; if (asprintf(&op->id, "%s_%s_%d", name, action, interval) == -1) { goto return_error; } if (strcasecmp(op->standard, "service") == 0) { const char *expanded = resources_find_service_class(op->agent); if(expanded) { crm_debug("Found a %s agent for %s/%s", expanded, op->rsc, op->agent); free(op->standard); op->standard = strdup(expanded); } else { crm_info("Cannot determine the standard for %s (%s)", op->rsc, op->agent); free(op->standard); op->standard = strdup("lsb"); } } if (strcasecmp(op->standard, "ocf") == 0) { op->provider = strdup(provider); op->params = params; params = NULL; if (asprintf(&op->opaque->exec, "%s/resource.d/%s/%s", OCF_ROOT_DIR, provider, agent) == -1) { crm_err("Internal error: cannot create agent path"); goto return_error; } op->opaque->args[0] = strdup(op->opaque->exec); op->opaque->args[1] = strdup(action); } else if (strcasecmp(op->standard, "lsb") == 0) { if (op->agent[0] == '/') { /* if given an absolute path, use that instead * of tacking on the LSB_ROOT_DIR path to the front */ op->opaque->exec = strdup(op->agent); } else if (asprintf(&op->opaque->exec, "%s/%s", LSB_ROOT_DIR, op->agent) == -1) { crm_err("Internal error: cannot create agent path"); goto return_error; } op->opaque->args[0] = strdup(op->opaque->exec); op->opaque->args[1] = strdup(op->action); op->opaque->args[2] = NULL; #if SUPPORT_SYSTEMD } else if (strcasecmp(op->standard, "systemd") == 0) { op->opaque->exec = strdup("systemd-dbus"); #endif #if SUPPORT_UPSTART } else if (strcasecmp(op->standard, "upstart") == 0) { op->opaque->exec = strdup("upstart-dbus"); #endif } else if (strcasecmp(op->standard, "service") == 0) { op->opaque->exec = strdup(SERVICE_SCRIPT); op->opaque->args[0] = strdup(SERVICE_SCRIPT); op->opaque->args[1] = strdup(agent); op->opaque->args[2] = strdup(action); #if SUPPORT_NAGIOS } else if (strcasecmp(op->standard, "nagios") == 0) { int index = 0; if (op->agent[0] == '/') { /* if given an absolute path, use that instead * of tacking on the NAGIOS_PLUGIN_DIR path to the front */ op->opaque->exec = strdup(op->agent); } else if (asprintf(&op->opaque->exec, "%s/%s", NAGIOS_PLUGIN_DIR, op->agent) == -1) { crm_err("Internal error: cannot create agent path"); goto return_error; } op->opaque->args[0] = strdup(op->opaque->exec); index = 1; if (safe_str_eq(op->action, "monitor") && op->interval == 0) { /* Invoke --version for a nagios probe */ op->opaque->args[index] = strdup("--version"); index++; } else if (params) { GHashTableIter iter; char *key = NULL; char *value = NULL; static int args_size = sizeof(op->opaque->args) / sizeof(char *); g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value) && index <= args_size - 3) { int len = 3; char *long_opt = NULL; if (safe_str_eq(key, XML_ATTR_CRM_VERSION) || strstr(key, CRM_META "_")) { continue; } len += strlen(key); long_opt = calloc(1, len); sprintf(long_opt, "--%s", key); long_opt[len - 1] = 0; op->opaque->args[index] = long_opt; op->opaque->args[index + 1] = strdup(value); index += 2; } } op->opaque->args[index] = NULL; #endif } else { crm_err("Unknown resource standard: %s", op->standard); services_action_free(op); op = NULL; } if(params) { g_hash_table_destroy(params); } return op; 
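/*
 * [Editor's aside, hypothetical usage] A caller builds and runs an OCF monitor
 * through the constructor above roughly like this; the resource and agent
 * names are invented, and note the constructor takes ownership of the params
 * table whether it succeeds or fails:
 */
#include <crm/services.h>    /* assumed header for svc_action_t and friends */

static int
monitor_vip_once(void)
{
    /* hypothetical resource/agent names; interval and timeout are in ms */
    svc_action_t *mon = resources_action_create("vip", "ocf", "heartbeat",
                                                "IPaddr2", "monitor",
                                                10000, 20000, NULL);
    int rc = -1;

    if (mon && services_action_sync(mon)) {
        rc = mon->rc;                /* the agent's OCF exit code */
    }
    services_action_free(mon);       /* NULL-safe, as seen in the code above */
    return rc;
}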
return_error: if(params) { g_hash_table_destroy(params); } services_action_free(op); return NULL; } svc_action_t * services_action_create_generic(const char *exec, const char *args[]) { svc_action_t *op; unsigned int cur_arg; op = calloc(1, sizeof(*op)); op->opaque = calloc(1, sizeof(svc_action_private_t)); op->opaque->exec = strdup(exec); op->opaque->args[0] = strdup(exec); for (cur_arg = 1; args && args[cur_arg - 1]; cur_arg++) { op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]); if (cur_arg == DIMOF(op->opaque->args) - 1) { crm_err("svc_action_t args list not long enough for '%s' execution request.", exec); break; } } return op; } void services_action_free(svc_action_t * op) { unsigned int i; if (op == NULL) { return; } if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); } if (op->opaque->stderr_gsource) { mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } free(op->id); free(op->opaque->exec); for (i = 0; i < DIMOF(op->opaque->args); i++) { free(op->opaque->args[i]); } free(op->opaque); free(op->rsc); free(op->action); free(op->standard); free(op->agent); free(op->provider); free(op->stdout_data); free(op->stderr_data); if (op->params) { g_hash_table_destroy(op->params); op->params = NULL; } free(op); } gboolean cancel_recurring_action(svc_action_t * op) { crm_info("Cancelling operation %s", op->id); if (recurring_actions) { g_hash_table_remove(recurring_actions, op->id); } if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); } return TRUE; } gboolean services_action_cancel(const char *name, const char *action, int interval) { svc_action_t *op = NULL; char id[512]; snprintf(id, sizeof(id), "%s_%s_%d", name, action, interval); if (!(op = g_hash_table_lookup(recurring_actions, id))) { return FALSE; } if (op->pid == 0) { cancel_recurring_action(op); op->status = PCMK_LRM_OP_CANCELLED; if (op->opaque->callback) { op->opaque->callback(op); } services_action_free(op); } else { - int rc; crm_info("Cancelling in-flight op: performing early termination of %s", id); op->cancel = 1; - rc = mainloop_child_kill(op->pid); - if (rc != 0 ) { + if (mainloop_child_kill(op->pid) == FALSE) { /* even though the early termination failed, * the op will be marked as cancelled once it completes. */ crm_err("Termination of %s failed", id); } } return TRUE; } gboolean services_action_kick(const char *name, const char *action, int interval /* ms */) { svc_action_t * op = NULL; char *id = NULL; if (asprintf(&id, "%s_%s_%d", name, action, interval) == -1) { return FALSE; } op = g_hash_table_lookup(recurring_actions, id); free(id); if (op == NULL) { return FALSE; } if (op->pid) { return TRUE; } else { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); } recurring_action_timer(op); return TRUE; } } /* add new recurring operation, check for duplicates. * - if duplicate found, return TRUE, immediately reschedule op. 
* - if no dup, return FALSE, insert into recurring op list.*/ static gboolean handle_duplicate_recurring(svc_action_t * op, void (*action_callback) (svc_action_t *)) { svc_action_t * dup = NULL; if (recurring_actions == NULL) { recurring_actions = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, NULL); return FALSE; } /* check for duplicates */ dup = g_hash_table_lookup(recurring_actions, op->id); if (dup && (dup != op)) { /* update user data */ if (op->opaque->callback) { dup->opaque->callback = op->opaque->callback; dup->cb_data = op->cb_data; op->cb_data = NULL; } /* immediately execute the next interval */ if (dup->pid != 0) { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); } recurring_action_timer(dup); } /* free the dup. */ services_action_free(op); return TRUE; } return FALSE; } gboolean services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t *)) { if (action_callback) { op->opaque->callback = action_callback; } if (op->interval > 0) { if (handle_duplicate_recurring(op, action_callback) == TRUE) { /* entry rescheduled, dup freed */ return TRUE; } g_hash_table_replace(recurring_actions, op->id, op); } if (op->standard && strcasecmp(op->standard, "upstart") == 0) { #if SUPPORT_UPSTART return upstart_job_exec(op, FALSE); #endif } if (op->standard && strcasecmp(op->standard, "systemd") == 0) { #if SUPPORT_SYSTEMD return systemd_unit_exec(op, FALSE); #endif } return services_os_action_execute(op, FALSE); } gboolean services_action_sync(svc_action_t * op) { gboolean rc = TRUE; if (op == NULL) { crm_trace("No operation to execute"); return FALSE; } else if (op->standard && strcasecmp(op->standard, "upstart") == 0) { #if SUPPORT_UPSTART rc = upstart_job_exec(op, TRUE); #endif } else if (op->standard && strcasecmp(op->standard, "systemd") == 0) { #if SUPPORT_SYSTEMD rc = systemd_unit_exec(op, TRUE); #endif } else { rc = services_os_action_execute(op, TRUE); } crm_trace(" > %s_%s_%d: %s = %d", op->rsc, op->action, op->interval, op->opaque->exec, op->rc); if (op->stdout_data) { crm_trace(" > stdout: %s", op->stdout_data); } if (op->stderr_data) { crm_trace(" > stderr: %s", op->stderr_data); } return rc; } GList * get_directory_list(const char *root, gboolean files, gboolean executable) { return services_os_get_directory_list(root, files, executable); } GList * services_list(void) { return resources_list_agents("lsb", NULL); } GList * resources_list_standards(void) { GList *standards = NULL; GList *agents = NULL; standards = g_list_append(standards, strdup("ocf")); standards = g_list_append(standards, strdup("lsb")); standards = g_list_append(standards, strdup("service")); #if SUPPORT_SYSTEMD agents = systemd_unit_listall(); #else agents = NULL; #endif if (agents) { standards = g_list_append(standards, strdup("systemd")); g_list_free_full(agents, free); } #if SUPPORT_UPSTART agents = upstart_job_listall(); #else agents = NULL; #endif if (agents) { standards = g_list_append(standards, strdup("upstart")); g_list_free_full(agents, free); } #if SUPPORT_NAGIOS agents = resources_os_list_nagios_agents(); if (agents) { standards = g_list_append(standards, strdup("nagios")); g_list_free_full(agents, free); } #endif return standards; } GList * resources_list_providers(const char *standard) { if (strcasecmp(standard, "ocf") == 0) { return resources_os_list_ocf_providers(); } return NULL; } GList * resources_list_agents(const char *standard, const char *provider) { if (standard == NULL || strcasecmp(standard, "service") == 0) { GList *tmp1; GList
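/*
 * [Editor's aside, sketch only] handle_duplicate_recurring() above keys
 * in-flight recurring actions by their "name_action_interval" id in a
 * GHashTable, so a re-registered op adopts the callback of the one already
 * running instead of spawning a second copy. The lookup pattern in miniature
 * (the merge itself is left to the caller):
 */
#include <glib.h>

static GHashTable *recurring = NULL;

/* Returns TRUE if 'op' took the slot, FALSE if an op with this id exists.
 * 'id' must stay valid while the entry is in the table (no key copy). */
static gboolean
register_recurring(char *id, gpointer op)
{
    if (recurring == NULL) {
        recurring = g_hash_table_new(g_str_hash, g_str_equal);
    }
    if (g_hash_table_lookup(recurring, id) != NULL) {
        return FALSE;               /* duplicate: caller merges callbacks */
    }
    g_hash_table_insert(recurring, id, op);
    return TRUE;
}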
*tmp2; GList *result = resources_os_list_lsb_agents(); if (standard == NULL) { tmp1 = result; tmp2 = resources_os_list_ocf_agents(NULL); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } #if SUPPORT_SYSTEMD tmp1 = result; tmp2 = systemd_unit_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif #if SUPPORT_UPSTART tmp1 = result; tmp2 = upstart_job_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif return result; } else if (strcasecmp(standard, "ocf") == 0) { return resources_os_list_ocf_agents(provider); } else if (strcasecmp(standard, "lsb") == 0) { return resources_os_list_lsb_agents(); #if SUPPORT_SYSTEMD } else if (strcasecmp(standard, "systemd") == 0) { return systemd_unit_listall(); #endif #if SUPPORT_UPSTART } else if (strcasecmp(standard, "upstart") == 0) { return upstart_job_listall(); #endif #if SUPPORT_NAGIOS } else if (strcasecmp(standard, "nagios") == 0) { return resources_os_list_nagios_agents(); #endif } return NULL; } diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c index b5f680b153..419e7ffb5c 100644 --- a/lib/services/services_linux.c +++ b/lib/services/services_linux.c @@ -1,767 +1,768 @@ /* * Copyright (C) 2010 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_SIGNALFD_H #include #endif #include "crm/crm.h" #include "crm/common/mainloop.h" #include "crm/services.h" #include "services_private.h" #if SUPPORT_CIBSECRETS # include "crm/common/cib_secrets.h" #endif static inline void set_fd_opts(int fd, int opts) { int flag; if ((flag = fcntl(fd, F_GETFL)) >= 0) { if (fcntl(fd, F_SETFL, flag | opts) < 0) { crm_err("fcntl() write failed"); } } else { crm_err("fcntl() read failed"); } } static gboolean read_output(int fd, svc_action_t * op) { char *data = NULL; int rc = 0, len = 0; gboolean is_err = FALSE; char buf[500]; static const size_t buf_read_len = sizeof(buf) - 1; crm_trace("%p", op); if (fd < 0) { return FALSE; } if (fd == op->opaque->stderr_fd) { is_err = TRUE; if (op->stderr_data) { len = strlen(op->stderr_data); data = op->stderr_data; } } else if (op->stdout_data) { len = strlen(op->stdout_data); data = op->stdout_data; } do { rc = read(fd, buf, buf_read_len); if (rc > 0) { buf[rc] = 0; data = realloc(data, len + rc + 1); sprintf(data + len, "%s", buf); len += rc; } else if (errno != EINTR) { /* error or EOF * Cleanup happens in pipe_done() */ rc = FALSE; break; } } while (rc == buf_read_len || rc < 0); if (data != NULL && is_err) { op->stderr_data = data; } else if (data != NULL) { op->stdout_data = data; } return rc; } static int dispatch_stdout(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return read_output(op->opaque->stdout_fd, op); } static int 
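/*
 * [Editor's aside, illustrative only] read_output() above drains a
 * nonblocking pipe by growing the accumulated buffer with realloc() until
 * read() reports EOF or EAGAIN. The core drain loop, standalone (allocation
 * failure handling trimmed, as in the original):
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static char *
drain_fd(int fd)
{
    char buf[500];
    char *data = NULL;
    size_t len = 0;
    ssize_t rc;

    while ((rc = read(fd, buf, sizeof(buf) - 1)) != 0) {
        if (rc < 0) {
            if (errno == EINTR) {
                continue;                   /* retry an interrupted read */
            }
            break;                          /* EAGAIN: drained for now */
        }
        data = realloc(data, len + rc + 1); /* grow and append */
        memcpy(data + len, buf, rc);
        len += rc;
        data[len] = '\0';
    }
    return data;                            /* caller frees */
}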
dispatch_stderr(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return read_output(op->opaque->stderr_fd, op); } static void pipe_out_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; crm_trace("%p", op); op->opaque->stdout_gsource = NULL; if (op->opaque->stdout_fd > STDOUT_FILENO) { close(op->opaque->stdout_fd); } op->opaque->stdout_fd = -1; } static void pipe_err_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; op->opaque->stderr_gsource = NULL; if (op->opaque->stderr_fd > STDERR_FILENO) { close(op->opaque->stderr_fd); } op->opaque->stderr_fd = -1; } static struct mainloop_fd_callbacks stdout_callbacks = { .dispatch = dispatch_stdout, .destroy = pipe_out_done, }; static struct mainloop_fd_callbacks stderr_callbacks = { .dispatch = dispatch_stderr, .destroy = pipe_err_done, }; static void set_ocf_env(const char *key, const char *value, gpointer user_data) { if (setenv(key, value, 1) != 0) { crm_perror(LOG_ERR, "setenv failed for key:%s and value:%s", key, value); } } static void set_ocf_env_with_prefix(gpointer key, gpointer value, gpointer user_data) { char buffer[500]; snprintf(buffer, sizeof(buffer), "OCF_RESKEY_%s", (char *)key); set_ocf_env(buffer, value, user_data); } static void add_OCF_env_vars(svc_action_t * op) { if (!op->standard || strcasecmp("ocf", op->standard) != 0) { return; } if (op->params) { g_hash_table_foreach(op->params, set_ocf_env_with_prefix, NULL); } set_ocf_env("OCF_RA_VERSION_MAJOR", "1", NULL); set_ocf_env("OCF_RA_VERSION_MINOR", "0", NULL); set_ocf_env("OCF_ROOT", OCF_ROOT_DIR, NULL); if (op->rsc) { set_ocf_env("OCF_RESOURCE_INSTANCE", op->rsc, NULL); } if (op->agent != NULL) { set_ocf_env("OCF_RESOURCE_TYPE", op->agent, NULL); } /* Notes: this is not added to specification yet. Sept 10,2004 */ if (op->provider != NULL) { set_ocf_env("OCF_RESOURCE_PROVIDER", op->provider, NULL); } } gboolean recurring_action_timer(gpointer data) { svc_action_t *op = data; crm_debug("Scheduling another invocation of %s", op->id); /* Clean out the old result */ free(op->stdout_data); op->stdout_data = NULL; free(op->stderr_data); op->stderr_data = NULL; services_action_async(op, NULL); return FALSE; } /* Returns FALSE if 'op' should be free'd by the caller */ gboolean operation_finalize(svc_action_t * op) { int recurring = 0; if (op->interval) { if (op->cancel) { op->status = PCMK_LRM_OP_CANCELLED; cancel_recurring_action(op); } else { recurring = 1; op->opaque->repeat_timer = g_timeout_add(op->interval, recurring_action_timer, (void *)op); } } if (op->opaque->callback) { op->opaque->callback(op); } op->pid = 0; if (!recurring) { /* * If this is a recurring action, do not free explicitly. * It will get freed whenever the action gets cancelled. */ services_action_free(op); return TRUE; } return FALSE; } static void operation_finished(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { svc_action_t *op = mainloop_child_userdata(p); char *prefix = g_strdup_printf("%s:%d", op->id, op->pid); mainloop_clear_child_userdata(p); op->status = PCMK_LRM_OP_DONE; CRM_ASSERT(op->pid == pid); if (op->opaque->stderr_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ dispatch_stderr(op); } if (op->opaque->stdout_gsource) { /* Make sure we have read everything from the buffer.
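/*
 * [Editor's aside, sketch] add_OCF_env_vars()/set_ocf_env_with_prefix() above
 * export every resource parameter to the agent as OCF_RESKEY_<name>. The same
 * prefixing in isolation:
 */
#include <stdio.h>
#include <stdlib.h>

static void
export_ocf_param(const char *key, const char *value)
{
    char name[500];

    snprintf(name, sizeof(name), "OCF_RESKEY_%s", key);
    if (setenv(name, value, 1) != 0) {      /* overwrite any stale value */
        perror("setenv");
    }
}

/* export_ocf_param("ip", "10.0.0.5") yields OCF_RESKEY_ip=10.0.0.5 */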
* Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ dispatch_stdout(op); } if (signo) { if (mainloop_child_timeout(p)) { crm_warn("%s - timed out after %dms", prefix, op->timeout); op->status = PCMK_LRM_OP_TIMEOUT; op->rc = PCMK_OCF_TIMEOUT; } else { - crm_warn("%s - terminated with signal %d", prefix, signo); + do_crm_log_unlikely((op->cancel) ? LOG_INFO : LOG_WARNING, + "%s - terminated with signal %d", prefix, signo); op->status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_SIGNAL; } } else { op->rc = exitcode; crm_debug("%s - exited with rc=%d", prefix, exitcode); } g_free(prefix); prefix = g_strdup_printf("%s:%d:stderr", op->id, op->pid); crm_log_output(LOG_NOTICE, prefix, op->stderr_data); g_free(prefix); prefix = g_strdup_printf("%s:%d:stdout", op->id, op->pid); crm_log_output(LOG_DEBUG, prefix, op->stdout_data); g_free(prefix); operation_finalize(op); } static void services_handle_exec_error(svc_action_t * op, int error) { op->rc = PCMK_OCF_EXEC_ERROR; op->status = PCMK_LRM_OP_ERROR; /* Need to mimic the return codes for each standard as that's what we'll convert back from in get_uniform_rc() */ if (safe_str_eq(op->standard, "lsb") && safe_str_eq(op->action, "status")) { switch (error) { /* see execve(2) */ case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ op->rc = PCMK_LSB_STATUS_NOT_INSTALLED; op->status = PCMK_LRM_OP_NOT_INSTALLED; break; case EACCES: /* permission denied (various errors) */ /* LSB status ops don't support 'not installed' */ break; } #if SUPPORT_NAGIOS } else if (safe_str_eq(op->standard, "nagios")) { switch (error) { case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ op->rc = NAGIOS_NOT_INSTALLED; op->status = PCMK_LRM_OP_NOT_INSTALLED; break; case EACCES: /* permission denied (various errors) */ op->rc = NAGIOS_INSUFFICIENT_PRIV; break; } #endif } else { switch (error) { case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ op->rc = PCMK_OCF_NOT_INSTALLED; /* Valid for LSB */ op->status = PCMK_LRM_OP_NOT_INSTALLED; break; case EACCES: /* permission denied (various errors) */ op->rc = PCMK_OCF_INSUFFICIENT_PRIV; /* Valid for LSB */ break; } } } /* Returns FALSE if 'op' should be free'd by the caller */ gboolean services_os_action_execute(svc_action_t * op, gboolean synchronous) { int lpc; int stdout_fd[2]; int stderr_fd[2]; sigset_t mask; sigset_t old_mask; struct stat st; if (pipe(stdout_fd) < 0) { crm_err("pipe() failed"); } if (pipe(stderr_fd) < 0) { crm_err("pipe() failed"); } /* Fail fast */ if(stat(op->opaque->exec, &st) != 0) { int rc = errno; crm_warn("Cannot execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!synchronous) { return operation_finalize(op); } return FALSE; } if (synchronous) { sigemptyset(&mask); sigaddset(&mask, SIGCHLD); sigemptyset(&old_mask); if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { crm_perror(LOG_ERR, "sigprocmask() failed"); } } op->pid = fork(); switch (op->pid) { case -1: { int rc = errno; close(stdout_fd[0]); close(stdout_fd[1]); close(stderr_fd[0]); close(stderr_fd[1]); crm_err("Could not execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!synchronous) { return operation_finalize(op); } return FALSE; } case 0: /* Child */ #if defined(HAVE_SCHED_SETSCHEDULER) if (sched_getscheduler(0) != SCHED_OTHER) { struct sched_param sp; memset(&sp, 0, sizeof(sp));
sp.sched_priority = 0; if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1) { crm_perror(LOG_ERR, "Could not reset scheduling policy to SCHED_OTHER for %s", op->id); } } #endif if (setpriority(PRIO_PROCESS, 0, 0) == -1) { crm_perror(LOG_ERR, "Could not reset process priority to 0 for %s", op->id); } /* Man: The call setpgrp() is equivalent to setpgid(0,0) * _and_ compiles on BSD variants too * need to investigate if it works the same too. */ setpgid(0, 0); close(stdout_fd[0]); close(stderr_fd[0]); if (STDOUT_FILENO != stdout_fd[1]) { if (dup2(stdout_fd[1], STDOUT_FILENO) != STDOUT_FILENO) { crm_err("dup2() failed (stdout)"); } close(stdout_fd[1]); } if (STDERR_FILENO != stderr_fd[1]) { if (dup2(stderr_fd[1], STDERR_FILENO) != STDERR_FILENO) { crm_err("dup2() failed (stderr)"); } close(stderr_fd[1]); } /* close all descriptors except stdin/out/err and channels to logd */ for (lpc = getdtablesize() - 1; lpc > STDERR_FILENO; lpc--) { close(lpc); } #if SUPPORT_CIBSECRETS if (replace_secret_params(op->rsc, op->params) < 0) { /* replacing secrets failed! */ if (safe_str_eq(op->action,"stop")) { /* don't fail on stop! */ crm_info("proceeding with the stop operation for %s", op->rsc); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", op->rsc); _exit(PCMK_OCF_NOT_CONFIGURED); } } #endif /* Setup environment correctly */ add_OCF_env_vars(op); /* execute the RA */ execvp(op->opaque->exec, op->opaque->args); /* Most cases should have been already handled by stat() */ services_handle_exec_error(op, errno); _exit(op->rc); } /* Only the parent reaches here */ close(stdout_fd[1]); close(stderr_fd[1]); op->opaque->stdout_fd = stdout_fd[0]; set_fd_opts(op->opaque->stdout_fd, O_NONBLOCK); op->opaque->stderr_fd = stderr_fd[0]; set_fd_opts(op->opaque->stderr_fd, O_NONBLOCK); if (synchronous) { #ifndef HAVE_SYS_SIGNALFD_H CRM_ASSERT(FALSE); #else int status = 0; int timeout = op->timeout; int sfd = -1; time_t start = -1; struct pollfd fds[3]; int wait_rc = 0; sfd = signalfd(-1, &mask, SFD_NONBLOCK); if (sfd < 0) { crm_perror(LOG_ERR, "signalfd() failed"); } fds[0].fd = op->opaque->stdout_fd; fds[0].events = POLLIN; fds[0].revents = 0; fds[1].fd = op->opaque->stderr_fd; fds[1].events = POLLIN; fds[1].revents = 0; fds[2].fd = sfd; fds[2].events = POLLIN; fds[2].revents = 0; crm_trace("Waiting for %d", op->pid); start = time(NULL); do { int poll_rc = poll(fds, 3, timeout); if (poll_rc > 0) { if (fds[0].revents & POLLIN) { read_output(op->opaque->stdout_fd, op); } if (fds[1].revents & POLLIN) { read_output(op->opaque->stderr_fd, op); } if (fds[2].revents & POLLIN) { struct signalfd_siginfo fdsi; ssize_t s; s = read(sfd, &fdsi, sizeof(struct signalfd_siginfo)); if (s != sizeof(struct signalfd_siginfo)) { crm_perror(LOG_ERR, "Read from signal fd %d failed", sfd); } else if (fdsi.ssi_signo == SIGCHLD) { wait_rc = waitpid(op->pid, &status, WNOHANG); if (wait_rc < 0){ crm_perror(LOG_ERR, "waitpid() for %d failed", op->pid); } else if (wait_rc > 0) { break; } } } } else if (poll_rc == 0) { timeout = 0; break; } else if (poll_rc < 0) { if (errno != EINTR) { crm_perror(LOG_ERR, "poll() failed"); break; } } timeout = op->timeout - (time(NULL) - start) * 1000; } while ((op->timeout < 0 || timeout > 0)); crm_trace("Child done: %d", op->pid); if (wait_rc <= 0) { int killrc = kill(op->pid, SIGKILL); op->rc = PCMK_OCF_UNKNOWN_ERROR; if (op->timeout > 0 && timeout <= 0) { op->status = PCMK_LRM_OP_TIMEOUT; crm_warn("%s:%d - timed out after %dms", op->id, op->pid, op->timeout); } else { 
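/*
 * [Editor's aside, condensed sketch] The synchronous path above multiplexes
 * the child's stdout/stderr pipes and a signalfd for SIGCHLD through one
 * poll() loop, so output is drained while waiting for the exit, without
 * races. The skeleton of that pattern, assuming Linux and <sys/signalfd.h>;
 * unlike the real code, this sketch does not shrink the timeout as time
 * passes, and error handling is trimmed:
 */
#include <poll.h>
#include <signal.h>
#include <sys/signalfd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int
wait_child_with_output(pid_t pid, int out_fd, int err_fd, int timeout_ms)
{
    sigset_t mask;
    struct pollfd fds[3];
    int status = 0;

    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);
    sigprocmask(SIG_BLOCK, &mask, NULL);    /* must block before signalfd() */

    fds[0] = (struct pollfd){ .fd = out_fd, .events = POLLIN };
    fds[1] = (struct pollfd){ .fd = err_fd, .events = POLLIN };
    fds[2] = (struct pollfd){ .fd = signalfd(-1, &mask, SFD_NONBLOCK),
                              .events = POLLIN };

    for (;;) {
        struct signalfd_siginfo si;

        if (poll(fds, 3, timeout_ms) <= 0) {
            break;                          /* timeout or poll() error */
        }
        /* ... drain out_fd/err_fd here when fds[0]/fds[1] are readable ... */
        if ((fds[2].revents & POLLIN)
            && read(fds[2].fd, &si, sizeof(si)) == sizeof(si)  /* consume it */
            && waitpid(pid, &status, WNOHANG) == pid) {
            break;                          /* child exited and was reaped */
        }
    }
    close(fds[2].fd);
    return status;
}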
op->status = PCMK_LRM_OP_ERROR; } if (killrc && errno != ESRCH) { crm_err("kill(%d, KILL) failed: %d", op->pid, errno); } /* * From sigprocmask(2): * It is not possible to block SIGKILL or SIGSTOP. Attempts to do so are silently ignored. * * This makes it safe to skip WNOHANG here */ waitpid(op->pid, &status, 0); } else if (WIFEXITED(status)) { op->status = PCMK_LRM_OP_DONE; op->rc = WEXITSTATUS(status); crm_info("Managed %s process %d exited with rc=%d", op->id, op->pid, op->rc); } else if (WIFSIGNALED(status)) { int signo = WTERMSIG(status); op->status = PCMK_LRM_OP_ERROR; crm_err("Managed %s process %d exited with signal=%d", op->id, op->pid, signo); } #ifdef WCOREDUMP if (WCOREDUMP(status)) { crm_err("Managed %s process %d dumped core", op->id, op->pid); } #endif read_output(op->opaque->stdout_fd, op); read_output(op->opaque->stderr_fd, op); close(op->opaque->stdout_fd); close(op->opaque->stderr_fd); close(sfd); if (sigismember(&old_mask, SIGCHLD) == 0) { if (sigprocmask(SIG_UNBLOCK, &mask, NULL) < 0) { crm_perror(LOG_ERR, "sigprocmask() to unblocked failed"); } } #endif } else { crm_trace("Async waiting for %d - %s", op->pid, op->opaque->exec); mainloop_child_add(op->pid, op->timeout, op->id, op, operation_finished); op->opaque->stdout_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stdout_fd, op, &stdout_callbacks); op->opaque->stderr_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stderr_fd, op, &stderr_callbacks); } return TRUE; } GList * services_os_get_directory_list(const char *root, gboolean files, gboolean executable) { GList *list = NULL; struct dirent **namelist; int entries = 0, lpc = 0; char buffer[PATH_MAX]; entries = scandir(root, &namelist, NULL, alphasort); if (entries <= 0) { return list; } for (lpc = 0; lpc < entries; lpc++) { struct stat sb; if ('.' 
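/*
 * [Editor's aside, minimal sketch of the directory walk that continues below]
 * scandir(3) with alphasort gives a sorted snapshot of a directory; each
 * entry is then stat()ed so hidden files, subdirectories and non-executables
 * can be filtered out:
 */
#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

static void
list_executables(const char *root)
{
    struct dirent **names = NULL;
    int i, n = scandir(root, &names, NULL, alphasort);

    for (i = 0; i < n; i++) {
        char path[PATH_MAX];
        struct stat sb;

        if (names[i]->d_name[0] != '.') {          /* skip hidden entries */
            snprintf(path, sizeof(path), "%s/%s", root, names[i]->d_name);
            if (stat(path, &sb) == 0 && S_ISREG(sb.st_mode)
                && (sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
                printf("%s\n", names[i]->d_name);
            }
        }
        free(names[i]);                            /* scandir() allocates each */
    }
    free(names);
}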
== namelist[lpc]->d_name[0]) { free(namelist[lpc]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", root, namelist[lpc]->d_name); if (stat(buffer, &sb)) { continue; } if (S_ISDIR(sb.st_mode)) { if (files) { free(namelist[lpc]); continue; } } else if (S_ISREG(sb.st_mode)) { if (files == FALSE) { free(namelist[lpc]); continue; } else if (executable && (sb.st_mode & S_IXUSR) == 0 && (sb.st_mode & S_IXGRP) == 0 && (sb.st_mode & S_IXOTH) == 0) { free(namelist[lpc]); continue; } } list = g_list_append(list, strdup(namelist[lpc]->d_name)); free(namelist[lpc]); } free(namelist); return list; } GList * resources_os_list_lsb_agents(void) { return get_directory_list(LSB_ROOT_DIR, TRUE, TRUE); } GList * resources_os_list_ocf_providers(void) { return get_directory_list(OCF_ROOT_DIR "/resource.d", FALSE, TRUE); } GList * resources_os_list_ocf_agents(const char *provider) { GList *gIter = NULL; GList *result = NULL; GList *providers = NULL; if (provider) { char buffer[500]; snprintf(buffer, sizeof(buffer), "%s/resource.d/%s", OCF_ROOT_DIR, provider); return get_directory_list(buffer, TRUE, TRUE); } providers = resources_os_list_ocf_providers(); for (gIter = providers; gIter != NULL; gIter = gIter->next) { GList *tmp1 = result; GList *tmp2 = resources_os_list_ocf_agents(gIter->data); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } g_list_free_full(providers, free); return result; } #if SUPPORT_NAGIOS GList * resources_os_list_nagios_agents(void) { GList *plugin_list = NULL; GList *result = NULL; GList *gIter = NULL; plugin_list = get_directory_list(NAGIOS_PLUGIN_DIR, TRUE, TRUE); /* Make sure both the plugin and its metadata exist */ for (gIter = plugin_list; gIter != NULL; gIter = gIter->next) { const char *plugin = gIter->data; char *metadata = g_strdup_printf(NAGIOS_METADATA_DIR "/%s.xml", plugin); struct stat st; if (stat(metadata, &st) == 0) { result = g_list_append(result, strdup(plugin)); } g_free(metadata); } g_list_free_full(plugin_list, free); return result; } #endif diff --git a/pengine/clone.c b/pengine/clone.c index c4c52d1860..7a69adfeb9 100644 --- a/pengine/clone.c +++ b/pengine/clone.c @@ -1,1591 +1,1597 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #define VARIANT_CLONE 1 #include gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all); +static gint +sort_rsc_id(gconstpointer a, gconstpointer b) +{ + const resource_t *resource1 = (const resource_t *)a; + const resource_t *resource2 = (const resource_t *)b; + + CRM_ASSERT(resource1 != NULL); + CRM_ASSERT(resource2 != NULL); + + return strcmp(resource1->id, resource2->id); +} + static node_t * parent_node_instance(const resource_t * rsc, node_t * node) { node_t *ret = NULL; if (node != NULL) { ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id); } return ret; } static gboolean did_fail(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_failed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (did_fail(child_rsc)) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc = 0; node_t *node1 = NULL; node_t *node2 = NULL; gboolean can1 = TRUE; gboolean can2 = TRUE; const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* allocation order: * - active instances * - instances running on nodes with the least copies * - active instances on nodes that can't support them or are to be fenced * - failed instances * - inactive instances */ if (resource1->running_on && resource2->running_on) { if (g_list_length(resource1->running_on) < g_list_length(resource2->running_on)) { crm_trace("%s < %s: running_on", resource1->id, resource2->id); return -1; } else if (g_list_length(resource1->running_on) > g_list_length(resource2->running_on)) { crm_trace("%s > %s: running_on", resource1->id, resource2->id); return 1; } } if (resource1->running_on) { node1 = resource1->running_on->data; } if (resource2->running_on) { node2 = resource2->running_on->data; } if (node1) { node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource1->id); node1 = NULL; can1 = FALSE; } } if (node2) { node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource2->id); node2 = NULL; can2 = FALSE; } } if (can1 != can2) { if (can1) { crm_trace("%s < %s: availability of current location", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: availability of current location", resource1->id, resource2->id); return 1; } if (resource1->priority < resource2->priority) { crm_trace("%s < %s: priority", resource1->id, resource2->id); return 1; } else if (resource1->priority > resource2->priority) { crm_trace("%s > %s: priority", resource1->id, resource2->id); return -1; } if (node1 == NULL && node2 == NULL) { crm_trace("%s == %s: not active", resource1->id, resource2->id); return 0; } if (node1 != node2) { if (node1 == NULL) { crm_trace("%s > %s: active", resource1->id, resource2->id); return 1; } else if (node2 == NULL) { crm_trace("%s <
%s: active", resource1->id, resource2->id); return -1; } } can1 = can_run_resources(node1); can2 = can_run_resources(node2); if (can1 != can2) { if (can1) { crm_trace("%s < %s: can", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: can", resource1->id, resource2->id); return 1; } node1 = parent_node_instance(resource1, node1); node2 = parent_node_instance(resource2, node2); if (node1 != NULL && node2 == NULL) { crm_trace("%s < %s: not allowed", resource1->id, resource2->id); return -1; } else if (node1 == NULL && node2 != NULL) { crm_trace("%s > %s: not allowed", resource1->id, resource2->id); return 1; } if (node1 == NULL || node2 == NULL) { crm_trace("%s == %s: not allowed", resource1->id, resource2->id); return 0; } if (node1->count < node2->count) { crm_trace("%s < %s: count", resource1->id, resource2->id); return -1; } else if (node1->count > node2->count) { crm_trace("%s > %s: count", resource1->id, resource2->id); return 1; } can1 = did_fail(resource1); can2 = did_fail(resource2); if (can1 != can2) { if (can1) { crm_trace("%s > %s: failed", resource1->id, resource2->id); return 1; } crm_trace("%s < %s: failed", resource1->id, resource2->id); return -1; } if (node1 && node2) { int lpc = 0; int max = 0; node_t *n = NULL; GListPtr gIter = NULL; GListPtr list1 = NULL; GListPtr list2 = NULL; GHashTable *hash1 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); GHashTable *hash2 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); n = node_copy(resource1->running_on->data); g_hash_table_insert(hash1, (gpointer) n->details->id, n); n = node_copy(resource2->running_on->data); g_hash_table_insert(hash2, (gpointer) n->details->id, n); for (gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_rh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_lh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } for (gIter = resource2->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_rh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_lh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } /* Current location score */ node1 = g_list_nth_data(resource1->running_on, 0); node1 = g_hash_table_lookup(hash1, node1->details->id); node2 = g_list_nth_data(resource2->running_on, 0); node2 = g_hash_table_lookup(hash2, node2->details->id); if (node1->weight < node2->weight) { if (node1->weight < 0) { crm_trace("%s > %s: current score", resource1->id, resource2->id); rc = -1; goto out; } else { crm_trace("%s < 
%s: current score", resource1->id, resource2->id); rc = 1; goto out; } } else if (node1->weight > node2->weight) { crm_trace("%s > %s: current score", resource1->id, resource2->id); rc = -1; goto out; } /* All location scores */ list1 = g_hash_table_get_values(hash1); list2 = g_hash_table_get_values(hash2); list1 = g_list_sort_with_data(list1, sort_node_weight, g_list_nth_data(resource1->running_on, 0)); list2 = g_list_sort_with_data(list2, sort_node_weight, g_list_nth_data(resource2->running_on, 0)); max = g_list_length(list1); if (max < g_list_length(list2)) { max = g_list_length(list2); } for (; lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if (node1 == NULL) { crm_trace("%s < %s: colocated score NULL", resource1->id, resource2->id); rc = 1; break; } else if (node2 == NULL) { crm_trace("%s > %s: colocated score NULL", resource1->id, resource2->id); rc = -1; break; } if (node1->weight < node2->weight) { crm_trace("%s < %s: colocated score", resource1->id, resource2->id); rc = 1; break; } else if (node1->weight > node2->weight) { crm_trace("%s > %s: colocated score", resource1->id, resource2->id); rc = -1; break; } } /* Order by reverse uname - same as sort_node_weight() does? */ out: g_hash_table_destroy(hash1); /* Free mem */ g_hash_table_destroy(hash2); /* Free mem */ g_list_free(list1); g_list_free(list2); if (rc != 0) { return rc; } } rc = strcmp(resource1->id, resource2->id); crm_trace("%s %c %s: default", resource1->id, rc < 0 ? '<' : '>', resource2->id); return rc; } static node_t * can_run_instance(resource_t * rsc, node_t * node) { node_t *local_node = NULL; clone_variant_data_t *clone_data = NULL; if (can_run_resources(node) == FALSE) { goto bail; } else if (is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = parent_node_instance(rsc, node); get_clone_variant_data(clone_data, rsc->parent); if (local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if (local_node->weight < 0) { common_update_score(rsc, node->details->id, local_node->weight); pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.", rsc->id, node->details->uname); } else if (local_node->count < clone_data->clone_node_max) { pe_rsc_trace(rsc, "%s can run on %s: %d", rsc->id, node->details->uname, local_node->count); return local_node; } else { pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)", rsc->id, node->details->uname, local_node->count, clone_data->clone_node_max); } bail: if (node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static node_t * color_instance(resource_t * rsc, node_t * prefer, gboolean all_coloc, pe_working_set_t * data_set) { node_t *chosen = NULL; node_t *local_node = NULL; GHashTable *backup = NULL; pe_rsc_trace(rsc, "Processing %s %d", rsc->id, all_coloc); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependent resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if (prefer) { node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (local_prefer == NULL || local_prefer->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id, prefer->details->uname);
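/*
 * [Editor's aside, hypothetical usage of the sort_rsc_id() comparator added
 * earlier in this file] Because sort_rsc_id() matches GCompareFunc (a plain
 * strcmp() on resource ids), a deterministic, id-ordered walk of clone
 * children could be obtained with:
 */
static void
sort_children_by_id(resource_t *rsc)
{
    /* "rsc:0", "rsc:1", ... are then visited in a stable order,
     * regardless of how the list was built */
    rsc->children = g_list_sort(rsc->children, sort_rsc_id);
}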
return NULL; } } if (rsc->allowed_nodes) { GHashTableIter iter; node_t *try_node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&try_node)) { can_run_instance(rsc, try_node); } } backup = node_hash_dup(rsc->allowed_nodes); chosen = rsc->cmds->allocate(rsc, prefer, data_set); if (chosen) { local_node = pe_hash_table_lookup(rsc->parent->allowed_nodes, chosen->details->id); if (prefer && chosen && chosen->details != prefer->details) { crm_notice("Pre-allocation failed: got %s instead of %s", chosen->details->uname, prefer->details->uname); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = backup; native_deallocate(rsc); chosen = NULL; backup = NULL; } else if (local_node) { local_node->count++; } else if (is_set(rsc->flags, pe_rsc_managed)) { /* what to do? we can't enforce per-node limits in this case */ crm_config_err("%s not found in %s (list=%d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } if(backup) { g_hash_table_destroy(backup); } return chosen; } static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all) { GListPtr gIter = NULL; gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_prepend(child->rsc_cons, cons); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0) { child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); } } } node_t * clone_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GHashTableIter iter; GListPtr nIter = NULL; GListPtr gIter = NULL; GListPtr nodes = NULL; node_t *node = NULL; int allocated = 0; int loop_max = 0; int clone_max = 0; int available_nodes = 0; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Processing %s", rsc->id); /* this information is used by sort_clone_instance() when deciding in which * order to allocate clone instances */ gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; pe_rsc_trace(rsc, "%s: Coloring %s first", rsc->id, constraint->rsc_rh->id); constraint->rsc_rh->cmds->allocate(constraint->rsc_rh, prefer, data_set); } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, (pe_weights_rollback | pe_weights_positive)); } dump_node_scores(show_scores ? 
0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); /* count now tracks the number of clones currently allocated */ g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { node->count = 0; if (can_run_resources(node)) { available_nodes++; } } clone_max = clone_data->clone_max; if(available_nodes) { loop_max = clone_data->clone_max / available_nodes; } if (loop_max < 1) { loop_max = 1; } rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set); /* Pre-allocate as many instances as we can to their current location * First pre-sort the list of nodes by their placement score */ nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); for(nIter = nodes; nIter; nIter = nIter->next) { int lpc; node = nIter->data; if(clone_max <= 0) { break; } if (can_run_resources(node) == FALSE || node->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s", node->details->uname); continue; } clone_max--; pe_rsc_trace(rsc, "Pre-allocating %s (%d remaining)", node->details->uname, clone_max); for (lpc = 0; allocated < clone_data->clone_max && node->count < clone_data->clone_node_max && lpc < clone_data->clone_node_max && lpc < loop_max; lpc++) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (child->running_on && is_set(child->flags, pe_rsc_provisional) && is_not_set(child->flags, pe_rsc_failed)) { node_t *child_node = child->running_on->data; if (child_node->details == node->details && color_instance(child, node, clone_data->clone_max < available_nodes, data_set)) { pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id, node->details->uname); allocated++; break; } } } } } pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, clone_data->clone_max); g_list_free(nodes); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (g_list_length(child->running_on) > 0) { node_t *child_node = child->running_on->data; node_t *local_node = parent_node_instance(child, child->running_on->data); if (local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } if (is_not_set(child->flags, pe_rsc_provisional)) { } else if (allocated >= clone_data->clone_max) { pe_rsc_debug(rsc, "Child %s not allocated - limit reached", child->id); resource_location(child, NULL, -INFINITY, "clone_color:limit_reached", data_set); } else if (color_instance(child, NULL, clone_data->clone_max < available_nodes, data_set)) { allocated++; } } pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d", allocated, rsc->id, clone_data->clone_max); clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Done allocating %s", rsc->id); return NULL; } static void clone_update_pseudo_status(resource_t * rsc, gboolean * stopping, gboolean * starting, gboolean * active) { GListPtr gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if (rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (*starting && *stopping)
{ return; } else if (is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid); continue; } else if (is_set(action->flags, pe_action_pseudo) == FALSE && is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid); continue; } else if (safe_str_eq(RSC_STOP, action->task)) { pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid); *stopping = TRUE; } else if (safe_str_eq(RSC_START, action->task)) { if (is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); } else { pe_rsc_trace(rsc, "Starting due to: %s", action->uuid); pe_rsc_trace(rsc, "%s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static action_t * find_rsc_action(resource_t * rsc, const char *key, gboolean active_only, GListPtr * list) { action_t *match = NULL; GListPtr possible = NULL; GListPtr active = NULL; possible = find_actions(rsc->actions, key, NULL); if (active_only) { GListPtr gIter = possible; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_optional) == FALSE) { active = g_list_prepend(active, op); } } if (active && g_list_length(active) == 1) { match = g_list_nth_data(active, 0); } if (list) { *list = active; active = NULL; } } else if (possible && g_list_length(possible) == 1) { match = g_list_nth_data(possible, 0); } if (list) { *list = possible; possible = NULL; } if (possible) { g_list_free(possible); } if (active) { g_list_free(active); } return match; } static void child_ordering_constraints(resource_t * rsc, pe_working_set_t * data_set) { char *key = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *last_stop = NULL; action_t *last_start = NULL; - GListPtr gIter = rsc->children; + GListPtr gIter = NULL; gboolean active_only = TRUE; /* change to false to get the old behavior */ clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->ordered == FALSE) { return; } + /* we have to maintain a consistent sorted child list when building order constraints */ + rsc->children = g_list_sort(rsc->children, sort_rsc_id); - for (; gIter != NULL; gIter = gIter->next) { + for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; key = stop_key(child); stop = find_rsc_action(child, key, active_only, NULL); free(key); key = start_key(child); start = find_rsc_action(child, key, active_only, NULL); free(key); if (stop) { if (last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } if (start) { if (last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(resource_t * rsc, pe_working_set_t * data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; gboolean allow_dependent_migrations = TRUE; action_t *stop = NULL; action_t *stopped = NULL; action_t *start = NULL; action_t *started = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (; gIter != NULL; gIter = gIter->next) { 
resource_t *child_rsc = (resource_t *) gIter->data; gboolean starting = FALSE; gboolean stopping = FALSE; child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active); if (stopping && starting) { allow_dependent_migrations = FALSE; } child_stopping |= stopping; child_starting |= starting; } /* start */ start = start_action(rsc, NULL, !child_starting); started = custom_action(rsc, started_key(rsc), RSC_STARTED, NULL, !child_starting, TRUE, data_set); update_action_flags(start, pe_action_pseudo | pe_action_runnable); update_action_flags(started, pe_action_pseudo); started->priority = INFINITY; if (child_active || child_starting) { update_action_flags(started, pe_action_runnable); } child_ordering_constraints(rsc, data_set); if (clone_data->start_notify == NULL) { clone_data->start_notify = create_notification_boundaries(rsc, RSC_START, start, started, data_set); } /* stop */ stop = stop_action(rsc, NULL, !child_stopping); stopped = custom_action(rsc, stopped_key(rsc), RSC_STOPPED, NULL, !child_stopping, TRUE, data_set); stopped->priority = INFINITY; update_action_flags(stop, pe_action_pseudo | pe_action_runnable); if (allow_dependent_migrations) { update_action_flags(stop, pe_action_migrate_runnable); } update_action_flags(stopped, pe_action_pseudo | pe_action_runnable); if (clone_data->stop_notify == NULL) { clone_data->stop_notify = create_notification_boundaries(rsc, RSC_STOP, stop, stopped, data_set); if (clone_data->stop_notify && clone_data->start_notify) { order_actions(clone_data->stop_notify->post_done, clone_data->start_notify->pre, pe_order_optional); } } } void clone_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { resource_t *last_rsc = NULL; - GListPtr gIter = rsc->children; + GListPtr gIter; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); if (rsc->variant == pe_master) { new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); } - for (; gIter != NULL; gIter = gIter->next) { + if (clone_data->ordered) { + /* we have to maintain a consistent sorted child list when building order constraints */ + rsc->children = g_list_sort(rsc->children, sort_rsc_id); + } + for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->internal_constraints(child_rsc, data_set); order_start_start(rsc, child_rsc, pe_order_runnable_left | pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_start_start(last_rsc, child_rsc, pe_order_optional); } order_stop_stop(rsc, child_rsc, pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_stop_stop(child_rsc, last_rsc, pe_order_optional); } last_rsc = child_rsc; } } static void assign_node(resource_t * rsc, node_t * node, gboolean force) { if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = 
gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; native_assign_node(child_rsc, NULL, node, force); } return; } native_assign_node(rsc, NULL, node, force); } static resource_t * find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc, enum rsc_role_e filter, gboolean current) { node_t *node = NULL; GListPtr gIter = NULL; if (local_node == NULL) { crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, local_node->details->uname); gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { /* We only want instances that haven't failed */ node = child_rsc->fns->location(child_rsc, NULL, current); } if (filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); continue; } if (node && local_node && node->details == local_node->details) { crm_trace("Pairing %s with %s on %s", local_child->id, child_rsc->id, node->details->uname); return child_rsc; } else if (node) { crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname, local_node->details->uname); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } } crm_trace("Can't pair %s with %s", local_child->id, rsc->id); return NULL; } resource_t * find_compatible_child(resource_t * local_child, resource_t * rsc, enum rsc_role_e filter, gboolean current) { resource_t *pair = NULL; GListPtr gIter = NULL; GListPtr scratch = NULL; node_t *local_node = NULL; local_node = local_child->fns->location(local_child, NULL, current); if (local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); gIter = scratch; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_child_by_node(local_child, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } void clone_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ GListPtr gIter = rsc_lh->children; CRM_CHECK(FALSE, crm_err("This functionality is not thought to be used. 
Please report a bug.")); CRM_CHECK(rsc_lh, return); CRM_CHECK(rsc_rh, return); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_lh(child_rsc, rsc_rh, constraint); } return; } void clone_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { GListPtr gIter = NULL; gboolean do_interleave = FALSE; clone_variant_data_t *clone_data = NULL; clone_variant_data_t *clone_data_lh = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_rh != NULL, pe_err("rsc_rh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_lh->variant == pe_native, return); get_clone_variant_data(clone_data, constraint->rsc_rh); pe_rsc_trace(rsc_rh, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc_rh->id, constraint->score); if (constraint->rsc_lh->variant >= pe_clone) { get_clone_variant_data(clone_data_lh, constraint->rsc_lh); if (clone_data_lh->interleave && clone_data->clone_node_max != clone_data_lh->clone_node_max) { crm_config_err("Cannot interleave " XML_CIB_TAG_INCARNATION " %s and %s because" " they do not support the same number of" " resources per node", constraint->rsc_lh->id, constraint->rsc_rh->id); /* only the LHS side needs to be labeled as interleave */ } else if (clone_data_lh->interleave) { do_interleave = TRUE; } } if (is_set(rsc_rh->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc_rh, "%s is still provisional", rsc_rh->id); return; } else if (do_interleave) { resource_t *rh_child = NULL; rh_child = find_compatible_child(rsc_lh, rsc_rh, RSC_ROLE_UNKNOWN, FALSE); if (rh_child) { pe_rsc_debug(rsc_rh, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc_rh, "Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); } return; } else if (constraint->score >= INFINITY) { GListPtr rhs = NULL; gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { rhs = g_list_prepend(rhs, chosen); } } node_list_exclude(rsc_lh->allowed_nodes, rhs, FALSE); g_list_free(rhs); return; } gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); } } static enum action_tasks clone_child_action(action_t * action) { enum action_tasks result = no_action; resource_t *child = (resource_t *) action->rsc->children->data; if (safe_str_eq(action->task, "notify") || safe_str_eq(action->task, "notified")) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for (; lpc > 0; lpc--) { if (key[lpc] == '_' && stop == 0) { stop = lpc; } else if (key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = strdup(key + lpc); task_mutable[stop - lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); free(task_mutable); break; } } } else { result = get_complex_task(child, action->task, TRUE); } return 
result; } enum pe_action_flags clone_action_flags(action_t * action, node_t * node) { GListPtr gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); gIter = action->rsc->children; for (; gIter != NULL; gIter = gIter->next) { action_t *child_action = NULL; resource_t *child = (resource_t *) gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node); pe_rsc_trace(child, "Checking for %s in %s on %s", task_s, child->id, node ? node->details->uname : "none"); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (is_set(flags, pe_action_optional) && is_set(child_flags, pe_action_optional) == FALSE) { pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid, child_action->uuid); flags = crm_clear_bit(__FUNCTION__, action->rsc->id, flags, pe_action_optional); pe_clear_action_bit(action, pe_action_optional); } if (is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } else { GListPtr gIter2 = child->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { action_t *op = (action_t *) gIter2->data; pe_rsc_trace(child, "%s on %s (%s)", op->uuid, op->node ? op->node->details->uname : "none", op->task); } } } if (check_runnable && any_runnable == FALSE) { pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid); flags = crm_clear_bit(__FUNCTION__, action->rsc->id, flags, pe_action_runnable); if (node == NULL) { pe_clear_action_bit(action, pe_action_runnable); } } return flags; } static enum pe_graph_flags clone_update_actions_interleave(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { gboolean current = FALSE; resource_t *first_child = NULL; GListPtr gIter = then->rsc->children; enum pe_graph_flags changed = pe_graph_none; /*pe_graph_disable */ enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); /* Fix this - lazy */ if (strstr(first->uuid, "_stopped_0") || strstr(first->uuid, "_demoted_0")) { current = TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; CRM_ASSERT(then_child != NULL); first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current); if (first_child == NULL && current) { crm_trace("Ignore"); } else if (first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do?
* * If there is no-one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) { pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); assign_node(then_child, NULL, TRUE); } } else { action_t *first_action = NULL; action_t *then_action = NULL; pe_rsc_debug(then->rsc, "Pairing %s with %s", first_child->id, then_child->id); first_action = find_first_action(first_child->actions, NULL, first_task, node); then_action = find_first_action(then_child->actions, NULL, then->task, node); CRM_CHECK(first_action != NULL || is_set(first_child->flags, pe_rsc_orphan), crm_err("No action found for %s in %s (first)", first_task, first_child->id)); if (then_action == NULL && is_not_set(then_child->flags, pe_rsc_orphan) && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE && crm_str_eq(then->task, RSC_DEMOTED, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } if (first_action == NULL || then_action == NULL) { continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s -> %s", first_action->uuid, then_action->uuid); changed |= (pe_graph_updated_first | pe_graph_updated_then); } changed |= then_child->cmds->update_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type); } } return changed; } enum pe_graph_flags clone_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { const char *rsc = "none"; gboolean interleave = FALSE; enum pe_graph_flags changed = pe_graph_none; if (first->rsc != then->rsc && first->rsc && first->rsc->variant >= pe_clone && then->rsc && then->rsc->variant >= pe_clone) { clone_variant_data_t *clone_data = NULL; if (strstr(then->uuid, "_stop_0") || strstr(then->uuid, "_demote_0")) { get_clone_variant_data(clone_data, first->rsc); rsc = first->rsc->id; } else { get_clone_variant_data(clone_data, then->rsc); rsc = then->rsc->id; } interleave = clone_data->interleave; } crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? 
"yes" : "no", rsc); if (interleave) { changed = clone_update_actions_interleave(first, then, node, flags, filter, type); } else if (then->rsc) { GListPtr gIter = then->rsc->children; changed |= native_update_actions(first, then, node, flags, filter, type); for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; action_t *child_action = find_first_action(child->actions, NULL, then->task, node); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (is_set(child_flags, pe_action_runnable)) { changed |= child->cmds->update_actions(first, child_action, node, flags, filter, type); } } } } return changed; } void clone_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = rsc->children; pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); } } void clone_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; rsc->cmds->action_flags(op, NULL); } if (clone_data->start_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->start_notify); expand_notification_data(clone_data->start_notify); create_notifications(rsc, clone_data->start_notify, data_set); } if (clone_data->stop_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->stop_notify); expand_notification_data(clone_data->stop_notify); create_notifications(rsc, clone_data->stop_notify, data_set); } if (clone_data->promote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->promote_notify); expand_notification_data(clone_data->promote_notify); create_notifications(rsc, clone_data->promote_notify, data_set); } if (clone_data->demote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->demote_notify); expand_notification_data(clone_data->demote_notify); create_notifications(rsc, clone_data->demote_notify, data_set); } /* Now that the notifcations have been created we can expand the children */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } -static gint -sort_rsc_id(gconstpointer a, gconstpointer b) -{ - const resource_t *resource1 = (const resource_t *)a; - const resource_t *resource2 = (const resource_t *)b; - - CRM_ASSERT(resource1 != NULL); - CRM_ASSERT(resource2 != NULL); - - return strcmp(resource1->id, resource2->id); -} - node_t * rsc_known_on(resource_t * rsc, GListPtr * list) { GListPtr gIter = NULL; node_t *one = NULL; GListPtr result = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; rsc_known_on(child, 
&result); } } else if (rsc->known_on) { result = g_hash_table_get_values(rsc->known_on); } if (result && g_list_length(result) == 1) { one = g_list_nth_data(result, 0); } if (list) { GListPtr gIter = NULL; gIter = result; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) { *list = g_list_prepend(*list, node); } } } g_list_free(result); return one; } static resource_t * find_instance_on(resource_t * rsc, node_t * node) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; GListPtr known_list = NULL; resource_t *child = (resource_t *) gIter->data; rsc_known_on(child, &known_list); gIter2 = known_list; for (; gIter2 != NULL; gIter2 = gIter2->next) { node_t *known = (node_t *) gIter2->data; if (node->details == known->details) { g_list_free(known_list); return child; } } g_list_free(known_list); } return NULL; } gboolean clone_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean any_created = FALSE; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); rsc->children = g_list_sort(rsc->children, sort_rsc_id); if (rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if (is_not_set(rsc->flags, pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy */ resource_t *child = NULL; /* Try whoever we probed last time */ child = find_instance_on(rsc, node); if (child) { return child->cmds->create_probe(child, node, complete, force, data_set); } /* Try whoever we plan on starting there */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; node_t *local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if (local_node == NULL) { continue; } if (local_node->details == node->details) { return child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set); } } /* Fall back to the first clone instance */ child = rsc->children->data; return child->cmds->create_probe(child, node, complete, force, data_set); } gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)) { any_created = TRUE; } if (any_created && is_not_set(rsc->flags, pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy (clone :0) */ break; } } return any_created; } void clone_append_meta(resource_t * rsc, xmlNode * xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_notify) ? 
"true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); free(name); } GHashTable * clone_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } diff --git a/pengine/regression.sh b/pengine/regression.sh index cd920047f7..1d164074e5 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,747 +1,748 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # core=`dirname $0` . $core/regression.core.sh || exit 1 create_mode="true" info Generating test outputs for these tests... # do_test file description info Done. echo "" info Performing the following tests from $io_dir create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" do_test group-fail "Ensure stop order is preserved for partially active groups" do_test group-unmanaged "No need to restart r115 because r114 is unmanaged" do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails" do_test group-dependants "Account for the location preferences of things colocated with a group" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" 
echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test domain "Failover domains" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" --rc 4 do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" do_test bug-5140-require-all-false "Allow basegrp:0 to stop" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (manditory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependant clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" +do_test clone-order-16instances "Verify ordering of 16 cloned resources" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings." 
do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependant must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " do_test per-node-attrs "Per node resource parameters" echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor" do_test stop-failure-no-quorum "Stop failure without quorum" do_test stop-failure-no-fencing "Stop failure without fencing available" do_test stop-failure-with-fencing "Stop failure with fencing available" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node 
Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" do_test migrate-both-vms "Migrate two VMs that have no colocation" do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B." do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B" do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both" do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable" do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B. 
move both, a not migratable" do_test 6-migrate-group "Advanced migrate logic, migrate a group" do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false" do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping" do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping" do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A" do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping" #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder" do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Don't shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" do_test bug-cl-5168 "Don't shuffle clones" do_test bug-cl-5170 "Prevent clone from starting with on-fail=block" do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block" do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-3 "Clone-3 must be
recovered on pcmk-1 due to interleaved ordering (no colocation)" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depend on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (don't stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Don't retry failed demote actions" do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instance placement to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
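For the interleave scenarios exercised by the clone-interleave-* tests above and the interleave-* tests below, clone_update_actions_interleave() in the clone.c hunk orders instance pairs rather than whole clones, matching each "then" instance with a "first" instance active on the same node via find_compatible_child_by_node(). Here is a standalone sketch of that pairing rule with illustrative types and data, not the scheduler's own; it also mirrors the "inhibit" branch taken when no peer exists on a node.

/* Standalone sketch: pair interleaved clone instances by node. */
#include <stdio.h>
#include <string.h>

typedef struct { const char *id; const char *node; } demo_inst_t;

/* Return the "first" instance colocated with the given node, if any. */
static const demo_inst_t *
pair_by_node(const demo_inst_t *first, int n, const char *node)
{
    for (int i = 0; i < n; i++) {
        if (strcmp(first[i].node, node) == 0) {
            return &first[i];
        }
    }
    return NULL; /* no instance active on that node */
}

int
main(void)
{
    demo_inst_t first[] = { { "A:0", "node1" }, { "A:1", "node2" } };
    demo_inst_t then[]  = { { "B:0", "node2" }, { "B:1", "node3" } };

    for (int i = 0; i < 2; i++) {
        const demo_inst_t *match = pair_by_node(first, 2, then[i].node);

        if (match) {
            printf("order: %s then %s (both on %s)\n",
                   match->id, then[i].id, then[i].node);
        } else {
            /* With a mandatory ordering, an unpaired instance is kept
             * inactive, like the assign_node(then_child, NULL, TRUE)
             * branch above. */
            printf("%s has no peer on %s\n", then[i].id, then[i].node);
        }
    }
    return 0;
}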
do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive" do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score" do_test master-demote-block "Block promotion if demote fails with on-fail=block" do_test master-dependant-ban "Don't stop instances from being active because a dependant is banned from that host" do_test master-stop "Stop instances due to location constraint with role=Started" do_test master-partially-demoted-group "Allow partially demoted group to finish demoting" echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any depending resource is unmanaged " do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged " do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged " do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged " echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependency restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL #994 - with a dependant resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test stonith-4 "Stonith node state" --rc 4 do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Don't promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned
group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed." do_test failcount "Ensure failcounts are correctly expired" do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure wiht on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is disabled." do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections" do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc." 
do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)." do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs." do_test probe-timeout "cl#5099 - Default probe timeout" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progressive) #1" do_test systemhealthp2 "System Health (Progressive) #2" do_test systemhealthp3 "System Health (Progressive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" do_test node-maintenance-1 "cl#5128 - Node maintenance" do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)" do_test rsc-maintenance "Per-resource maintenance" echo "" do_test not-installed-agent "The resource agent is missing" do_test not-installed-tools "Something the resource agent needs is missing" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test
stopped-monitor-09 "Stopped Monitor - unmanage started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (targer-role="Stopped")" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role="Started")" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo"" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy-stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy-stop, revoked)" do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 
"Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy-stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master (loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master 
(loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test template-rsc-sets-4 "Template - Resource Sets 4" do_test template-clone-primitive "Cloned primitive from template" do_test template-clone-group "Cloned group from template" do_test location-sets-templates "Resource sets and templates - Location" echo "" do_test container-1 "Container - initial" do_test container-2 "Container - monitor failed" do_test container-3 "Container - stop failed" do_test container-4 "Container - reached migration-threshold" do_test container-group-1 "Container in group - initial" do_test container-group-2 
"Container in group - monitor failed" do_test container-group-3 "Container in group - stop failed" do_test container-group-4 "Container in group - reached migration-threshold" echo "" do_test whitebox-fail1 "Fail whitebox container rsc." do_test whitebox-fail2 "Fail whitebox container rsc lrmd connection." do_test whitebox-fail3 "Failed containers should not run nested on remote nodes." do_test whitebox-start "Start whitebox container with resources assigned to it" do_test whitebox-stop "Stop whitebox container with resources assigned to it" do_test whitebox-move "Move whitebox container with resources assigned to it" do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource" do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established" do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established" do_test whitebox-orphaned "Properly shutdown orphaned whitebox container" do_test whitebox-orphan-ms "Properly tear down orphan ms resources on remote-nodes" do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start." echo "" do_test remote-startup-probes "Baremetal remote-node startup probes" do_test remote-startup "Startup a newly discovered remote-nodes with no status." do_test remote-fence-unclean "Fence unclean baremetal remote-node" do_test remote-move "Move remote-node connection resource" do_test remote-disable "Disable a baremetal remote-node" do_test remote-orphaned "Properly shutdown orphaned connection resource" echo "" test_results diff --git a/pengine/test10/clone-order-16instances.dot b/pengine/test10/clone-order-16instances.dot new file mode 100644 index 0000000000..3ebda6079a --- /dev/null +++ b/pengine/test10/clone-order-16instances.dot @@ -0,0 +1,155 @@ + digraph "g" { +"clvmd-clone_running_0" [ style=dashed color="red" fontcolor="orange"] +"clvmd-clone_start_0" -> "clvmd-clone_running_0" [ style = dashed] +"clvmd-clone_start_0" [ style=dashed color="red" fontcolor="orange"] +"clvmd:10_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:10_start_0 " -> "clvmd:11_start_0 " [ style = dashed] +"clvmd:10_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:11_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:11_start_0 " -> "clvmd:12_start_0 " [ style = dashed] +"clvmd:11_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:12_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:12_start_0 " -> "clvmd:13_start_0 " [ style = dashed] +"clvmd:12_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:13_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:13_start_0 " -> "clvmd:14_start_0 " [ style = dashed] +"clvmd:13_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:14_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:14_start_0 " -> "clvmd:15_start_0 " [ style = dashed] +"clvmd:14_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:15_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:15_start_0 " -> "clvmd:2_start_0 " [ style = dashed] +"clvmd:15_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:1_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:1_start_0 " -> "clvmd:10_start_0 " [ style = dashed] +"clvmd:1_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:2_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:2_start_0 " -> "clvmd:3_start_0 " [ style = dashed] 
+"clvmd:2_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:3_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:3_start_0 " -> "clvmd:4_start_0 " [ style = dashed] +"clvmd:3_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:4_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:4_start_0 " -> "clvmd:5_start_0 " [ style = dashed] +"clvmd:4_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:5_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:5_start_0 " -> "clvmd:6_start_0 " [ style = dashed] +"clvmd:5_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:6_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:6_start_0 " -> "clvmd:7_start_0 " [ style = dashed] +"clvmd:6_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:7_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:7_start_0 " -> "clvmd:8_start_0 " [ style = dashed] +"clvmd:7_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:8_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:8_start_0 " -> "clvmd:9_start_0 " [ style = dashed] +"clvmd:8_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd:9_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd:9_start_0 " [ style=dashed color="red" fontcolor="black"] +"clvmd_start_0 " -> "clvmd-clone_running_0" [ style = dashed] +"clvmd_start_0 " -> "clvmd:1_start_0 " [ style = dashed] +"clvmd_start_0 " [ style=dashed color="red" fontcolor="black"] +"dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = dashed] +"dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dlm-clone_start_0" -> "dlm-clone_running_0" [ style = bold] +"dlm-clone_start_0" -> "dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dlm:10_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:10_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:11_start_0 " [ style = dashed] +"dlm:10_start_0 
virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:12_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:13_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:14_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:15_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:1_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:2_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:3_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:4_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:5_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:6_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:7_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:8_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:9_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd_start_0 " [ style = dashed] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:11_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:12_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:13_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 
virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:14_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:15_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:3_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:4_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:5_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:6_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] 
+"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:7_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:8_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:9_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +} diff --git a/pengine/test10/clone-order-16instances.exp b/pengine/test10/clone-order-16instances.exp new file mode 100644 index 0000000000..76fdf0e07f --- /dev/null +++ b/pengine/test10/clone-order-16instances.exp @@ -0,0 +1,467 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/clone-order-16instances.scores b/pengine/test10/clone-order-16instances.scores new file mode 100644 index 0000000000..f5dadd706a --- /dev/null +++ b/pengine/test10/clone-order-16instances.scores @@ -0,0 +1,1073 @@ +Allocation scores: +clone_color: clvmd-clone allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd-clone allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation 
score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score 
on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation 
score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score 
on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on 
virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on 
virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: clvmd:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on 
virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm-clone allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 1 +clone_color: dlm:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 1 +clone_color: dlm:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 
allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 
+clone_color: dlm:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on 
virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on 
virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:5 allocation score on 
virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on 
virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +clone_color: dlm:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: clvmd:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:0 allocation score on 
virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on 
virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on 
virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on 
virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY 
+native_color: clvmd:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on 
virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY 
+native_color: clvmd:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: clvmd:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 1 +native_color: dlm:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 
+native_color: dlm:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 1 +native_color: dlm:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 
+native_color: dlm:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on 
virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 
allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 
+native_color: dlm:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:7 allocation score on 
+native_color: dlm:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: virt-fencing allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
diff --git a/pengine/test10/clone-order-16instances.summary b/pengine/test10/clone-order-16instances.summary
new file mode 100644
index 0000000000..d0c3fbb2a7
--- /dev/null
+++ b/pengine/test10/clone-order-16instances.summary
@@ -0,0 +1,68 @@
+
+Current cluster status:
+Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+
+ virt-fencing (stonith:fence_xvm): Started virt-010.cluster-qe.lab.eng.brq.redhat.com
+ Clone Set: dlm-clone [dlm]
+     Started: [ virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com ]
+     Stopped: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+ Clone Set: clvmd-clone [clvmd]
+     Stopped: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+
+Transition Summary:
+ * Start dlm:10 (virt-009.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:11 (virt-013.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:12 (virt-014.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:13 (virt-015.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:14 (virt-016.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:15 (virt-020.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:2 (virt-027.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:3 (virt-028.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:4 (virt-029.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:5 (virt-030.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:6 (virt-031.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:7 (virt-032.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:8 (virt-033.cluster-qe.lab.eng.brq.redhat.com)
+ * Start dlm:9 (virt-034.cluster-qe.lab.eng.brq.redhat.com)
+
+Executing cluster transition:
+ * Pseudo action: dlm-clone_start_0
+ * Resource action: dlm start on virt-009.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-013.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-014.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-015.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-016.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-020.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-027.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-028.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-029.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-030.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-031.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-032.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-033.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm start on virt-034.cluster-qe.lab.eng.brq.redhat.com
+ * Pseudo action: dlm-clone_running_0
+ * Resource action: dlm monitor=30000 on virt-009.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-013.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-014.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-015.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-016.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-020.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-027.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-028.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-029.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-030.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-031.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-032.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-033.cluster-qe.lab.eng.brq.redhat.com
+ * Resource action: dlm monitor=30000 on virt-034.cluster-qe.lab.eng.brq.redhat.com
+
+Revised cluster status:
+Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+
+ virt-fencing (stonith:fence_xvm): Started virt-010.cluster-qe.lab.eng.brq.redhat.com
+ Clone Set: dlm-clone [dlm]
+     Started: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+ Clone Set: clvmd-clone [clvmd]
+     Stopped: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+
diff --git a/pengine/test10/clone-order-16instances.xml b/pengine/test10/clone-order-16instances.xml
new file mode 100644
index 0000000000..fb77fe8e99
--- /dev/null
+++ b/pengine/test10/clone-order-16instances.xml
@@ -0,0 +1,406 @@
[The 406 added XML lines of this fixture are not recoverable: the markup was stripped in extraction, leaving only bare "+" diff markers.]
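[From the test name and the summary above, the fixture would define a dlm clone and a clvmd clone across the 16 nodes, ordered relative to each other. A minimal sketch of the presumed constraint section follows; the ids, agent types, and clone-max value are illustrative assumptions, not recovered content:

    <clone id="dlm-clone">
      <primitive id="dlm" class="ocf" provider="pacemaker" type="controld"/>
      <meta_attributes id="dlm-clone-meta">
        <nvpair id="dlm-clone-max" name="clone-max" value="16"/>
      </meta_attributes>
    </clone>
    ...
    <constraints>
      <rsc_order id="order-dlm-clvmd" first="dlm-clone" then="clvmd-clone"/>
    </constraints>
]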
diff --git a/tools/cib_shadow.c b/tools/cib_shadow.c
index a1030c1d3a..e12b1e81f3 100644
--- a/tools/cib_shadow.c
+++ b/tools/cib_shadow.c
@@ -1,623 +1,624 @@
/*
 * Copyright (C) 2004 Andrew Beekhof
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* (the header names of the 14 #include directives below were lost in extraction) */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

int exit_code = pcmk_ok;
GMainLoop *mainloop = NULL;
const char *host = NULL;

void usage(const char *cmd, int exit_status);

int command_options = cib_sync_call;
const char *cib_action = NULL;

cib_t *real_cib = NULL;

int dump_data_element(int depth, char **buffer, int *max, int *offset,
                      const char *prefix, xmlNode * data, gboolean formatted);
void print_xml_diff(FILE * where, xmlNode * diff);

static int force_flag = 0;
static int batch_flag = 0;

static int
print_spaces(char *buffer, int depth, int max)
{
    int lpc = 0;
    int spaces = 2 * depth;

    max--;

    /* <= so that we always print 1 space - prevents problems with syslog */
    for (lpc = 0; lpc <= spaces && lpc < max; lpc++) {
        if (sprintf(buffer + lpc, "%c", ' ') < 1) {
            return -1;
        }
    }
    return lpc;
}

static char *
get_shadow_prompt(const char *name)
{
    return g_strdup_printf("shadow[%.40s] # ", name);
}

static void
shadow_setup(char *name, gboolean do_switch)
{
    const char *prompt = getenv("PS1");
    const char *shell = getenv("SHELL");
    char *new_prompt = get_shadow_prompt(name);

    printf("Setting up shadow instance\n");

    if (safe_str_eq(new_prompt, prompt)) {
        /* nothing to do */
        goto done;

    } else if (batch_flag == FALSE && shell != NULL) {
        setenv("PS1", new_prompt, 1);
        setenv("CIB_shadow", name, 1);
        printf("Type Ctrl-D to exit the crm_shadow shell\n");

        if (strstr(shell, "bash")) {
            execl(shell, shell, "--norc", "--noprofile", NULL);
        } else {
            execl(shell, shell, "--noprofile", NULL);
        }

    } else if (do_switch) {
        printf("To switch to the named shadow instance, paste the following into your shell:\n");

    } else {
        printf("A new shadow instance was created. To begin using it paste the following into your shell:\n");
    }
    printf("  CIB_shadow=%s ; export CIB_shadow\n", name);

  done:
    free(new_prompt);
}

static void
shadow_teardown(char *name)
{
    const char *prompt = getenv("PS1");
    char *our_prompt = get_shadow_prompt(name);

    if (prompt != NULL && strstr(prompt, our_prompt)) {
        printf("Now type Ctrl-D to exit the crm_shadow shell\n");

    } else {
        printf("Please remember to unset the CIB_shadow variable by pasting the following into your shell:\n");
        printf("  unset CIB_shadow\n");
    }
    free(our_prompt);
}

/* *INDENT-OFF* */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help",    0, 0, '?', "\t\tThis text"},
    {"version", 0, 0, '$', "\t\tVersion information"},
    {"verbose", 0, 0, 'V', "\t\tIncrease debug output"},

    {"-spacer-", 1, 0, '-', "\nQueries:"},
    {"which",   no_argument, NULL, 'w', "\t\tIndicate the active shadow copy"},
    {"display", no_argument, NULL, 'p', "\t\tDisplay the contents of the active shadow copy"},
    {"edit",    no_argument, NULL, 'E', "\t\tEdit the contents of the active shadow copy with your favorite $EDITOR"},
    {"diff",    no_argument, NULL, 'd', "\t\tDisplay the changes in the active shadow copy\n"},
    {"file",    no_argument, NULL, 'F', "\t\tDisplay the location of the active shadow copy file\n"},

    {"-spacer-", 1, 0, '-', "\nCommands:"},
    {"create",       required_argument, NULL, 'c', "\tCreate the named shadow copy of the active cluster configuration"},
    {"create-empty", required_argument, NULL, 'e', "Create the named shadow copy with an empty cluster configuration"},
    {"commit",       required_argument, NULL, 'C', "\tUpload the contents of the named shadow copy to the cluster"},
    {"delete",       required_argument, NULL, 'D', "\tDelete the contents of the named shadow copy"},
    {"reset",        required_argument, NULL, 'r', "\tRecreate the named shadow copy from the active cluster configuration"},
    {"switch",       required_argument, NULL, 's', "\t(Advanced) Switch to the named shadow copy"},

    {"-spacer-", 1, 0, '-', "\nAdditional Options:"},
    {"force",   no_argument, NULL, 'f', "\t\t(Advanced) Force the action to be performed"},
    {"batch",   no_argument, NULL, 'b', "\t\t(Advanced) Don't spawn a new shell"},
    {"all",     no_argument, NULL, 'a', "\t\t(Advanced) Upload the entire CIB, including status, with --commit"},

    {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "Create a blank shadow configuration:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_shadow --create-empty myShadow", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Create a shadow configuration from the running cluster:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_shadow --create myShadow", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the current shadow configuration:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_shadow --display", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Discard the current shadow configuration (named myShadow):", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_shadow --delete myShadow", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Upload the current shadow configuration (named myShadow) to the running cluster:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_shadow --commit myShadow", pcmk_option_example},

    {0, 0, 0, 0}
};
/* *INDENT-ON* */

int
main(int argc, char **argv)
{
    int rc = 0;
    int flag;
    int argerr = 0;
    static int command = '?';
    char *shadow = NULL;
    char *shadow_file = NULL;
    gboolean full_upload = FALSE;
    gboolean dangerous_cmd = FALSE;
    struct stat buf;
    int option_index = 0;
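    /* The single-letter codes stored in 'command' below come straight from the
     * option table above: query-style commands ('d', 'E', 'p', 'w', 'F') take no
     * argument and fall back to the CIB_shadow environment variable for the
     * instance name, instance-naming commands ('e', 'c', 's', 'r') copy optarg,
     * and 'C'/'D' are additionally marked dangerous so they require --force. */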
    crm_log_cli_init("crm_shadow");
    crm_set_options(NULL, "(query|command) [modifiers]", long_options,
                    "Perform configuration changes in a sandbox before updating the live cluster."
                    "\n\nSets up an environment in which configuration tools (cibadmin, crm_resource, etc) work"
                    " offline instead of against a live cluster, allowing changes to be previewed and tested"
                    " for side-effects.\n");

    if (argc < 2) {
        crm_help('?', EX_USAGE);
    }

    while (1) {
        flag = crm_get_option(argc, argv, &option_index);
        if (flag == -1 || flag == 0)
            break;

        switch (flag) {
            case 'a':
                full_upload = TRUE;
                break;
            case 'd':
            case 'E':
            case 'p':
            case 'w':
            case 'F':
                command = flag;
                free(shadow);
                shadow = NULL;
                {
                    const char *env = getenv("CIB_shadow");

                    if (env) {
                        shadow = strdup(env);
                    } else {
                        fprintf(stderr, "No active shadow configuration defined\n");
                        crm_exit(ENOENT);
                    }
                }
                break;
            case 'e':
            case 'c':
            case 's':
            case 'r':
                command = flag;
                free(shadow);
                shadow = strdup(optarg);
                break;
            case 'C':
            case 'D':
                command = flag;
                dangerous_cmd = TRUE;
                free(shadow);
                shadow = strdup(optarg);
                break;
            case 'V':
                command_options = command_options | cib_verbose;
                crm_bump_log_level(argc, argv);
                break;
            case '$':
            case '?':
                crm_help(flag, EX_OK);
                break;
            case 'f':
                command_options |= cib_quorum_override;
                force_flag = 1;
                break;
            case 'b':
                batch_flag = 1;
                break;
            default:
                printf("Argument code 0%o (%c)" " is not (?yet?) supported\n", flag, flag);
                ++argerr;
                break;
        }
    }

    if (optind < argc) {
        printf("non-option ARGV-elements: ");
        while (optind < argc)
            printf("%s ", argv[optind++]);
        printf("\n");
        crm_help('?', EX_USAGE);
    }

    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        crm_help('?', EX_USAGE);
    }

    if (command == 'w') {
        /* which shadow instance is active? */
        const char *local = getenv("CIB_shadow");

        if (local == NULL) {
            fprintf(stderr, "No shadow instance provided\n");
            rc = -ENXIO;
            goto done;
        }
        fprintf(stdout, "%s\n", local);
        rc = 0;
        goto done;
    }

    if (shadow == NULL) {
        fprintf(stderr, "No shadow instance provided\n");
        fflush(stderr);
        rc = -EINVAL;
        goto done;

    } else if (command != 's' && command != 'c') {
        const char *local = getenv("CIB_shadow");

        if (local != NULL && safe_str_neq(local, shadow) && force_flag == FALSE) {
            fprintf(stderr,
                    "The supplied shadow instance (%s) is not the same as the active one (%s).\n"
                    "  To prevent accidental destruction of the cluster,"
                    " the --force flag is required in order to proceed.\n", shadow, local);
            fflush(stderr);
            rc = EX_USAGE;
            goto done;
        }
    }

    if (dangerous_cmd && force_flag == FALSE) {
" To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n"); fflush(stderr); rc = EX_USAGE; goto done; } shadow_file = get_shadow_file(shadow); if (command == 'D') { /* delete the file */ rc = stat(shadow_file, &buf); if (rc == 0) { rc = unlink(shadow_file); if (rc != 0) { fprintf(stderr, "Could not remove shadow instance '%s': %s\n", shadow, strerror(errno)); goto done; } } shadow_teardown(shadow); goto done; } else if (command == 'F') { printf("%s\n", shadow_file); rc = 0; goto done; } if (command == 'd' || command == 'r' || command == 'c' || command == 'C') { real_cib = cib_new_no_shadow(); rc = real_cib->cmds->signon(real_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { fprintf(stderr, "Signon to CIB failed: %s\n", pcmk_strerror(rc)); goto done; } } rc = stat(shadow_file, &buf); if (command == 'e' || command == 'c') { if (rc == 0 && force_flag == FALSE) { fprintf(stderr, "A shadow instance '%s' already exists.\n" " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n", shadow); rc = -ENOTUNIQ; goto done; } } else if (rc != 0) { fprintf(stderr, "Could not access shadow instance '%s': %s\n", shadow, strerror(errno)); rc = -ENXIO; goto done; } rc = pcmk_ok; - if (command == 'c' || command == 'e') { + if (command == 'c' || command == 'e' || command == 'r') { xmlNode *output = NULL; /* create a shadow instance based on the current cluster config */ - if (command == 'c') { + if (command == 'c' || command == 'r') { rc = real_cib->cmds->query(real_cib, NULL, &output, command_options); if (rc != pcmk_ok) { fprintf(stderr, "Could not connect to the CIB: %s\n", pcmk_strerror(rc)); goto done; } } else { output = createEmptyCib(); crm_xml_add(output, XML_ATTR_GENERATION, "0"); crm_xml_add(output, XML_ATTR_NUMUPDATES, "0"); crm_xml_add(output, XML_ATTR_GENERATION_ADMIN, "0"); crm_xml_add(output, XML_ATTR_VALIDATION, LATEST_SCHEMA_VERSION); } rc = write_xml_file(output, shadow_file, FALSE); free_xml(output); if (rc < 0) { - fprintf(stderr, "Could not create the shadow instance '%s': %s\n", + fprintf(stderr, "Could not %s the shadow instance '%s': %s\n", + command == 'r' ? 
"reset" : "create", shadow, strerror(errno)); goto done; } shadow_setup(shadow, FALSE); rc = pcmk_ok; } else if (command == 'E') { const char *err = NULL; char *editor = getenv("EDITOR"); if (editor == NULL) { fprintf(stderr, "No value for $EDITOR defined\n"); rc = -EINVAL; goto done; } execlp(editor, "--", shadow_file, NULL); err = strerror(errno); fprintf(stderr, "Could not invoke $EDITOR (%s %s): %s\n", editor, shadow_file, err); rc = -EINVAL; goto done; } else if (command == 's') { shadow_setup(shadow, TRUE); rc = 0; goto done; } else if (command == 'P') { /* display the current contents */ char *output_s = NULL; xmlNode *output = filename2xml(shadow_file); output_s = dump_xml_formatted(output); printf("%s", output_s); free(output_s); free_xml(output); } else if (command == 'd') { /* diff against cluster */ xmlNode *diff = NULL; xmlNode *old_config = NULL; xmlNode *new_config = filename2xml(shadow_file); rc = real_cib->cmds->query(real_cib, NULL, &old_config, command_options); if (rc != pcmk_ok) { fprintf(stderr, "Could not query the CIB: %s\n", pcmk_strerror(rc)); goto done; } diff = diff_xml_object(old_config, new_config, FALSE); if (diff != NULL) { print_xml_diff(stdout, diff); rc = 1; goto done; } rc = 0; goto done; } else if (command == 'C') { /* commit to the cluster */ xmlNode *input = filename2xml(shadow_file); if (full_upload) { rc = real_cib->cmds->replace(real_cib, NULL, input, command_options); } else { xmlNode *config = first_named_child(input, XML_CIB_TAG_CONFIGURATION); rc = real_cib->cmds->replace(real_cib, XML_CIB_TAG_CONFIGURATION, config, command_options); } if (rc != pcmk_ok) { fprintf(stderr, "Could not commit shadow instance '%s' to the CIB: %s\n", shadow, pcmk_strerror(rc)); return rc; } shadow_teardown(shadow); free_xml(input); } done: free(shadow_file); free(shadow); return crm_exit(rc); } #define bhead(buffer, offset) ((*buffer) + (*offset)) #define bremain(max, offset) ((*max) - (*offset)) #define update_buffer_head(len) do { \ int total = (*offset) + len + 1; \ if(total >= (*max)) { /* too late */ \ (*buffer) = EOS; return -1; \ } else if(((*max) - total) < 256) { \ (*max) *= 10; \ *buffer = realloc(*buffer, (*max)); \ } \ (*offset) += len; \ } while(0) int dump_data_element(int depth, char **buffer, int *max, int *offset, const char *prefix, xmlNode * data, gboolean formatted) { int printed = 0; int has_children = 0; xmlNode *child = NULL; const char *name = NULL; CRM_CHECK(data != NULL, return 0); name = crm_element_name(data); CRM_CHECK(name != NULL, return 0); CRM_CHECK(buffer != NULL && *buffer != NULL, return 0); crm_trace("Dumping %s...", name); if (prefix) { printed = snprintf(bhead(buffer, offset), bremain(max, offset), "%s", prefix); update_buffer_head(printed); } if (formatted) { printed = print_spaces(bhead(buffer, offset), depth, bremain(max, offset)); update_buffer_head(printed); } printed = snprintf(bhead(buffer, offset), bremain(max, offset), "<%s", name); update_buffer_head(printed); if (data) { xmlAttrPtr xIter = NULL; for (xIter = data->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(data, prop_name); crm_trace("Dumping <%s %s=\"%s\"...", name, prop_name, prop_value); printed = snprintf(bhead(buffer, offset), bremain(max, offset), " %s=\"%s\"", prop_name, prop_value); update_buffer_head(printed); } } has_children = xml_has_children(data); printed = snprintf(bhead(buffer, offset), bremain(max, offset), "%s>%s", has_children == 0 ? "/" : "", formatted ? 
"\n" : ""); update_buffer_head(printed); if (has_children == 0) { return 0; } for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { if (dump_data_element(depth + 1, buffer, max, offset, prefix, child, formatted) < 0) { return -1; } } if (prefix) { printed = snprintf(bhead(buffer, offset), bremain(max, offset), "%s", prefix); update_buffer_head(printed); } if (formatted) { printed = print_spaces(bhead(buffer, offset), depth, bremain(max, offset)); update_buffer_head(printed); } printed = snprintf(bhead(buffer, offset), bremain(max, offset), "%s", name, formatted ? "\n" : ""); update_buffer_head(printed); crm_trace("Dumped %s...", name); return has_children; } void print_xml_diff(FILE * where, xmlNode * diff) { char *buffer = NULL; xmlNode *child = NULL; int max = 1024, len = 0; gboolean is_first = TRUE; xmlNode *added = find_xml_node(diff, "diff-added", FALSE); xmlNode *removed = find_xml_node(diff, "diff-removed", FALSE); is_first = TRUE; for (child = __xml_first_child(removed); child != NULL; child = __xml_next(child)) { len = 0; max = 1024; free(buffer); buffer = calloc(1, max); if (is_first) { is_first = FALSE; } else { fprintf(where, " --- \n"); } CRM_CHECK(dump_data_element(0, &buffer, &max, &len, "-", child, TRUE) >= 0, continue); fprintf(where, "%s", buffer); } is_first = TRUE; for (child = __xml_first_child(added); child != NULL; child = __xml_next(child)) { len = 0; max = 1024; free(buffer); buffer = calloc(1, max); if (is_first) { is_first = FALSE; } else { fprintf(where, " +++ \n"); } CRM_CHECK(dump_data_element(0, &buffer, &max, &len, "+", child, TRUE) >= 0, continue); fprintf(where, "%s", buffer); } }