diff --git a/fencing/commands.c b/fencing/commands.c index c79d42b737..06d8135798 100644 --- a/fencing/commands.c +++ b/fencing/commands.c @@ -1,1476 +1,1466 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include GHashTable *device_list = NULL; GHashTable *topology = NULL; GList *cmd_list = NULL; static int active_children = 0; static gboolean stonith_device_dispatch(gpointer user_data); -static void st_child_done(GPid pid, gint status, gpointer user_data); +static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data); + +typedef struct async_command_s { + + int id; + int pid; + int fd_stdout; + int options; + int default_timeout; + int timeout; + + char *op; + char *origin; + char *client; + char *client_name; + char *remote; + + char *victim; + char *action; + char *device; + char *mode; + + GListPtr device_list; + GListPtr device_next; + + void (*done)(GPid pid, int rc, const char *output, gpointer user_data); + guint timer_sigterm; + guint timer_sigkill; + /*! 
If the operation timed out, this is the last signal + * we sent to the process to get it to terminate */ + int last_timeout_signo; +} async_command_t; + +static xmlNode * +stonith_construct_async_reply(async_command_t *cmd, const char *output, xmlNode *data, int rc); static int get_action_timeout(stonith_device_t *device, const char *action, int default_timeout) { char buffer[512] = { 0, }; char *value = NULL; CRM_CHECK(action != NULL, return default_timeout); if (!device->params) { return default_timeout; } snprintf(buffer, sizeof(buffer) - 1, "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (!value) { return default_timeout; } return atoi(value); } static void free_async_command(async_command_t *cmd) { if (!cmd) { return; } cmd_list = g_list_remove(cmd_list, cmd); g_list_free(cmd->device_list); free(cmd->device); free(cmd->action); free(cmd->victim); free(cmd->remote); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->op); free(cmd); } static async_command_t *create_async_command(xmlNode *msg) { async_command_t *cmd = NULL; xmlNode *op = get_xpath_object("//@"F_STONITH_ACTION, msg, LOG_ERR); const char *action = crm_element_value(op, F_STONITH_ACTION); CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL); crm_log_xml_trace(msg, "Command"); cmd = calloc(1, sizeof(async_command_t)); crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id)); crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options)); crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; cmd->origin = crm_element_value_copy(msg, F_ORIG); cmd->remote = crm_element_value_copy(msg, F_STONITH_REMOTE); cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID); cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME); cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION); cmd->action = strdup(action); cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET); cmd->mode = crm_element_value_copy(op, F_STONITH_MODE); cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE); - cmd->done = st_child_done; CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL); CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient")); cmd_list = g_list_append(cmd_list, cmd); return cmd; } static int stonith_manual_ack(xmlNode *msg, remote_fencing_op_t *op) { async_command_t *cmd = create_async_command(msg); xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, msg, LOG_ERR); if(cmd == NULL) { return -EINVAL; } cmd->device = strdup("manual_ack"); cmd->remote = strdup(op->id); crm_notice("Injecting manual confirmation that %s is safely off/down", crm_element_value(dev, F_STONITH_TARGET)); - st_child_done(0, 0, cmd); + st_child_done(0, 0, NULL, cmd); return pcmk_ok; } static gboolean stonith_device_execute(stonith_device_t *device) { int rc = 0; int exec_rc = 0; async_command_t *cmd = NULL; + stonith_action_t *action = NULL; CRM_CHECK(device != NULL, return FALSE); if(device->active_pid) { crm_trace("%s is still active with pid %u", device->id, device->active_pid); return TRUE; } if(device->pending_ops) { GList *first = device->pending_ops; device->pending_ops = g_list_remove_link(device->pending_ops, first); cmd = first->data; g_list_free_1(first); } if(cmd == NULL) { crm_trace("Nothing further to do for %s", device->id); return TRUE; } - exec_rc = run_stonith_agent(device->agent, cmd->action, cmd->victim, - device->params, device->aliases, &rc, 
NULL, cmd); + action = stonith_action_create(device->agent, + cmd->action, + cmd->victim, + cmd->timeout, + device->params, + device->aliases); + + exec_rc = stonith_action_execute_async(action, (void *) cmd, st_child_done); if(exec_rc > 0) { crm_debug("Operation %s%s%s on %s now running with pid=%d, timeout=%dms", cmd->action, cmd->victim?" for node ":"", cmd->victim?cmd->victim:"", device->id, exec_rc, cmd->timeout); device->active_pid = exec_rc; } else { crm_warn("Operation %s%s%s on %s failed (%d/%d)", cmd->action, cmd->victim?" for node ":"", cmd->victim?cmd->victim:"", device->id, exec_rc, rc); - st_child_done(0, rc<0?rc:exec_rc, cmd); + st_child_done(0, rc<0?rc:exec_rc, NULL, cmd); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static void schedule_stonith_command(async_command_t *cmd, stonith_device_t *device) { CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } cmd->device = strdup(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); crm_debug("Scheduling %s on %s for %s (timeout=%dms)", cmd->action, device->id, cmd->remote?cmd->remote:cmd->client, cmd->timeout); device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); } void free_device(gpointer data) { GListPtr gIter = NULL; stonith_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for(gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation %s", device->id, cmd->action); - st_child_done(0, -ENODEV, cmd); + st_child_done(0, -ENODEV, NULL, cmd); free_async_command(cmd); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); free(device->namespace); free(device->agent); free(device->id); free(device); } static GHashTable *build_port_aliases(const char *hostmap, GListPtr *targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); if(hostmap == NULL) { return aliases; } max = strlen(hostmap); for(; lpc <= max; lpc++) { switch(hostmap[lpc]) { /* Assignment chars */ case '=': case ':': if(lpc > last) { free(name); name = calloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimeter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if(name) { char *value = NULL; value = calloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last); crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if(targets) { *targets = g_list_append(*targets, strdup(value)); } value=NULL; name=NULL; added++; } else if(lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc-last, hostmap+last); } last = lpc + 1; break; } if(hostmap[lpc] == 0) { break; } } if(added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } static void parse_host_line(const char *line, GListPtr *output) { int lpc = 0; int max = 0; int last = 0; if(line) { max = strlen(line); } else { return; } /* Check for any complaints about additional parameters that the device doesn't understand */ if(strstr(line, "invalid") || strstr(line, "variable")) { crm_debug("Skipping: %s", line); return; } 
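Editor's note, for readers tracing the refactor in stonith_device_execute() above: the patch drops the old run_stonith_agent() call in favour of building a stonith_action_t and handing it to the new execution helpers, with st_child_done() now receiving the collected rc and output directly. Below is a minimal sketch of that calling pattern, using only the signatures visible in these hunks; the helper names example_done and example_run are illustrative, not part of the patch.

/*
 * Minimal sketch of the execution path introduced by this patch.
 * Only the signatures visible in the hunks above are assumed:
 *   stonith_action_create(agent, action, victim, timeout, params, aliases)
 *   stonith_action_execute(action, &rc, &output)          - synchronous
 *   stonith_action_execute_async(action, userdata, done)  - returns child pid
 * The callback shape matches the new st_child_done().
 */
static void
example_done(GPid pid, int rc, const char *output, gpointer user_data)
{
    async_command_t *cmd = user_data;

    /* rc and output arrive already collected, so no waitpid() status
     * decoding or stdout draining is needed in the callback any more */
    crm_debug("Action '%s' (pid %d) finished: rc=%d", cmd->action, pid, rc);
}

static int
example_run(stonith_device_t *device, async_command_t *cmd)
{
    stonith_action_t *action = stonith_action_create(device->agent,
                                                     cmd->action,
                                                     cmd->victim,
                                                     cmd->timeout,
                                                     device->params,
                                                     device->aliases);

    /* asynchronous path: a positive return value is the child pid */
    int exec_rc = stonith_action_execute_async(action, (void *) cmd, example_done);

    return (exec_rc > 0) ? pcmk_ok : exec_rc;
}

The practical effect is that the per-command timeout (cmd->timeout) is passed down at creation time, and the child reaping plus stdout draining that st_child_done() used to do itself now happens behind stonith_action_execute_async().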
crm_trace("Processing: %s", line); /* Skip initial whitespace */ for(lpc = 0; lpc <= max && isspace(line[lpc]); lpc++) { last = lpc+1; } /* Now the actual content */ for(lpc = 0; lpc <= max; lpc++) { gboolean a_space = isspace(line[lpc]); if(a_space && lpc < max && isspace(line[lpc+1])) { /* fast-forward to the end of the spaces */ } else if(a_space || line[lpc] == ',' || line[lpc] == 0) { int rc = 1; char *entry = NULL; if(lpc != last) { entry = calloc(1, 1 + lpc - last); rc = sscanf(line+last, "%[a-zA-Z0-9_-.]", entry); } if(entry == NULL) { /* Skip */ } else if(rc != 1) { crm_warn("Could not parse (%d %d): %s", last, lpc, line+last); } else if(safe_str_neq(entry, "on") && safe_str_neq(entry, "off")) { crm_trace("Adding '%s'", entry); *output = g_list_append(*output, entry); entry = NULL; } free(entry); last = lpc + 1; } } } static GListPtr parse_host_list(const char *hosts) { int lpc = 0; int max = 0; int last = 0; GListPtr output = NULL; if(hosts == NULL) { return output; } max = strlen(hosts); for(lpc = 0; lpc <= max; lpc++) { if(hosts[lpc] == '\n' || hosts[lpc] == 0) { char *line = NULL; line = calloc(1, 2 + lpc - last); snprintf(line, 1 + lpc - last, "%s", hosts+last); parse_host_line(line, &output); free(line); last = lpc + 1; } } return output; } static stonith_device_t *build_device_from_xml(xmlNode *msg) { xmlNode *dev = get_xpath_object("//"F_STONITH_DEVICE, msg, LOG_ERR); stonith_device_t *device = NULL; device = calloc(1, sizeof(stonith_device_t)); device->id = crm_element_value_copy(dev, XML_ATTR_ID); device->agent = crm_element_value_copy(dev, "agent"); device->namespace = crm_element_value_copy(dev, "namespace"); device->params = xml2list(dev); device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); /* TODO: Hook up priority */ return device; } static const char * target_list_type(stonith_device_t *dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK); if(check_type == NULL) { if(g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) { check_type = "static-list"; } else if(g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) { check_type = "static-list"; } else { check_type = "dynamic-list"; } } return check_type; } static void update_dynamic_list(stonith_device_t *dev) { time_t now = time(NULL); /* Host/alias must be in the list output to be eligable to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if(dev->targets_age < 0) { crm_trace("Port list queries disabled for %s", dev->id); } else if(dev->targets == NULL || dev->targets_age + 60 < now) { + stonith_action_t *action = NULL; char *output = NULL; int rc = pcmk_ok; int exec_rc = pcmk_ok; if(dev->active_pid != 0) { crm_notice("Port list query can not execute because device is busy, using cache: %s", dev->targets ? "YES" : "NO"); return; } - exec_rc = run_stonith_agent(dev->agent, "list", NULL, dev->params, NULL, &rc, &output, NULL); + action = stonith_action_create(dev->agent, "list", NULL, 5, dev->params, NULL); + exec_rc = stonith_action_execute(action, &rc, &output); + if(rc != 0 && dev->active_pid == 0) { /* This device probably only supports a single * connection, which appears to already be in use, * likely involved in a montior or (less likely) * metadata operation. * * Avoid disabling port list queries in the hope that * the op would succeed next time */ crm_info("Couldn't query ports for %s. 
Call failed with rc=%d and active_pid=%d: %s", dev->agent, rc, dev->active_pid, output); } else if(exec_rc < 0 || rc != 0) { crm_notice("Disabling port list queries for %s (%d/%d): %s", dev->id, exec_rc, rc, output); dev->targets_age = -1; /* Fall back to status */ g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status")); g_list_free_full(dev->targets, free); dev->targets = NULL; } else { crm_info("Refreshing port list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = parse_host_list(output); dev->targets_age = now; } free(output); } } int stonith_device_register(xmlNode *msg, const char **desc) { const char *value = NULL; stonith_device_t *device = build_device_from_xml(msg); value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST); if(value) { device->targets = parse_host_list(value); } value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP); device->aliases = build_port_aliases(value, &(device->targets)); value = target_list_type(device); if (safe_str_eq(value, "dynamic-list")) { /* set the dynamic list during the register to guarantee we have * targets cached */ update_dynamic_list(device); } g_hash_table_replace(device_list, device->id, device); crm_notice("Added '%s' to the device list (%d active devices)", device->id, g_hash_table_size(device_list)); if(desc) { *desc = device->id; } return pcmk_ok; } static int stonith_device_remove(xmlNode *msg, const char **desc) { xmlNode *dev = get_xpath_object("//"F_STONITH_DEVICE, msg, LOG_ERR); const char *id = crm_element_value(dev, XML_ATTR_ID); if(g_hash_table_remove(device_list, id)) { crm_info("Removed '%s' from the device list (%d active devices)", id, g_hash_table_size(device_list)); } else { crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list)); } if(desc) { *desc = id; } return pcmk_ok; } static int count_active_levels(stonith_topology_t *tp) { int lpc = 0; int count = 0; for(lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if(tp->levels[lpc] != NULL) { count++; } } return count; } void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for(lpc = 0; lpc < ST_LEVEL_MAX; lpc++) { if(tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->node); free(tp); } int stonith_level_register(xmlNode *msg, char **desc) { int id = 0; int rc = pcmk_ok; xmlNode *child = NULL; xmlNode *level = get_xpath_object("//"F_STONITH_LEVEL, msg, LOG_ERR); const char *node = crm_element_value(level, F_STONITH_TARGET); stonith_topology_t *tp = g_hash_table_lookup(topology, node); crm_element_value_int(level, XML_ATTR_ID, &id); if(desc) { *desc = g_strdup_printf("%s[%d]", node, id); } if(id <= 0 || id >= ST_LEVEL_MAX) { return -EINVAL; } if(tp == NULL) { tp = calloc(1, sizeof(stonith_topology_t)); tp->node = strdup(node); g_hash_table_replace(topology, tp->node, tp); crm_trace("Added %s to the topology (%d active entries)", node, g_hash_table_size(topology)); } if(tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry (%d active entries)", node, id, count_active_levels(tp)); } for (child = __xml_first_child(level); child != NULL; child = __xml_next(child)) { const char *device = ID(child); crm_trace("Adding device '%s' for %s (%d)", device, node, id); tp->levels[id] = g_list_append(tp->levels[id], strdup(device)); } crm_info("Node %s has %d active fencing levels", node, count_active_levels(tp)); return rc; } int stonith_level_remove(xmlNode *msg, char **desc) { int id = 0; xmlNode *level = 
get_xpath_object("//"F_STONITH_LEVEL, msg, LOG_ERR); const char *node = crm_element_value(level, F_STONITH_TARGET); stonith_topology_t *tp = g_hash_table_lookup(topology, node); if(desc) { *desc = g_strdup_printf("%s[%d]", node, id); } crm_element_value_int(level, XML_ATTR_ID, &id); if(tp == NULL) { crm_info("Node %s not found (%d active entries)", node, g_hash_table_size(topology)); return pcmk_ok; } else if(id < 0 || id >= ST_LEVEL_MAX) { return -EINVAL; } if(id == 0 && g_hash_table_remove(topology, node)) { crm_info("Removed all %s related entries from the topology (%d active entries)", node, g_hash_table_size(topology)); } else if(id > 0 && tp->levels[id] != NULL) { g_list_free_full(tp->levels[id], free); tp->levels[id] = NULL; crm_info("Removed entry '%d' from %s's topology (%d active entries remaining)", id, node, count_active_levels(tp)); } return pcmk_ok; } static gboolean string_in_list(GListPtr list, const char *item) { int lpc = 0; int max = g_list_length(list); for(lpc = 0; lpc < max; lpc ++) { const char *value = g_list_nth_data(list, lpc); if(safe_str_eq(item, value)) { return TRUE; } } return FALSE; } static int stonith_device_action(xmlNode *msg, char **output) { int rc = pcmk_ok; xmlNode *dev = get_xpath_object("//"F_STONITH_DEVICE, msg, LOG_ERR); const char *id = crm_element_value(dev, F_STONITH_DEVICE); async_command_t *cmd = NULL; stonith_device_t *device = NULL; if(id) { crm_trace("Looking for '%s'", id); device = g_hash_table_lookup(device_list, id); } if(device) { cmd = create_async_command(msg); if(cmd == NULL) { free_device(device); return -EPROTO; } schedule_stonith_command(cmd, device); rc = -EINPROGRESS; } else { crm_info("Device %s not found", id?id:""); rc = -ENODEV; } return rc; } static gboolean can_fence_host_with_device(stonith_device_t *dev, const char *host) { gboolean can = FALSE; const char *alias = host; const char *check_type = NULL; if(dev == NULL) { return FALSE; } else if(host == NULL) { return TRUE; } if(g_hash_table_lookup(dev->aliases, host)) { alias = g_hash_table_lookup(dev->aliases, host); } check_type = target_list_type(dev); if(safe_str_eq(check_type, "none")) { can = TRUE; } else if(safe_str_eq(check_type, "static-list")) { /* Presence in the hostmap is sufficient * Only use if all hosts on which the device can be active can always fence all listed hosts */ if(string_in_list(dev->targets, host)) { can = TRUE; } else if(g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP) && g_hash_table_lookup(dev->aliases, host)) { can = TRUE; } } else if(safe_str_eq(check_type, "dynamic-list")) { update_dynamic_list(dev); if(string_in_list(dev->targets, alias)) { can = TRUE; } } else if(safe_str_eq(check_type, "status")) { int rc = 0; int exec_rc = 0; - + stonith_action_t *action = NULL; /* Run the status operation for the device/target combination * Will cause problems if the device doesn't return 2 for down'd nodes or * (for virtual nodes) if the device doesn't return 1 for guests that * have been moved to another host */ - exec_rc = run_stonith_agent( - dev->agent, "status", host, dev->params, dev->aliases, &rc, NULL, NULL); + action = stonith_action_create(dev->agent, "status", host, 5, dev->params, dev->aliases); + exec_rc = stonith_action_execute(action, &rc, NULL); if(exec_rc != 0) { crm_err("Could not invoke %s: rc=%d", dev->id, exec_rc); } else if(rc == 1 /* unkown */) { crm_trace("Host %s is not known by %s", host, dev->id); } else if(rc == 0 /* active */ || rc == 2 /* inactive */) { can = TRUE; } else { crm_notice("Unkown result when 
testing if %s can fence %s: rc=%d", dev->id, host, rc); } } else { crm_err("Unknown check type: %s", check_type); } if(safe_str_eq(host, alias)) { crm_info("%s can%s fence %s: %s", dev->id, can?"":" not", host, check_type); } else { crm_info("%s can%s fence %s (aka. '%s'): %s", dev->id, can?"":" not", host, alias, check_type); } return can; } struct device_search_s { const char *host; GListPtr capable; }; static void search_devices( gpointer key, gpointer value, gpointer user_data) { stonith_device_t *dev = value; struct device_search_s *search = user_data; if(can_fence_host_with_device(dev, search->host)) { search->capable = g_list_append(search->capable, value); } } static int stonith_query(xmlNode *msg, xmlNode **list) { struct device_search_s search; int available_devices = 0; const char *action = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, msg, LOG_DEBUG_3); search.host = NULL; search.capable = NULL; if(dev) { const char *device = crm_element_value(dev, F_STONITH_DEVICE); search.host = crm_element_value(dev, F_STONITH_TARGET); if(device && safe_str_eq(device, "manual_ack")) { /* No query necessary */ if(list) { *list = NULL; } return pcmk_ok; } action = crm_element_value(dev, F_STONITH_ACTION); } crm_log_xml_debug(msg, "Query"); g_hash_table_foreach(device_list, search_devices, &search); available_devices = g_list_length(search.capable); if(search.host) { crm_debug("Found %d matching devices for '%s'", available_devices, search.host); } else { crm_debug("%d devices installed", available_devices); } /* Pack the results into data */ if(list) { GListPtr lpc = NULL; *list = create_xml_node(NULL, __FUNCTION__); crm_xml_add(*list, F_STONITH_TARGET, search.host); crm_xml_add_int(*list, "st-available-devices", available_devices); for(lpc = search.capable; lpc != NULL; lpc = lpc->next) { stonith_device_t *device = (stonith_device_t*)lpc->data; int action_specific_timeout = get_action_timeout(device, action, 0); dev = create_xml_node(*list, F_STONITH_DEVICE); crm_xml_add(dev, XML_ATTR_ID, device->id); crm_xml_add(dev, "namespace", device->namespace); crm_xml_add(dev, "agent", device->agent); if (action_specific_timeout) { crm_xml_add_int(dev, F_STONITH_ACTION_TIMEOUT, action_specific_timeout); } if(search.host == NULL) { xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS); g_hash_table_foreach(device->params, hash2field, attrs); } } } g_list_free(search.capable); return available_devices; } static void log_operation(async_command_t *cmd, int rc, int pid, const char *next, const char *output) { if(rc == 0) { next = NULL; } if(cmd->victim != NULL) { do_crm_log(rc==0?LOG_NOTICE:LOG_ERR, "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s", cmd->action, pid, cmd->id, cmd->client, cmd->victim, cmd->device, rc, pcmk_strerror(rc), next?". Trying: ":"", next?next:""); } else { do_crm_log_unlikely(rc==0?LOG_DEBUG:LOG_NOTICE, "Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s", cmd->action, pid, cmd->device, rc, pcmk_strerror(rc), next?". 
Trying: ":"", next?next:""); } if(output) { /* Logging the whole string confuses syslog when the string is xml */ char *local_copy = strdup(output); int lpc = 0, last = 0, more = strlen(local_copy); for(lpc = 0; lpc < more; lpc++) { if(local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; do_crm_log(rc==0?LOG_INFO:LOG_WARNING, "%s: %s", cmd->device, local_copy+last); last = lpc+1; } } crm_debug("%s: %s (total %d bytes)", cmd->device, local_copy+last, more); free(local_copy); } } static void stonith_send_async_reply(async_command_t *cmd, const char *output, int rc, GPid pid) { xmlNode *reply = NULL; gboolean bcast = TRUE; reply = stonith_construct_async_reply(cmd, output, NULL, rc); if(safe_str_eq(cmd->action, "metadata")) { /* Too verbose to log */ bcast = FALSE; output = NULL; crm_trace("Directed reply: %s op", cmd->action); } else if(crm_str_eq(cmd->action, "monitor", TRUE) || crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) { crm_trace("Directed reply: %s op", cmd->action); bcast = FALSE; } else if(safe_str_eq(cmd->mode, "slave")) { crm_trace("Directed reply: Complex op with %s", cmd->device); bcast = FALSE; } log_operation(cmd, rc, pid, NULL, output); crm_log_xml_trace(reply, "Reply"); if(bcast && !stand_alone) { /* Send reply as T_STONITH_NOTIFY so everyone does notifications * Potentially limit to unsucessful operations to the originator? */ crm_trace("Broadcast reply"); crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY); send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE); } else if(cmd->origin) { crm_trace("Directed reply to %s", cmd->origin); send_cluster_message(cmd->origin, crm_msg_stonith_ng, reply, FALSE); } else { crm_trace("Directed local %ssync reply to %s", (cmd->options & st_opt_sync_call)?"":"a-", cmd->client); do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE); } if(stand_alone) { /* Do notification with a clean data object */ xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE); crm_xml_add_int(notify_data, F_STONITH_RC, rc); crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim); crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op); crm_xml_add(notify_data, F_STONITH_DELEGATE, cmd->device); crm_xml_add(notify_data, F_STONITH_REMOTE, cmd->remote); crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client); do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data, NULL); } free_xml(reply); } static void cancel_stonith_command(async_command_t *cmd) { stonith_device_t *device; CRM_CHECK(cmd != NULL, return); if (!cmd->device) { return; } device = g_hash_table_lookup(device_list, cmd->device); if (device) { crm_trace("Cancel scheduled %s on %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } #define READ_MAX 500 -static void st_child_done(GPid pid, gint status, gpointer user_data) +static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data) { - int rc = -pcmk_err_generic; - - int len = 0; - int more = 0; - - char *output = NULL; - stonith_device_t *device = NULL; async_command_t *cmd = user_data; GListPtr gIter = NULL; GListPtr gIterNext = NULL; CRM_CHECK(cmd != NULL, return); - if(cmd->timer_sigterm > 0) { - g_source_remove(cmd->timer_sigterm); - } - if(cmd->timer_sigkill > 0) { - g_source_remove(cmd->timer_sigkill); - } - - if(cmd->last_timeout_signo) { - crm_notice("Child process %d performing action '%s' with '%s' timed out with signal %d", - pid, cmd->action, cmd->device, 
cmd->last_timeout_signo); - rc = -ETIME; - } else if(WIFSIGNALED(status)) { - int signo = WTERMSIG(status); - - rc = -ECONNABORTED; - crm_notice("Child process %d performing action '%s' with '%s' terminated with signal %d", - pid, cmd->action, cmd->device, signo); - } else if(WIFEXITED(status)) { - rc = WEXITSTATUS(status); - crm_debug("Child process %d performing action '%s' with '%s' exited with rc %d", - pid, cmd->action, cmd->device, rc); - } - active_children--; /* The device is ready to do something else now */ device = g_hash_table_lookup(device_list, cmd->device); if(device) { device->active_pid = 0; mainloop_set_trigger(device->work); } - do { - char buffer[READ_MAX]; - - errno = 0; - if(cmd->fd_stdout > 0) { - memset(&buffer, 0, READ_MAX); - more = read(cmd->fd_stdout, buffer, READ_MAX-1); - crm_trace("Got %d more bytes: %s", more, buffer); - } - - if(more > 0) { - output = realloc(output, len + more + 1); - sprintf(output+len, "%s", buffer); - len += more; - } - - } while (more == (READ_MAX-1) || (more < 0 && errno == EINTR)); - - if(cmd->fd_stdout) { - close(cmd->fd_stdout); - cmd->fd_stdout = 0; - } - crm_trace("Operation on %s completed with rc=%d (%d remaining)", cmd->device, rc, g_list_length(cmd->device_next)); if(rc != 0 && cmd->device_next) { stonith_device_t *dev = cmd->device_next->data; log_operation(cmd, rc, pid, dev->id, output); cmd->device_next = cmd->device_next->next; schedule_stonith_command(cmd, dev); /* Prevent cmd from being freed */ cmd = NULL; goto done; } if(rc > 0) { rc = -pcmk_err_generic; } stonith_send_async_reply(cmd, output, rc, pid); if(rc != 0) { goto done; } /* Check to see if any operations are scheduled to do the exact * same thing that just completed. If so, rather than * performing the same fencing operation twice, return the result * of this operation for all pending commands it matches. */ for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd_other = gIter->data; gIterNext = gIter->next; if(cmd == cmd_other) { continue; } /* A pending scheduled command matches the command that just finished if. * 1. The client connections are different. * 2. The node victim is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. 
*/ if(safe_str_eq(cmd->client, cmd_other->client) || safe_str_neq(cmd->victim, cmd_other->victim) || safe_str_neq(cmd->action, cmd_other->action) || safe_str_neq(cmd->device, cmd_other->device)) { continue; } crm_notice("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s", cmd_other->action, cmd_other->victim, cmd_other->client, cmd->client); cmd_list = g_list_remove_link(cmd_list, gIter); stonith_send_async_reply(cmd_other, output, rc, pid); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(gIter); } done: free_async_command(cmd); - free(output); } static gint sort_device_priority(gconstpointer a, gconstpointer b) { const stonith_device_t *dev_a = a; const stonith_device_t *dev_b = a; if(dev_a->priority > dev_b->priority) { return -1; } else if(dev_a->priority < dev_b->priority) { return 1; } return 0; } static int stonith_fence(xmlNode *msg) { int options = 0; const char *device_id = NULL; stonith_device_t *device = NULL; async_command_t *cmd = create_async_command(msg); xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, msg, LOG_ERR); if(cmd == NULL) { return -EPROTO; } device_id = crm_element_value(dev, F_STONITH_DEVICE); if(device_id) { device = g_hash_table_lookup(device_list, device_id); if(device == NULL) { crm_err("Requested device '%s' is not available", device_id); } } else { struct device_search_s search; search.capable = NULL; search.host = crm_element_value(dev, F_STONITH_TARGET); crm_element_value_int(msg, F_STONITH_CALLOPTS, &options); if(options & st_opt_cs_nodeid) { int nodeid = crm_atoi(search.host, NULL); crm_node_t *node = crm_get_peer(nodeid, NULL); if(node) { search.host = node->uname; } } g_hash_table_foreach(device_list, search_devices, &search); crm_info("Found %d matching devices for '%s'", g_list_length(search.capable), search.host); if(g_list_length(search.capable) > 0) { /* Order based on priority */ search.capable = g_list_sort(search.capable, sort_device_priority); device = search.capable->data; if(g_list_length(search.capable) > 1) { cmd->device_list = search.capable; cmd->device_next = cmd->device_list->next; } else { g_list_free(search.capable); } } } if(device) { schedule_stonith_command(cmd, device); return -EINPROGRESS; } free_async_command(cmd); return -EHOSTUNREACH; } xmlNode *stonith_construct_reply(xmlNode *request, char *output, xmlNode *data, int rc) { int lpc = 0; xmlNode *reply = NULL; const char *name = NULL; const char *value = NULL; const char *names[] = { F_STONITH_OPERATION, F_STONITH_CALLID, F_STONITH_CLIENTID, F_STONITH_CLIENTNAME, F_STONITH_REMOTE, F_STONITH_CALLOPTS }; crm_trace("Creating a basic reply"); reply = create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, "st_output", output); crm_xml_add_int(reply, F_STONITH_RC, rc); CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply); for(lpc = 0; lpc < DIMOF(names); lpc++) { name = names[lpc]; value = crm_element_value(request, name); crm_xml_add(reply, name, value); } if(data != NULL) { crm_trace("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } -xmlNode *stonith_construct_async_reply(async_command_t *cmd, const char *output, xmlNode *data, int rc) +static xmlNode * +stonith_construct_async_reply(async_command_t *cmd, const char *output, xmlNode *data, int rc) { xmlNode *reply = NULL; crm_trace("Creating a basic reply"); reply = 
create_xml_node(NULL, T_STONITH_REPLY); crm_xml_add(reply, "st_origin", __FUNCTION__); crm_xml_add(reply, F_TYPE, T_STONITH_NG); crm_xml_add(reply, F_STONITH_OPERATION, cmd->op); crm_xml_add(reply, F_STONITH_DEVICE, cmd->device); crm_xml_add(reply, F_STONITH_REMOTE, cmd->remote); crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client); crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name); crm_xml_add(reply, F_STONITH_TARGET, cmd->victim); crm_xml_add(reply, F_STONITH_ACTION, cmd->op); crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin); crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id); crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options); crm_xml_add_int(reply, F_STONITH_RC, rc); crm_xml_add(reply, "st_output", output); if(data != NULL) { crm_info("Attaching reply output"); add_message_xml(reply, F_STONITH_CALLDATA, data); } return reply; } void stonith_command(stonith_client_t *client, uint32_t id, uint32_t flags, xmlNode *request, const char *remote) { int call_options = 0; int rc = -EOPNOTSUPP; gboolean is_reply = FALSE; gboolean always_reply = FALSE; xmlNode *reply = NULL; xmlNode *data = NULL; char *output = NULL; const char *op = crm_element_value(request, F_STONITH_OPERATION); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); if(get_xpath_object("//"T_STONITH_REPLY, request, LOG_DEBUG_3)) { is_reply = TRUE; } crm_debug("Processing %s%s from %s (%16x)", op, is_reply?" reply":"", client?client->name:remote, call_options); if(is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(client == NULL || client->request_id == id); } if(crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { xmlNode *reply = create_xml_node(NULL, "reply"); CRM_ASSERT(client); crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_STONITH_CLIENTID, client->id); crm_ipcs_send(client->channel, id, reply, FALSE); client->request_id = 0; free_xml(reply); return; } else if(crm_str_eq(op, STONITH_OP_EXEC, TRUE)) { rc = stonith_device_action(request, &output); } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) { const char *call_id = crm_element_value(request, F_STONITH_CALLID); const char *client_id = crm_element_value(request, F_STONITH_CLIENTID); int op_timeout = 0; crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout); do_stonith_async_timeout_update(client_id, call_id, op_timeout); return; } else if(is_reply && crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { process_remote_stonith_query(request); return; } else if(crm_str_eq(op, STONITH_OP_QUERY, TRUE)) { create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */ rc = stonith_query(request, &data); always_reply = TRUE; if(!data) { return; } } else if(is_reply && crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { process_remote_stonith_exec(request); return; } else if(is_reply && crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { /* Reply to a complex fencing op */ process_remote_stonith_exec(request); return; } else if(crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) { const char *flag_name = NULL; CRM_ASSERT(client); flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE); if(flag_name) { crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id); client->flags |= get_stonith_flag(flag_name); } flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE); if(flag_name) { crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id); client->flags |= 
get_stonith_flag(flag_name); } if(flags & crm_ipc_client_response) { crm_ipcs_send_ack(client->channel, id, "ack", __FUNCTION__, __LINE__); client->request_id = 0; } return; /* } else if(is_reply && crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { */ /* process_remote_stonith_exec(request); */ /* return; */ } else if(is_reply == FALSE && crm_str_eq(op, STONITH_OP_RELAY, TRUE)) { if(initiate_remote_stonith_op(NULL, request, FALSE) != NULL) { rc = -EINPROGRESS; } } else if(is_reply == FALSE && crm_str_eq(op, STONITH_OP_FENCE, TRUE)) { if(remote || stand_alone) { rc = stonith_fence(request); } else if(call_options & st_opt_manual_ack) { remote_fencing_op_t *rop = initiate_remote_stonith_op(client, request, TRUE); rc = stonith_manual_ack(request, rop); } else { const char *alternate_host = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, request, LOG_TRACE); const char *target = crm_element_value_copy(dev, F_STONITH_TARGET); if(g_hash_table_lookup(topology, target) && safe_str_eq(target, stonith_our_uname)) { GHashTableIter gIter; crm_node_t *entry = NULL; int membership = crm_proc_plugin | crm_proc_heartbeat | crm_proc_cpg; g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target); if(entry->uname && (entry->processes & membership) && safe_str_neq(entry->uname, target)) { alternate_host = entry->uname; break; } } if(alternate_host == NULL) { crm_err("No alternate host available to handle complex self fencing request"); g_hash_table_iter_init(&gIter, crm_peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { crm_notice("Peer[%d] %s", entry->id, entry->uname); } } } if(alternate_host) { crm_notice("Forwarding complex self fencing request to peer %s", alternate_host); crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY); crm_xml_add(request, F_STONITH_CLIENTID, client->id); send_cluster_message(alternate_host, crm_msg_stonith_ng, request, FALSE); rc = -EINPROGRESS; } else if(initiate_remote_stonith_op(client, request, FALSE) != NULL) { rc = -EINPROGRESS; } } } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) { rc = stonith_fence_history(request, &data); always_reply = TRUE; } else if(crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { return; } else if(crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) { const char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_device_register(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list)); do_stonith_notify(call_options, op, rc, notify_data, NULL); free_xml(notify_data); } else if(crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) { const char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_device_remove(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list)); do_stonith_notify(call_options, op, rc, notify_data, NULL); free_xml(notify_data); } else if(crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) { char *id = NULL; xmlNode *notify_data = create_xml_node(NULL, op); rc = stonith_level_register(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(call_options, op, rc, notify_data, NULL); free_xml(notify_data); } else if(crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) { char *id = NULL; xmlNode 
*notify_data = create_xml_node(NULL, op); rc = stonith_level_remove(request, &id); crm_xml_add(notify_data, F_STONITH_DEVICE, id); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(call_options, op, rc, notify_data, NULL); free_xml(notify_data); } else if(crm_str_eq(op, STONITH_OP_CONFIRM, TRUE)) { async_command_t *cmd = create_async_command(request); xmlNode *reply = stonith_construct_async_reply(cmd, NULL, NULL, 0); crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY); crm_notice("Broadcasting manual fencing confirmation for node %s", cmd->victim); send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE); free_async_command(cmd); free_xml(reply); } else { crm_err("Unknown %s%s from %s", op, is_reply?" reply":"", client?client->name:remote); crm_log_xml_warn(request, "UnknownOp"); } do_crm_log_unlikely(rc>0?LOG_DEBUG:LOG_INFO,"Processed %s%s from %s: %s (%d)", op, is_reply?" reply":"", client?client->name:remote, rc>0?"":pcmk_strerror(rc), rc); if(is_reply || rc == -EINPROGRESS) { /* Nothing (yet) */ } else if(remote) { reply = stonith_construct_reply(request, output, data, rc); send_cluster_message(remote, crm_msg_stonith_ng, reply, FALSE); free_xml(reply); } else if(rc <= pcmk_ok || always_reply) { reply = stonith_construct_reply(request, output, data, rc); do_local_reply(reply, client_id, call_options & st_opt_sync_call, remote!=NULL); free_xml(reply); } free(output); free_xml(data); } diff --git a/fencing/fence_true b/fencing/fence_true index afbd6e1581..ed62f22824 100644 --- a/fencing/fence_true +++ b/fencing/fence_true @@ -1,75 +1,77 @@ #!/usr/bin/python # The Following Agent Has Been Tested On: # # Virsh 0.3.3 on RHEL 5.2 with xen-3.0.3-51 # import sys, time sys.path.append("/usr/share/fence") from fencing import * #BEGIN_VERSION_GENERATION RELEASE_VERSION="3.1.6" BUILD_DATE="(built Mon Oct 24 12:14:08 UTC 2011)" REDHAT_COPYRIGHT="Copyright (C) Red Hat, Inc. 2004-2010 All rights reserved." #END_VERSION_GENERATION plug_status="on" def get_outlets_status(conn, options): result={} # This fake agent has no port data to list, so we have to make # something up for the list action. if options.has_key("-o") and options["-o"] == "list": result["fake_port_1"]=[plug_status, "fake"] result["fake_port_2"]=[plug_status, "fake"] elif (options.has_key("-n") == 0): fail_usage("Failed: You have to enter existing machine!") else: port=options["-n"] result[port]=[plug_status, "fake"] return result def get_power_status(conn, options): outlets=get_outlets_status(conn,options) if len(outlets) == 0 or options.has_key("-n") == 0: fail_usage("Failed: You have to enter existing machine!") else: return outlets[options["-n"]][0] def set_power_status(conn, options): global plug_status plug_status = "off" + if options.has_key("-o") and options["-o"] == "on": + plug_status = "on" def main(): device_opt = [ "help", "version", "agent", "quiet", "verbose", "debug", "action", "port", "no_password", "power_wait", "power_timeout", ] atexit.register(atexit_handler) pinput = process_input(device_opt) # Fake options to keep the library happy #pinput["-p"] = "none" pinput["-a"] = "localhost" pinput["-C"] = "," options = check_input(device_opt, pinput) if options.has_key("-o") and (options["-o"] == "monitor"): sys.exit(0) ## Defaults for fence agent docs = { } docs["shortdesc"] = "Fake fence agent" docs["longdesc"] = "fence_true is a fake Fencing agent which always reports success without doing anything." 
show_docs(options, docs) ## Operate the fencing device result = fence_action(None, options, set_power_status, get_power_status, get_outlets_status) sys.exit(result) if __name__ == "__main__": main() diff --git a/fencing/internal.h b/fencing/internal.h index ade5e6cac8..841d59eb32 100644 --- a/fencing/internal.h +++ b/fencing/internal.h @@ -1,110 +1,107 @@ #include typedef struct stonith_device_s { char *id; char *agent; char *namespace; GListPtr targets; time_t targets_age; gboolean has_attr_map; guint priority; guint active_pid; GHashTable *params; GHashTable *aliases; GList *pending_ops; crm_trigger_t *work; } stonith_device_t; typedef struct stonith_client_s { char *id; char *name; int request_id; char *channel_name; qb_ipcs_connection_t *channel; long long flags; } stonith_client_t; typedef struct remote_fencing_op_s { char *id; char *target; char *action; guint replies; guint op_timer; guint query_timer; guint base_timeout; char *delegate; time_t completed; long long call_options; enum op_state state; char *originator; char *client_id; char *client_name; GListPtr query_results; xmlNode *request; guint level; /* ABI */ GListPtr devices; /* ABI */ int topology_device_number; } remote_fencing_op_t; typedef struct stonith_topology_s { char *node; GListPtr levels[ST_LEVEL_MAX]; } stonith_topology_t; extern long long get_stonith_flag(const char *name); extern void stonith_command(stonith_client_t * client, uint32_t id, uint32_t flags, xmlNode * op_request, const char *remote); extern int stonith_device_register(xmlNode * msg, const char **desc); extern int stonith_level_register(xmlNode * msg, char **desc); extern int stonith_level_remove(xmlNode * msg, char **desc); extern void do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer); extern xmlNode *stonith_construct_reply(xmlNode * request, char *output, xmlNode * data, int rc); -extern xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, - int rc);; - void do_stonith_async_timeout_update(const char *client, const char *call_id, int timeout); extern void do_stonith_notify(int options, const char *type, int result, xmlNode * data, const char *remote); extern remote_fencing_op_t *initiate_remote_stonith_op(stonith_client_t * client, xmlNode * request, gboolean manual_ack); extern int process_remote_stonith_exec(xmlNode * msg); extern int process_remote_stonith_query(xmlNode * msg); extern void *create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer); extern int stonith_fence_history(xmlNode * msg, xmlNode ** output); extern void free_device(gpointer data); extern void free_topology_entry(gpointer data); extern char *stonith_our_uname; extern gboolean stand_alone; extern GHashTable *device_list; extern GHashTable *topology; extern GHashTable *client_list; diff --git a/fencing/regression.py.in b/fencing/regression.py.in index 4f29c21b20..18d45f3efa 100644 --- a/fencing/regression.py.in +++ b/fencing/regression.py.in @@ -1,691 +1,704 @@ #!/usr/bin/python # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import os import sys import subprocess import shlex import time def output_from_command(command): test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) test.wait() return test.communicate()[0].split("\n") class Test: def __init__(self, name, description, verbose = 0, with_cpg = 0): self.name = name self.description = description self.cmds = [] self.verbose = verbose self.result_txt = "" self.cmd_tool_output = "" self.result_exitcode = 0; self.stonith_options = "-s" self.enable_corosync = 0 if with_cpg: self.stonith_options = "-c" self.enable_corosync = 1 self.stonith_process = None self.stonith_output = "" self.stonith_patterns = [] self.negative_stonith_patterns = [] self.executed = 0 rsc_classes = output_from_command("crm_resource --list-standards") self.has_systemd = 0 if "systemd" in rsc_classes: self.has_systemd = 1 def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None): self.cmds.append( { "cmd" : cmd, "kill" : kill, "args" : args, "expected_exitcode" : exitcode, "stdout_match" : stdout_match, "stdout_negative_match" : stdout_negative_match, "no_wait" : no_wait, } ) def start_corosync(self): if self.enable_corosync == 0: return if self.has_systemd: cmd = shlex.split("systemctl start corosync.service") else: cmd = shlex.split("service corosync start") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() def stop_corosync(self): if self.enable_corosync == 0: return if self.has_systemd: cmd = shlex.split("systemctl stop corosync.service") else: cmd = shlex.split("service corosync stop") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() def stop_pacemaker(self): if self.has_systemd: cmd = shlex.split("systemctl stop pacemaker.service") else: cmd = shlex.split("service pacemaker stop") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() def start_environment(self): ### make sure we are in full control here ### self.stop_pacemaker() self.stop_corosync() cmd = shlex.split("killall -q -9 stonithd") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() self.start_corosync() self.stonith_process = subprocess.Popen( shlex.split("@CRM_DAEMON_DIR@/stonithd %s -V" % self.stonith_options), stdout=subprocess.PIPE, stderr=subprocess.PIPE) time.sleep(1) def clean_environment(self): if self.stonith_process: self.stonith_process.terminate() self.stonith_output = self.stonith_process.communicate()[1] self.stonith_process = None if self.verbose: print self.stonith_output self.stop_corosync() def add_stonith_log_pattern(self, pattern): self.stonith_patterns.append(pattern) def add_stonith_negative_log_pattern(self, pattern): self.negative_stonith_patterns.append(pattern) def add_cmd(self, cmd, args): self.__new_cmd(cmd, args, 0, "") def add_cmd_no_wait(self, cmd, args): self.__new_cmd(cmd, args, 0, "", 1) def add_cmd_check_stdout(self, cmd, args, match, no_match = ""): self.__new_cmd(cmd, args, 0, match, 0, no_match) def add_expected_fail_cmd(self, cmd, args, exitcode = 255): self.__new_cmd(cmd, args, exitcode, "") def get_exitcode(self): return self.result_exitcode def print_result(self, filler): print "%s%s" % (filler, self.result_txt) def run_cmd(self, args): cmd = shlex.split(args['args']) 
cmd.insert(0, args['cmd']) if self.verbose: print "\n\nRunning: "+" ".join(cmd) test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if args['kill']: if self.verbose: print "Also running: "+args['kill'] subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return 0 output = test.communicate()[0] if self.verbose: print output if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: test.returncode = -2 print "STDOUT string '%s' was not found in cmd output: %s" % (args['stdout_match'], output) if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: test.returncode = -2 print "STDOUT string '%s' was found in cmd output: %s" % (args['stdout_negative_match'], output) return test.returncode; def count_negative_matches(self, outline): count = 0 for line in self.negative_stonith_patterns: if outline.count(line): count = 1 if self.verbose: print "This pattern should not have matched = '%s" % (line) return count def match_stonith_patterns(self): negative_matches = 0 cur = 0 total_patterns = len(self.stonith_patterns) if len(self.stonith_patterns) == 0: return for line in self.stonith_output.split("\n"): negative_matches = negative_matches + self.count_negative_matches(line) if cur == total_patterns: continue if line.count(self.stonith_patterns[cur]): cur = cur + 1 if cur != len(self.stonith_patterns) or negative_matches: for item in range(total_patterns): if self.verbose and item > (cur -1): print "Pattern Not Matched = '%s'" % self.stonith_patterns[item] self.result_txt = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." % (self.name, total_patterns - cur, total_patterns, negative_matches) self.result_exitcode = -1 def run(self): res = 0 i = 1 self.start_environment() if self.verbose: print "\n--- START TEST - %s" % self.name self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = 0 for cmd in self.cmds: res = self.run_cmd(cmd) if res != cmd['expected_exitcode']: print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode']) self.result_txt = "FAILURE - '%s' failed at step %d. 
Command: lrmd_test %s" % (self.name, i, cmd['args']) self.result_exitcode = -1 break else: if self.verbose: print "Step %d SUCCESS" % (i) i = i + 1 self.clean_environment() if self.result_exitcode == 0: self.match_stonith_patterns() print self.result_txt if self.verbose: print "--- END TEST - %s\n" % self.name self.executed = 1 return res class Tests: def __init__(self, verbose = 0): self.tests = [] self.verbose = verbose self.autogen_corosync_cfg = 0 if not os.path.exists("/etc/corosync/corosync.conf"): self.autogen_corosync_cfg = 1 def new_test(self, name, description, with_cpg = 0): test = Test(name, description, self.verbose, with_cpg) self.tests.append(test) return test def print_list(self): print "\n==== %d TESTS FOUND ====" % (len(self.tests)) print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION") print "%35s - %s" % ("--------------------", "--------------------") for test in self.tests: print "%35s - %s" % (test.name, test.description) print "==== END OF LIST ====\n" def run_single(self, name): for test in self.tests: if test.name == name: test.run() break; def run_tests_matching(self, pattern): for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_cpg_only(self): for test in self.tests: if test.enable_corosync: test.run() def run_no_cpg(self): for test in self.tests: if not test.enable_corosync: test.run() def run_tests(self): for test in self.tests: test.run() def exit(self): for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != 0: sys.exit(-1) sys.exit(0) def print_results(self): failures = 0; success = 0; print "\n\n======= FINAL RESULTS ==========" print "\n--- FAILURE RESULTS:" for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != 0: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print " None" print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures) def build_api_sanity_tests(self): verbose_arg = "" if self.verbose: verbose_arg = "-V" test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg)) test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", 1) test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-m %s" % (verbose_arg)) def build_custom_timeout_tests(self): # custom timeout without topology test = self.new_test("cpg_custom_timeout_1", "Verify per device timeouts work as expected without using topology.", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4\"") test.add_cmd("stonith_admin", "-F node3 -t 2") # timeout is 2+1+4 = 7 test.add_stonith_log_pattern("remote op timeout set to 7") # custom timeout _WITH_ topology test = self.new_test("cpg_custom_timeout_2", "Verify per device timeouts work as expected _WITH_ topology.", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4000\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r 
node3 -i 2 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 3 -v false2") test.add_cmd("stonith_admin", "-F node3 -t 2") # timeout is 2+1+4000 = 4003 test.add_stonith_log_pattern("remote op timeout set to 4003") def build_standalone_tests(self): test_types = [ { "prefix" : "standalone" , "use_cpg" : 0, }, { "prefix" : "cpg" , "use_cpg" : 1, }, ] # test what happens when all devices timeout for test_type in test_types: test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], "Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false3 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 194) if test_type["use_cpg"] == 1: test.add_stonith_log_pattern("remote op timeout set to 6") test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62") test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -62") test.add_stonith_log_pattern("for host 'node3' with device 'false3' returned: -62") # test what happens when multiple devices can fence a node, but the first device fails. for test_type in test_types: test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-F node3 -t 2") if test_type["use_cpg"] == 1: test.add_stonith_log_pattern("remote op timeout set to 6") # simple topology test for one device for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_simple" % test_type["prefix"], "Verify all fencing devices at a level are used.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("remote op timeout set to 2") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # test what happens when the first fencing level has multiple devices. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_device_fails" % test_type["prefix"], "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("remote op timeout set to 4") test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -62") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # test what happens when the first fencing level fails. 
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], "Verify if one level fails, the next leve is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true4 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("remote op timeout set to 12") test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62") test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -62") test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") # Test what happens if multiple fencing levels are defined, and then the first one is removed. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_level_removal" % test_type["prefix"], "Verify level removal works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true4 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") # Now remove level 2, verify none of the devices in level two are hit. test.add_cmd("stonith_admin", "-d node3 -i 2") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("remote op timeout set to 8") test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62") test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: -62") test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: -1001") test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") # test the stonith builds the correct list of devices that can fence a node. 
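Before the device-list test, a short aside on how the multi-level expectations above are met: when every device in the current level fails, the fencer advances to the next level that actually has devices registered and retries there; only when no level is left does the operation fail. A simplified sketch of that walk, assuming a per-target array of device lists indexed by level; the real implementation is stonith_topology_next() in fencing/remote.c below, and ST_LEVEL_MAX comes from include/crm/fencing/internal.h later in this patch:

    #include <glib.h>

    #define ST_LEVEL_MAX 10

    /* Return the next level (> current) that has devices, or -1 when the
     * levels are exhausted and the whole fencing operation must fail. */
    static int
    next_populated_level(GList *levels[ST_LEVEL_MAX], int current)
    {
        int lvl = current;

        do {
            lvl++;
        } while (lvl < ST_LEVEL_MAX && levels[lvl] == NULL);

        return (lvl < ST_LEVEL_MAX) ? lvl : -1;
    }

In the level-removal test this is why deleting level 2 drops the expected total timeout from 12 to 8: the walk simply never visits the two devices that used to sit at that level.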
for test_type in test_types: test = self.new_test("%s_list_devices" % test_type["prefix"], "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1") test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1") # simple test of device monitor for test_type in test_types: test = self.new_test("%s_monitor" % test_type["prefix"], "Verify device is reachable", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-Q true1") test.add_cmd("stonith_admin", "-Q false1") test.add_expected_fail_cmd("stonith_admin", "-Q true2", 237) # simple register test for test_type in test_types: test = self.new_test("%s_register" % test_type["prefix"], "Verify devices can be registered and un-registered", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-Q true1") test.add_cmd("stonith_admin", "-D true1") test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) + + # simple reboot test + for test_type in test_types: + test = self.new_test("%s_reboot" % test_type["prefix"], + "Verify devices can be rebooted", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") + + test.add_cmd("stonith_admin", "-B node3 -t 2") + + test.add_cmd("stonith_admin", "-D true1") + + test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) + # test fencing history. 
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_fence_history" % test_type["prefix"], "Verify last fencing operation is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-F node3 -t 2 -V") test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "") def setup_environment(self): if self.autogen_corosync_cfg: corosync_conf = ("""compatibility: whitetank totem { version: 2 token: 5000 token_retransmits_before_loss_const: 10 join: 1000 consensus: 6000 vsftype: none max_messages: 20 send_join: 45 clear_node_high_bit: yes secauth: off threads: 0 interface { ringnumber: 0 bindnetaddr: 127.0.0.1 mcastaddr: 226.94.1.1 mcastport: 4000 } } logging { debug: off fileline: off to_syslog: yes to_stderr: off syslog_facility: daemon timestamp: on } amf { mode: disabled } quorum { provider: corosync_votequorum expected_votes: 2 votes: 1 wait_for_all: 0 last_man_standing: 0 auto_tie_breaker: 0 }""") os.system("cat <<-END >>/etc/corosync/corosync.conf\n%s\nEND" % (corosync_conf)) os.system("cp /usr/share/pacemaker/tests/cts/fence_false /usr/sbin/fence_false") os.system("cp /usr/share/pacemaker/tests/cts/fence_true /usr/sbin/fence_true") def cleanup_environment(self): if self.autogen_corosync_cfg: os.system("rm -f /etc/corosync/corosync.conf") class TestOptions: def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['invalid-arg'] = "" self.options['cpg-only'] = 0 self.options['no-cpg'] = 0 self.options['show-usage'] = 0 def build_options(self, argv): args = argv[1:] skip = 0 for i in range(0, len(args)): if skip: skip = 0 continue elif args[i] == "-h" or args[i] == "--help": self.options['show-usage'] = 1 elif args[i] == "-l" or args[i] == "--list-tests": self.options['list-tests'] = 1 elif args[i] == "-V" or args[i] == "--verbose": self.options['verbose'] = 1 elif args[i] == "-n" or args[i] == "--no-cpg": self.options['no-cpg'] = 1 elif args[i] == "-c" or args[i] == "--cpg-only": self.options['cpg-only'] = 1 elif args[i] == "-r" or args[i] == "--run-only": self.options['run-only'] = args[i+1] skip = 1 elif args[i] == "-p" or args[i] == "--run-only-pattern": self.options['run-only-pattern'] = args[i+1] skip = 1 def show_usage(self): print "usage: " + sys.argv[0] + " [options]" print "If no options are provided, all tests will run" print "Options:" print "\t [--help | -h] Show usage" print "\t [--list-tests | -l] Print out all registered tests." print "\t [--cpg-only | -c] Only run tests that require corosync." print "\t [--no-cpg | -n] Only run tests that do not require corosync" print "\t [--run-only | -r 'testname'] Run a specific test" print "\t [--verbose | -V] Verbose output" print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value" print "\n\tExample: Run only the test 'start_top'" print "\t\t python ./regression.py --run-only start_stop" print "\n\tExample: Run only the tests with the string 'systemd' present in them" print "\t\t python ./regression.py --run-only-pattern systemd" def main(argv): o = TestOptions() o.build_options(argv) tests = Tests(o.options['verbose']) tests.build_standalone_tests() tests.build_custom_timeout_tests() tests.build_api_sanity_tests() print "Starting ..." 
tests.setup_environment() if o.options['list-tests']: tests.print_list() elif o.options['show-usage']: o.show_usage() elif o.options['run-only-pattern'] != "": tests.run_tests_matching(o.options['run-only-pattern']) tests.print_results() elif o.options['run-only'] != "": tests.run_single(o.options['run-only']) tests.print_results() elif o.options['no-cpg']: tests.run_no_cpg() tests.print_results() elif o.options['cpg-only']: tests.run_cpg_only() tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_environment() tests.exit() if __name__=="__main__": main(sys.argv) diff --git a/fencing/remote.c b/fencing/remote.c index cc57bb7950..cc273a4c17 100644 --- a/fencing/remote.c +++ b/fencing/remote.c @@ -1,794 +1,795 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef struct st_query_result_s { char *host; int devices; GListPtr device_list; GHashTable *custom_action_timeouts; } st_query_result_t; GHashTable *remote_op_list = NULL; void call_remote_stonith(remote_fencing_op_t *op, st_query_result_t *peer); extern xmlNode *stonith_create_op( int call_id, const char *token, const char *op, xmlNode *data, int call_options); static void free_remote_query(gpointer data) { if(data) { st_query_result_t *query = data; crm_trace("Free'ing query result from %s", query->host); free(query->host); g_hash_table_destroy(query->custom_action_timeouts); free(query); } } static void free_remote_op(gpointer data) { remote_fencing_op_t *op = data; crm_trace("Free'ing op %s for %s", op->id, op->target); crm_log_xml_debug(op->request, "Destroying"); free(op->id); free(op->action); free(op->target); free(op->client_id); free(op->client_name); free(op->originator); if(op->query_timer) { g_source_remove(op->query_timer); } if(op->op_timer) { g_source_remove(op->op_timer); } if(op->query_results) { g_list_free_full(op->query_results, free_remote_query); } if(op->request) { free_xml(op->request); op->request = NULL; } free(op); } static void remote_op_done(remote_fencing_op_t *op, xmlNode *data, int rc) { xmlNode *reply = NULL; xmlNode *local_data = NULL; xmlNode *notify_data = NULL; op->completed = time(NULL); if(op->query_timer) { g_source_remove(op->query_timer); op->query_timer = 0; } if(op->op_timer) { g_source_remove(op->op_timer); op->op_timer = 0; } if(data == NULL) { data = create_xml_node(NULL, "remote-op"); local_data = data; } else { op->delegate = crm_element_value_copy(data, F_ORIG); } crm_xml_add_int(data, "state", op->state); crm_xml_add(data, F_STONITH_TARGET, op->target); crm_xml_add(data, F_STONITH_OPERATION, op->action); if(op->request != NULL) { reply = stonith_construct_reply(op->request, NULL, data, 
rc); crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate); do_crm_log(rc==pcmk_ok?LOG_NOTICE:LOG_ERR, "Operation %s of %s by %s for %s[%s]: %s", op->action, op->target, op->delegate?op->delegate:"", op->originator, op->client_id, pcmk_strerror(rc)); } else { crm_err("Already sent notifications for '%s of %s by %s' (op=%s, for=%s, state=%d): %s", op->action, op->target, op->delegate, op->id, op->client_id, op->state, pcmk_strerror(rc)); return; } if(reply) { do_local_reply(reply, op->client_id, op->call_options & st_opt_sync_call, FALSE); } /* Do notification with a clean data object */ notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE); crm_xml_add_int(notify_data, "state", op->state); crm_xml_add_int(notify_data, F_STONITH_RC, rc); crm_xml_add(notify_data, F_STONITH_TARGET, op->target); crm_xml_add(notify_data, F_STONITH_OPERATION, op->action); crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate); crm_xml_add(notify_data, F_STONITH_REMOTE, op->id); crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator); crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id); crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name); do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data, NULL); free_xml(notify_data); free_xml(local_data); free_xml(reply); /* Free non-essential parts of the record * Keep the record around so we can query the history */ if(op->query_results) { g_list_free_full(op->query_results, free_remote_query); op->query_results = NULL; } if(op->request) { free_xml(op->request); op->request = NULL; } } static gboolean remote_op_timeout(gpointer userdata) { remote_fencing_op_t *op = userdata; op->query_timer = 0; if(op->state == st_done) { crm_debug("Action %s (%s) for %s already completed", op->action, op->id, op->target); return FALSE; } crm_debug("Action %s (%s) for %s timed out", op->action, op->id, op->target); remote_op_done(op, NULL, -ETIME); op->state = st_failed; return FALSE; } static gboolean remote_op_query_timeout(gpointer data) { remote_fencing_op_t *op = data; op->query_timer = 0; if(op->state == st_done) { crm_debug("Operation %s for %s already completed", op->id, op->target); } else if(op->state == st_exec) { crm_debug("Operation %s for %s already in progress", op->id, op->target); } else if(op->query_results) { crm_debug("Query %s for %s complete: %d", op->id, op->target, op->state); call_remote_stonith(op, NULL); } else { if(op->op_timer) { g_source_remove(op->op_timer); op->op_timer = 0; } remote_op_timeout(op); } return FALSE; } static int stonith_topology_next(remote_fencing_op_t *op) { stonith_topology_t *tp = NULL; if(op->target) { /* Queries don't have a target set */ tp = g_hash_table_lookup(topology, op->target); } if(tp == NULL) { return pcmk_ok; } set_bit(op->call_options, st_opt_topology); do { op->level++; } while(op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL); if(op->level < ST_LEVEL_MAX) { crm_trace("Attempting fencing level %d for %s (%d devices)", op->level, op->target, g_list_length(tp->levels[op->level])); op->devices = tp->levels[op->level]; return pcmk_ok; } crm_notice("All fencing options for %s failed", op->target); return -EINVAL; } void *create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) { remote_fencing_op_t *op = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, request, LOG_TRACE); if(remote_op_list == NULL) { remote_op_list = g_hash_table_new_full( crm_str_hash, g_str_equal, NULL, free_remote_op); } if(peer && dev) { const char *peer_id = 
crm_element_value(dev, F_STONITH_REMOTE); CRM_CHECK(peer_id != NULL, return NULL); op = g_hash_table_lookup(remote_op_list, peer_id); if(op) { crm_debug("%s already exists", peer_id); return op; } } op = calloc(1, sizeof(remote_fencing_op_t)); crm_element_value_int(request, F_STONITH_TIMEOUT, (int*)&(op->base_timeout)); if(peer && dev) { op->id = crm_element_value_copy(dev, F_STONITH_REMOTE); crm_trace("Recorded new stonith op: %s", op->id); } else { op->id = crm_generate_uuid(); crm_trace("Generated new stonith op: %s", op->id); } g_hash_table_replace(remote_op_list, op->id, op); CRM_LOG_ASSERT(g_hash_table_lookup(remote_op_list, op->id) != NULL); op->state = st_query; op->action = crm_element_value_copy(dev, F_STONITH_ACTION); op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN); if(op->originator == NULL) { /* Local request */ op->originator = strdup(stonith_our_uname); } if(client) { op->client_id = strdup(client); } op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME); op->target = crm_element_value_copy(dev, F_STONITH_TARGET); op->request = copy_xml(request); /* TODO: Figure out how to avoid this */ crm_element_value_int(request, F_STONITH_CALLOPTS, (int*)&(op->call_options)); if(op->call_options & st_opt_cs_nodeid) { int nodeid = crm_atoi(op->target, NULL); crm_node_t *node = crm_get_peer(nodeid, NULL); /* Ensure the conversion only happens once */ op->call_options &= ~st_opt_cs_nodeid; if(node && node->uname) { free(op->target); op->target = strdup(node->uname); } else { crm_warn("Could not expand nodeid '%s' into a host name (%p)", op->target, node); } } if(stonith_topology_next(op) != pcmk_ok) { op->state = st_failed; } return op; } remote_fencing_op_t *initiate_remote_stonith_op(stonith_client_t *client, xmlNode *request, gboolean manual_ack) { xmlNode *query = NULL; const char *client_id = NULL; remote_fencing_op_t *op = NULL; if(client) { client_id = client->id; } else { client_id = crm_element_value(request, F_STONITH_CLIENTID); } CRM_LOG_ASSERT(client_id != NULL); op = create_remote_stonith_op(client_id, request, FALSE); query = stonith_create_op(0, op->id, STONITH_OP_QUERY, NULL, 0); if(!manual_ack) { op->query_timer = g_timeout_add(100*op->base_timeout, remote_op_query_timeout, op); } else { crm_xml_add(query, F_STONITH_DEVICE, "manual_ack"); } crm_xml_add(query, F_STONITH_REMOTE, op->id); crm_xml_add(query, F_STONITH_TARGET, op->target); crm_xml_add(query, F_STONITH_ACTION, op->action); crm_xml_add(query, F_STONITH_ORIGIN, op->originator); crm_xml_add(query, F_STONITH_CLIENTID, op->client_id); crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name); crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout); crm_info("Initiating remote operation %s for %s: %s", op->action, op->target, op->id); CRM_CHECK(op->action, return NULL); send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); free_xml(query); return op; } static gint sort_strings(gconstpointer a, gconstpointer b) { return strcmp(a, b); } static st_query_result_t *stonith_choose_peer(remote_fencing_op_t *op) { GListPtr iter = NULL; do { if(op->devices) { crm_trace("Checking for someone to fence %s with %s", op->target, (char*)op->devices->data); } else { crm_trace("Checking for someone to fence %s", op->target); } for(iter = op->query_results; iter != NULL; iter = iter->next) { st_query_result_t *peer = iter->data; if(is_set(op->call_options, st_opt_topology)) { /* Do they have the next device of the current fencing level? 
*/ GListPtr match = NULL; if(op->devices) { match = g_list_find_custom(peer->device_list, op->devices->data, sort_strings); } if(match) { crm_trace("Removing %s from %s (%d remaining)", (char*)match->data, peer->host, g_list_length(peer->device_list)); peer->device_list = g_list_remove(peer->device_list, match->data); return peer; } } else if(peer && peer->devices > 0) { /* No topology: Use the current best peer */ crm_trace("Simple fencing"); return peer; } } /* Try the next fencing level if there is one */ } while(is_set(op->call_options, st_opt_topology) && stonith_topology_next(op) == pcmk_ok); if(op->devices) { crm_trace("Couldn't find anyone to fence %s with %s", op->target, (char*)op->devices->data); } else { crm_trace("Couldn't find anyone to fence %s", op->target); } return NULL; } static int get_device_timeout(st_query_result_t *peer, const char *device, int default_timeout) { gpointer res; if (!peer || !device) { return default_timeout; } res = g_hash_table_lookup(peer->custom_action_timeouts, device); return res ? GPOINTER_TO_INT(res) : default_timeout; } static int get_op_total_timeout(remote_fencing_op_t *op, st_query_result_t *chosen_peer, int default_timeout) { stonith_topology_t *tp = g_hash_table_lookup(topology, op->target); int total_timeout = 0; if (is_set(op->call_options, st_opt_topology) && tp) { int i; GListPtr device_list = NULL; GListPtr iter = NULL; /* Yep, this looks scary, nested loops all over the place. * Here is what is going on. * Loop1: Iterate through fencing levels. * Loop2: If a fencing level has devices, loop through each device * Loop3: For each device in a fencing level, see what peer owns it * and what that peer has reported the timeout is for the device. */ for (i = 0; i < ST_LEVEL_MAX; i++) { if (!tp->levels[i]) { continue; } for (device_list = tp->levels[i]; device_list; device_list = device_list->next) { for(iter = op->query_results; iter != NULL; iter = iter->next) { st_query_result_t *peer = iter->data; if (g_list_find_custom(peer->device_list, device_list->data, sort_strings)) { total_timeout += get_device_timeout(chosen_peer, device_list->data, default_timeout); break; } } /* End Loop3: match device with peer that owns device, find device's timeout period */ } /* End Loop2: iterate through devices at a specific level */ } /*End Loop1: iterate through fencing levels */ } else if (chosen_peer) { GListPtr cur = NULL; for (cur = chosen_peer->device_list; cur; cur = cur->next) { total_timeout += get_device_timeout(chosen_peer, cur->data, default_timeout); } } else { total_timeout = default_timeout; } return total_timeout ? total_timeout : default_timeout; } static void report_timeout_period(remote_fencing_op_t *op, int op_timeout) { xmlNode *update = NULL; const char *client_node = NULL; const char *client_id = NULL; const char *call_id = NULL; if (op->call_options & st_opt_sync_call) { /* There is no reason to report the timeout for a syncronous call. It * is impossible to use the reported timeout to do anything when the client * is blocking for the response. This update is only important for * async calls that require a callback to report the results in. 
*/ return; } else if (!op->request) { return; } client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE); call_id = crm_element_value(op->request, F_STONITH_CALLID); client_id = crm_element_value(op->request, F_STONITH_CLIENTID); if (!client_node || !call_id || !client_id) { return; } if (safe_str_eq(client_node, stonith_our_uname)) { /* The client is connected to this node, send the update direclty to them */ do_stonith_async_timeout_update(client_id, call_id, op_timeout); return; } /* The client is connected to another node, relay this update to them */ update = stonith_create_op(0, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0); crm_xml_add(update, F_STONITH_REMOTE, op->id); crm_xml_add(update, F_STONITH_CLIENTID, client_id); crm_xml_add(update, F_STONITH_CALLID, call_id); crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout); send_cluster_message(client_node, crm_msg_stonith_ng, update, FALSE); free_xml(update); } void call_remote_stonith(remote_fencing_op_t *op, st_query_result_t *peer) { const char *device = NULL; int timeout = op->base_timeout; if(peer == NULL && !is_set(op->call_options, st_opt_topology)) { peer = stonith_choose_peer(op); } if(!op->op_timer) { int op_timeout = get_op_total_timeout(op, peer, op->base_timeout); op->op_timer = g_timeout_add((1200 * op_timeout), remote_op_timeout, op); report_timeout_period(op, op_timeout); crm_info("Total remote op timeout set to %d for fencing of node %s", op_timeout, op->target); } if(is_set(op->call_options, st_opt_topology)) { /* Ignore any preference, they might not have the device we need */ /* When using topology, the stonith_choose_peer function pops off * the peer from the op's query results. Make sure to calculate * the op_timeout before calling this function when topology is in use */ peer = stonith_choose_peer(op); device = op->devices->data; timeout = get_device_timeout(peer, device, timeout); } if(peer) { xmlNode *query = stonith_create_op(0, op->id, STONITH_OP_FENCE, NULL, 0); crm_xml_add(query, F_STONITH_REMOTE, op->id); crm_xml_add(query, F_STONITH_TARGET, op->target); crm_xml_add(query, F_STONITH_ACTION, op->action); crm_xml_add(query, F_STONITH_ORIGIN, op->originator); crm_xml_add(query, F_STONITH_CLIENTID, op->client_id); crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name); crm_xml_add_int(query, F_STONITH_TIMEOUT, timeout); if(device) { crm_info("Requesting that %s perform op %s %s with %s", peer->host, op->action, op->target, device); crm_xml_add(query, F_STONITH_DEVICE, device); crm_xml_add(query, F_STONITH_MODE, "slave"); } else { crm_info("Requesting that %s perform op %s %s", peer->host, op->action, op->target); crm_xml_add(query, F_STONITH_MODE, "smart"); } op->state = st_exec; send_cluster_message(peer->host, crm_msg_stonith_ng, query, FALSE); free_xml(query); return; } else if(op->query_timer == 0) { /* We've exhausted all available peers */ crm_info("No remaining peers capable of terminating %s", op->target); remote_op_timeout(op); } else if(device) { crm_info("Waiting for additional peers capable of terminating %s with %s", op->target, device); } else { crm_info("Waiting for additional peers capable of terminating %s", op->target); } free_remote_query(peer); } static gint sort_peers(gconstpointer a, gconstpointer b) { const st_query_result_t *peer_a = a; const st_query_result_t *peer_b = a; if(peer_a->devices > peer_b->devices) { return -1; } else if(peer_a->devices > peer_b->devices) { return 1; } return 0; } int process_remote_stonith_query(xmlNode *msg) { int devices = 0; const char *id 
= NULL; const char *host = NULL; remote_fencing_op_t *op = NULL; st_query_result_t *result = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_REMOTE, msg, LOG_ERR); xmlNode *child = NULL; CRM_CHECK(dev != NULL, return -EPROTO); id = crm_element_value(dev, F_STONITH_REMOTE); CRM_CHECK(id != NULL, return -EPROTO); dev = get_xpath_object("//@st-available-devices", msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); crm_element_value_int(dev, "st-available-devices", &devices); op = g_hash_table_lookup(remote_op_list, id); if(op == NULL) { crm_debug("Unknown or expired remote op: %s", id); return -EOPNOTSUPP; } op->replies++; host = crm_element_value(msg, F_ORIG); if(devices <= 0) { /* If we're doing 'known' then we might need to fire anyway */ crm_trace("Query result from %s (%d devices)", host, devices); return pcmk_ok; } else if(op->call_options & st_opt_allow_suicide) { crm_trace("Allowing %s to potentialy fence itself", op->target); } else if(safe_str_eq(host, op->target)) { crm_info("Ignoring reply from %s, hosts are not permitted to commit suicide", op->target); return pcmk_ok; } crm_debug("Query result from %s (%d devices)", host, devices); result = calloc(1, sizeof(st_query_result_t)); result->host = strdup(host); result->devices = devices; result->custom_action_timeouts = g_hash_table_new_full( crm_str_hash, g_str_equal, free, NULL); for (child = __xml_first_child(dev); child != NULL; child = __xml_next(child)) { const char *device = ID(child); int action_timeout = 0; if(device) { result->device_list = g_list_prepend(result->device_list, strdup(device)); crm_element_value_int(child, F_STONITH_ACTION_TIMEOUT, &action_timeout); if (action_timeout) { crm_trace("Peer %s with device %s returned action timeout %d", result->host, device, action_timeout); g_hash_table_insert(result->custom_action_timeouts, strdup(device), GINT_TO_POINTER(action_timeout)); } } } CRM_CHECK(devices == g_list_length(result->device_list), crm_err("Mis-match: Query claimed to have %d devices but %d found", devices, g_list_length(result->device_list))); op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers); if(op->state == st_query && is_set(op->call_options, st_opt_all_replies) == FALSE) { call_remote_stonith(op, result); } else if(op->state == st_done) { crm_info("Discarding query result from %s (%d devices): Operation is in state %d", result->host, result->devices, op->state); } return pcmk_ok; } int process_remote_stonith_exec(xmlNode *msg) { int rc = 0; const char *id = NULL; remote_fencing_op_t *op = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_REMOTE, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); id = crm_element_value(dev, F_STONITH_REMOTE); CRM_CHECK(id != NULL, return -EPROTO); dev = get_xpath_object("//@"F_STONITH_RC, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); crm_element_value_int(dev, F_STONITH_RC, &rc); if(remote_op_list) { op = g_hash_table_lookup(remote_op_list, id); } if(op == NULL && rc == pcmk_ok) { /* Record successful fencing operations */ const char *client_id = crm_element_value(msg, F_STONITH_CLIENTID); op = create_remote_stonith_op(client_id, msg, TRUE); } if(op == NULL) { /* Could be for an event that began before we started */ /* TODO: Record the op for later querying */ crm_info("Unknown or expired remote op: %s", id); return -EOPNOTSUPP; } if(is_set(op->call_options, st_opt_topology)) { const char *device = crm_element_value(msg, F_STONITH_DEVICE); crm_notice("Call to %s for %s on behalf of %s: %s (%d)", device, op->target, 
op->originator, rc == pcmk_ok?"passed":"failed", rc); if(safe_str_eq(op->originator, stonith_our_uname)) { if(op->state == st_done) { remote_op_done(op, msg, rc); return rc; } else if(rc == pcmk_ok && op->devices) { /* Success, are there any more? */ op->devices = op->devices->next; } if(op->devices == NULL) { crm_trace("Broadcasting completion of complex fencing op for %s", op->target); send_cluster_message(NULL, crm_msg_stonith_ng, msg, FALSE); op->state = st_done; return rc; } } else { op->state = st_done; remote_op_done(op, msg, rc); + return rc; } } else if(rc == pcmk_ok && op->devices == NULL) { crm_trace("All done for %s", op->target); op->state = st_done; remote_op_done(op, msg, rc); return rc; } /* Retry on failure or execute the rest of the topology */ crm_trace("Next for %s (rc was %d)", op->target, rc); call_remote_stonith(op, NULL); return rc; } int stonith_fence_history(xmlNode *msg, xmlNode **output) { int rc = 0; const char *target = NULL; xmlNode *dev = get_xpath_object("//@"F_STONITH_TARGET, msg, LOG_TRACE); if(dev) { int options = 0; target = crm_element_value(dev, F_STONITH_TARGET); crm_element_value_int(msg, F_STONITH_CALLOPTS, &options); if(target && (options & st_opt_cs_nodeid)) { int nodeid = crm_atoi(target, NULL); crm_node_t *node = crm_get_peer(nodeid, NULL); if(node) { target = node->uname; } } } *output = create_xml_node(NULL, F_STONITH_HISTORY_LIST); if (remote_op_list) { GHashTableIter iter; remote_fencing_op_t *op = NULL; g_hash_table_iter_init(&iter, remote_op_list); while(g_hash_table_iter_next(&iter, NULL, (void**)&op)) { xmlNode *entry = NULL; if (target && strcmp(op->target, target) != 0) { continue; } rc = 0; entry = create_xml_node(*output, STONITH_OP_EXEC); crm_xml_add(entry, F_STONITH_TARGET, op->target); crm_xml_add(entry, F_STONITH_ACTION, op->action); crm_xml_add(entry, F_STONITH_ORIGIN, op->originator); crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate); crm_xml_add_int(entry, F_STONITH_DATE, op->completed); crm_xml_add_int(entry, F_STONITH_STATE, op->state); } } return rc; } diff --git a/fencing/test.c b/fencing/test.c index 0c9022653a..b33bf14078 100644 --- a/fencing/test.c +++ b/fencing/test.c @@ -1,641 +1,643 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include GMainLoop *mainloop = NULL; crm_trigger_t *trig = NULL; int mainloop_iter = 0; int callback_rc = 0; typedef void (*mainloop_test_iteration_cb)(int check_event); #define MAINLOOP_DEFAULT_TIMEOUT 2 #define mainloop_test_done(pass) \ if (pass) { \ crm_info("SUCCESS - %s", __PRETTY_FUNCTION__); \ mainloop_iter++; \ mainloop_set_trigger(trig); \ } else { \ crm_info("FAILURE = %s async_callback %d", __PRETTY_FUNCTION__, callback_rc); \ exit(-1); \ } \ callback_rc = 0; \ /* *INDENT-OFF* */ enum test_modes { /* class dev test using a very specific environment */ test_standard = 0, /* watch notifications only */ test_passive, /* sanity test stonith client api using fence_true and fence_false */ test_api_sanity, /* sanity test mainloop code with async respones. */ test_api_mainloop, }; static struct crm_option long_options[] = { {"verbose", 0, 0, 'V'}, {"version", 0, 0, '$'}, {"help", 0, 0, '?'}, {"passive", 0, 0, 'p'}, {"api_test", 0, 0, 't'}, {"mainloop_api_test", 0, 0, 'm'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ stonith_t *st = NULL; struct pollfd pollfd; int st_opts = st_opt_sync_call; int expected_notifications = 0; int verbose = 0; static void dispatch_helper(int timeout) { int rc; crm_debug("Looking for notification"); pollfd.events = POLLIN; while(true) { rc = poll( &pollfd, 1, timeout); /* wait 10 minutes, -1 forever */ if (rc > 0 ) { - stonith_dispatch( st ); + if (!stonith_dispatch(st)) { + break; + } } else { break; } } } static void st_callback(stonith_t *st, stonith_event_t *e) { if(st->state == stonith_disconnected) { exit(1); } crm_notice("Operation %s requested by %s %s for peer %s. %s reported: %s (ref=%s)", e->operation, e->origin, e->result == pcmk_ok?"completed":"failed", e->target, e->executioner ? 
e->executioner : "", pcmk_strerror(e->result), e->id); if (expected_notifications) { expected_notifications--; } } static void st_global_callback(stonith_t * stonith, stonith_callback_data_t *data) { crm_notice("Call id %d completed with rc %d", data->call_id, data->rc); } static void passive_test(void) { int rc = 0; rc = st->cmds->connect(st, crm_system_name, &pollfd.fd); crm_debug("Connect: %d", rc); st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback", st_global_callback); dispatch_helper(600 * 1000); } #define single_test(cmd, str, num_notifications, expected_rc) \ { \ int rc = 0; \ rc = cmd; \ expected_notifications = 0; \ if (num_notifications) { \ expected_notifications = num_notifications; \ dispatch_helper(500); \ } \ if (rc != expected_rc) { \ crm_info("FAILURE - expected rc %d != %d(%s) for cmd - %s\n", expected_rc, rc, pcmk_strerror(rc), str); \ exit(-1); \ } else if (expected_notifications) { \ crm_info("FAILURE - expected %d notifications, got only %d for cmd - %s\n", \ num_notifications, num_notifications - expected_notifications, str); \ exit(-1); \ } else { \ if (verbose) { \ crm_info("SUCCESS - %s: %d", str, rc); \ } else { \ crm_debug("SUCCESS - %s: %d", str, rc); \ } \ } \ }\ static void run_fence_failure_test(void) { stonith_key_value_t *params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); single_test(st->cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params), "Register device1 for failure test", 1, 0); single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3), "Fence failure results off", 1, -62); single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3), "Fence failure results reboot", 1, -62); single_test(st->cmds->remove_device(st, st_opts, "test-id1"), "Remove device1 for failure test", 1, 0); stonith_key_value_freeall(params, 1, 1); } static void run_fence_failure_rollover_test(void) { stonith_key_value_t *params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); single_test(st->cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params), "Register device1 for rollover test", 1, 0); single_test(st->cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_true", params), "Register device2 for rollover test", 1, 0); single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3), "Fence rollover results off", 1, 0); single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3), "Fence rollover results on", 1, 0); single_test(st->cmds->remove_device(st, st_opts, "test-id1"), "Remove device1 for rollover tests", 1, 0); single_test(st->cmds->remove_device(st, st_opts, "test-id2"), "Remove device2 for rollover tests", 1, 0); stonith_key_value_freeall(params, 1, 1); } static void run_standard_test(void) { stonith_key_value_t *params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); single_test(st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_true", params), "Register", 1, 0); single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0); 
single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0); single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node2", 1), "Status false_1_node2", 1, 0); single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1), "Status false_1_node1", 1, 0); single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1), "Fence unknown-host (expected failure)", 0, -113); single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1), "Fence false_1_node1", 1, 0); single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1), "Unfence false_1_node1", 1, 0); single_test(st->cmds->remove_device(st, st_opts, "test-id"), "Remove test-id", 1, 0); stonith_key_value_freeall(params, 1, 1); } static void sanity_tests(void) { int rc = 0; rc = st->cmds->connect(st, crm_system_name, &pollfd.fd); crm_debug("Connect: %d", rc); st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback", st_global_callback); crm_info("Starting API Sanity Tests"); run_standard_test(); run_fence_failure_test(); run_fence_failure_rollover_test(); crm_info("Sanity Tests Passed"); } static void standard_dev_test(void) { int rc = 0; char *tmp = NULL; stonith_key_value_t *params = NULL; rc = st->cmds->connect(st, crm_system_name, &pollfd.fd); crm_debug("Connect: %d", rc); params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4"); rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params); crm_debug("Register: %d", rc); rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10); crm_debug("List: %d output: %s\n", rc, tmp ? 
tmp : ""); rc = st->cmds->monitor(st, st_opts, "test-id", 10); crm_debug("Monitor: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node2", 10); crm_debug("Status false_1_node2: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60); crm_debug("Fence unknown-host: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60); crm_debug("Fence false_1_node1: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10); crm_debug("Unfence false_1_node1: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); rc = st->cmds->fence(st, st_opts, "some-host", "off", 10); crm_debug("Fence alias: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10); crm_debug("Status alias: %d", rc); rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10); crm_debug("Unfence false_1_node1: %d", rc); rc = st->cmds->remove_device(st, st_opts, "test-id"); crm_debug("Remove test-id: %d", rc); stonith_key_value_freeall(params, 1, 1); } static void iterate_mainloop_tests(gboolean event_ready); static void mainloop_callback(stonith_t * stonith, stonith_callback_data_t *data) { callback_rc = data->rc; iterate_mainloop_tests(TRUE); } static int register_callback_helper(int callid) { return st->cmds->register_callback( st, callid, MAINLOOP_DEFAULT_TIMEOUT, st_opt_timeout_updates, NULL, "callback", mainloop_callback); } static void test_async_fence_pass(int check_event) { int rc = 0; if (check_event) { if (callback_rc != 0) { mainloop_test_done(FALSE); } else { mainloop_test_done(TRUE); } return; } rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(FALSE); } register_callback_helper(rc); /* wait for event */ } #define CUSTOM_TIMEOUT_ADDITION 10 static void test_async_fence_custom_timeout(int check_event) { int rc = 0; static time_t begin = 0; if (check_event) { uint32_t diff = (time(NULL) - begin); if (callback_rc != -ETIME) { mainloop_test_done(FALSE); } else if (diff < CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT) { crm_err("Custom timeout test failed, callback expiration should be updated to %d, actual timeout was %d", CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT, diff); mainloop_test_done(FALSE); } else { mainloop_test_done(TRUE); } return; } begin = time(NULL); rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(FALSE); } register_callback_helper(rc); /* wait for event */ } static void test_async_fence_timeout(int check_event) { int rc = 0; if (check_event) { if (callback_rc != -ETIME) { mainloop_test_done(FALSE); } else { mainloop_test_done(TRUE); } return; } rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(FALSE); } register_callback_helper(rc); /* wait for event */ } static void test_async_monitor(int check_event) { int rc = 0; if (check_event) { if (callback_rc) { mainloop_test_done(FALSE); } else { mainloop_test_done(TRUE); } return; 
} rc = st->cmds->monitor(st, 0, "false_1", MAINLOOP_DEFAULT_TIMEOUT); if (rc < 0) { crm_err("monitor failed with rc %d", rc); mainloop_test_done(FALSE); } register_callback_helper(rc); /* wait for event */ } static void test_register_async_devices(int check_event) { char buf[16] = { 0, }; stonith_key_value_t *params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2"); st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_false", params); stonith_key_value_freeall(params, 1, 1); params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2"); st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_true", params); stonith_key_value_freeall(params, 1, 1); params = NULL; params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2"); snprintf(buf, sizeof(buf) -1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION); params = stonith_key_value_add(params, "pcmk_off_timeout", buf); st->cmds->register_device(st, st_opts, "false_custom_timeout", "stonith-ng", "fence_false", params); stonith_key_value_freeall(params, 1, 1); mainloop_test_done(TRUE); } static void try_mainloop_connect(int check_event) { int tries = 10; int i = 0; int rc = 0; for (i = 0; i < tries; i++) { rc = st->cmds->connect(st, crm_system_name, NULL); if (!rc) { crm_info("stonith client connection established"); mainloop_test_done(TRUE); return; } else { crm_info("stonith client connection failed"); } sleep(1); } crm_err("API CONNECTION FAILURE\n"); mainloop_test_done(FALSE); } static void iterate_mainloop_tests(gboolean event_ready) { static mainloop_test_iteration_cb callbacks[] = { try_mainloop_connect, test_register_async_devices, test_async_monitor, test_async_fence_pass, test_async_fence_timeout, test_async_fence_custom_timeout, }; if (mainloop_iter == (sizeof(callbacks) / sizeof(mainloop_test_iteration_cb))) { /* all tests ran, everything passed */ crm_info("ALL MAINLOOP TESTS PASSED!"); exit(0); } callbacks[mainloop_iter](event_ready); } static gboolean trigger_iterate_mainloop_tests(gpointer user_data) { iterate_mainloop_tests(FALSE); return TRUE; } static void test_shutdown(int nsig) { int rc = 0; if (st) { rc = st->cmds->disconnect(st); crm_info("Disconnect: %d", rc); crm_debug("Destroy"); stonith_api_delete(st); } if (rc) { exit(-1); } } static void mainloop_tests(void) { trig = mainloop_add_trigger(G_PRIORITY_HIGH, trigger_iterate_mainloop_tests, NULL); mainloop_set_trigger(trig); mainloop_add_signal(SIGTERM, test_shutdown); crm_info("Starting"); mainloop = g_main_new(FALSE); g_main_run(mainloop); } int main(int argc, char ** argv) { int argerr = 0; int flag; int option_index = 0; enum test_modes mode = test_standard; crm_set_options(NULL, "mode [options]", long_options, "Provides a summary of cluster's current state." "\n\nOutputs varying levels of detail in a number of different formats.\n"); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) { break; } switch(flag) { case 'V': verbose = 1; break; case '$': case '?': crm_help(flag, EX_OK); break; case 'p': mode = test_passive; break; case 't': mode = test_api_sanity; break; case 'm': mode = test_api_mainloop; break; default: ++argerr; break; } } crm_log_init("stonith-test", LOG_INFO, TRUE, verbose ? 
TRUE : FALSE, argc, argv, FALSE); if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', EX_USAGE); } crm_debug("Create"); st = stonith_api_new(); switch (mode) { case test_standard: standard_dev_test(); break; case test_passive: passive_test(); break; case test_api_sanity: sanity_tests(); break; case test_api_mainloop: mainloop_tests(); break; } test_shutdown(0); return 0; } diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h index 842c532945..eb21e66806 100644 --- a/include/crm/fencing/internal.h +++ b/include/crm/fencing/internal.h @@ -1,135 +1,121 @@ /* * Copyright (C) 2011 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef STONITH_NG_INTERNAL__H # define STONITH_NG_INTERNAL__H # include # include -typedef struct async_command_s { - - int id; - int pid; - int fd_stdout; - int options; - int default_timeout; - int timeout; - - char *op; - char *origin; - char *client; - char *client_name; - char *remote; - - char *victim; - char *action; - char *device; - char *mode; - - GListPtr device_list; - GListPtr device_next; - - void (*done)(GPid pid, gint status, gpointer user_data); - guint timer_sigterm; - guint timer_sigkill; - /*! If the operation timed out, this is the last signal - * we sent to the process to get it to terminate */ - int last_timeout_signo; -} async_command_t; - -int run_stonith_agent(const char *agent, const char *action, const char *victim, - GHashTable * dev_hash, GHashTable * port_map, int *agent_result, - char **output, async_command_t * track); +struct stonith_action_s; +typedef struct stonith_action_s stonith_action_t; + +stonith_action_t * +stonith_action_create(const char *agent, + const char *_action, + const char *victim, + int timeout, + GHashTable * device_args, + GHashTable * port_map); + +GPid +stonith_action_execute_async(stonith_action_t *action, + void *userdata, + void (*done)(GPid pid, int rc, const char *output, gpointer user_data)); + +int +stonith_action_execute(stonith_action_t *action, + int *agent_result, + char **output); gboolean is_redhat_agent(const char *agent); xmlNode *create_level_registration_xml(const char *node, int level, stonith_key_value_t * device_list); xmlNode *create_device_registration_xml(const char *id, const char *namespace, const char *agent, stonith_key_value_t * params); #define ST_LEVEL_MAX 10 #define F_STONITH_CLIENTID "st_clientid" #define F_STONITH_CALLOPTS "st_callopt" #define F_STONITH_CALLID "st_callid" #define F_STONITH_CALLDATA "st_calldata" #define F_STONITH_OPERATION "st_op" #define F_STONITH_TARGET "st_target" #define F_STONITH_REMOTE "st_remote_op" #define F_STONITH_RC "st_rc" /*! Timeout period per a device execution */ #define F_STONITH_TIMEOUT "st_timeout" /*! Action specific timeout period returned in query of fencing devices. 
*/ #define F_STONITH_ACTION_TIMEOUT "st_action_timeout" #define F_STONITH_CALLBACK_TOKEN "st_async_id" #define F_STONITH_CLIENTNAME "st_clientname" #define F_STONITH_CLIENTNODE "st_clientnode" #define F_STONITH_NOTIFY_TYPE "st_notify_type" #define F_STONITH_NOTIFY_ACTIVATE "st_notify_activate" #define F_STONITH_NOTIFY_DEACTIVATE "st_notify_deactivate" #define F_STONITH_DELEGATE "st_delegate" /*! The node initiating the stonith operation. If an operation * is relayed, this is the last node the operation lands on. When * in standalone mode, origin is the client's id that originated the * operation. */ #define F_STONITH_ORIGIN "st_origin" #define F_STONITH_HISTORY_LIST "st_history" #define F_STONITH_DATE "st_date" #define F_STONITH_STATE "st_state" #define F_STONITH_LEVEL "st_level" #define F_STONITH_ACTIVE "st_active" #define F_STONITH_DEVICE "st_device_id" #define F_STONITH_ACTION "st_device_action" #define F_STONITH_MODE "st_mode" #define T_STONITH_NG "stonith-ng" #define T_STONITH_REPLY "st-reply" /*! For async operations, an event from the server containing * the total amount of time the server is allowing for the operation * to take place is returned to the client. */ #define T_STONITH_TIMEOUT_VALUE "st-async-timeout-value" #define T_STONITH_NOTIFY "st_notify" #define STONITH_ATTR_ARGMAP "pcmk_arg_map" #define STONITH_ATTR_HOSTARG "pcmk_host_argument" #define STONITH_ATTR_HOSTMAP "pcmk_host_map" #define STONITH_ATTR_HOSTLIST "pcmk_host_list" #define STONITH_ATTR_HOSTCHECK "pcmk_host_check" #define STONITH_ATTR_ACTION_OP "action" #define STONITH_OP_EXEC "st_execute" #define STONITH_OP_TIMEOUT_UPDATE "st_timeout_update" #define STONITH_OP_QUERY "st_query" #define STONITH_OP_FENCE "st_fence" #define STONITH_OP_RELAY "st_relay" #define STONITH_OP_CONFIRM "st_confirm" #define STONITH_OP_DEVICE_ADD "st_device_register" #define STONITH_OP_DEVICE_DEL "st_device_remove" #define STONITH_OP_DEVICE_METADATA "st_device_metadata" #define STONITH_OP_FENCE_HISTORY "st_fence_history" #define STONITH_OP_LEVEL_ADD "st_level_add" #define STONITH_OP_LEVEL_DEL "st_level_remove" #define stonith_channel "st_command" #define stonith_channel_callback "st_callback" #endif diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c index 5d1a509c25..53df8b0768 100644 --- a/lib/fencing/st_client.c +++ b/lib/fencing/st_client.c @@ -1,2103 +1,2265 @@ /* * Copyright (c) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* Add it for compiling on OSX */ #include #include #include #include #include #ifdef HAVE_STONITH_STONITH_H # include # define LHA_STONITH_LIBRARY "libstonith.so.1" static void *lha_agents_lib = NULL; #endif #include CRM_TRACE_INIT_DATA(stonith); +struct stonith_action_s { + char *agent; + char *action; + char *victim; + char *args; + int timeout; + int async; + void *userdata; + void (*done_cb)(GPid pid, gint status, const char *output, gpointer user_data); + + /* async track data */ + int fd_stdout; + int last_timeout_signo; + guint timer_sigterm; + guint timer_sigkill; + + /* output data */ + GPid pid; + int rc; + char *output; +}; + typedef struct stonith_private_s { char *token; crm_ipc_t *ipc; mainloop_io_t *source; GHashTable *stonith_op_callback_table; GList *notify_list; void (*op_callback) (stonith_t * st, stonith_callback_data_t *data); } stonith_private_t; typedef struct stonith_notify_client_s { const char *event; const char *obj_id; /* implement one day */ const char *obj_type; /* implement one day */ void (*notify) (stonith_t * st, stonith_event_t *e); } stonith_notify_client_t; typedef struct stonith_callback_client_s { void (*callback) (stonith_t * st, stonith_callback_data_t *data); const char *id; void *user_data; gboolean only_success; gboolean allow_timeout_updates; struct timer_rec_s *timer; } stonith_callback_client_t; struct notify_blob_s { stonith_t *stonith; xmlNode *xml; }; struct timer_rec_s { int call_id; int timeout; guint ref; stonith_t *stonith; }; typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); static const char META_TEMPLATE[] = "\n" "\n" "\n" " 1.0\n" " \n" "%s\n" " \n" " %s\n" "%s\n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " 2.0\n" " \n" "\n"; bool stonith_dispatch(stonith_t * st); int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata); void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc); xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout); static void stonith_connection_destroy(gpointer user_data); static void stonith_send_notification(gpointer data, gpointer user_data); static void stonith_connection_destroy(gpointer user_data) { stonith_t *stonith = user_data; stonith_private_t *native = NULL; struct notify_blob_s blob; crm_trace("Sending destroyed notification"); blob.stonith = stonith; blob.xml = create_xml_node(NULL, "notify"); native = stonith->private; native->ipc = NULL; native->source = NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT); g_list_foreach(native->notify_list, stonith_send_notification, &blob); free_xml(blob.xml); } xmlNode * create_device_registration_xml(const char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); 
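A brief aside on the stonith_action_s struct introduced just above: it backs the replacement for run_stonith_agent() declared in include/crm/fencing/internal.h earlier in this patch. A hedged sketch of how a caller is expected to drive that API, using only the declared signatures; run_agent(), the callback my_done(), and the fixed 10-second timeout are illustrative rather than the daemon's actual call sites, and cleanup of the action object and output buffer is assumed, not shown by this hunk:

    #include <stdlib.h>
    #include <glib.h>
    #include <crm/crm.h>                 /* assumed source of crm_info() */
    #include <crm/fencing/internal.h>    /* declarations added by this patch */

    /* Hypothetical completion callback matching the declared prototype. */
    static void
    my_done(GPid pid, int rc, const char *output, gpointer user_data)
    {
        crm_info("Agent child %d finished: rc=%d", pid, rc);
    }

    static void
    run_agent(const char *agent, const char *victim,
              GHashTable *device_args, GHashTable *port_map, gboolean async)
    {
        stonith_action_t *action =
            stonith_action_create(agent, "off", victim, 10 /* seconds */,
                                  device_args, port_map);

        if (async) {
            /* Returns the child's PID; the result arrives via my_done(). */
            stonith_action_execute_async(action, NULL /* userdata */, my_done);

        } else {
            int agent_rc = 0;
            char *output = NULL;

            stonith_action_execute(action, &agent_rc, &output);
            crm_info("Agent finished synchronously: rc=%d", agent_rc);
            free(output);   /* assuming the caller owns the returned buffer */
        }
    }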
crm_xml_add(data, XML_ATTR_ID, id); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, "agent", agent); crm_xml_add(data, "namespace", namespace); for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int stonith_api_register_device(stonith_t * st, int call_options, const char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { int rc = 0; xmlNode *data = NULL; #if HAVE_STONITH_STONITH_H namespace = get_stonith_provider(agent, namespace); if (safe_str_eq(namespace, "heartbeat")) { stonith_key_value_add(params, "plugin", agent); agent = "fence_legacy"; } #endif data = create_device_registration_xml(id, namespace, agent, params); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, XML_ATTR_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_LEVEL); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add_int(data, XML_ATTR_ID, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); free_xml(data); return rc; } xmlNode * create_level_registration_xml(const char *node, int level, stonith_key_value_t * device_list) { xmlNode *data = create_xml_node(NULL, F_STONITH_LEVEL); crm_xml_add_int(data, XML_ATTR_ID, level); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, "origin", __FUNCTION__); for (; device_list; device_list = device_list->next) { xmlNode *dev = create_xml_node(data, F_STONITH_DEVICE); crm_xml_add(dev, XML_ATTR_ID, device_list->value); } return data; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, stonith_key_value_t * device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, level, device_list); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); free_xml(data); return rc; } static void append_arg(gpointer key, gpointer value, gpointer user_data) { int len = 3; /* =, \n, \0 */ int last = 0; char **args = user_data; CRM_CHECK(key != NULL, return); CRM_CHECK(value != NULL, return); if (strstr(key, "pcmk_")) { return; } else if (strstr(key, CRM_META)) { return; } else if (safe_str_eq(key, "crm_feature_set")) { return; } len += strlen(key); len += strlen(value); if (*args != NULL) { last = strlen(*args); } *args = realloc(*args, last + len); crm_trace("Appending: %s=%s", (char *)key, (char *)value); sprintf((*args) + last, "%s=%s\n", (char *)key, (char *)value); } static void append_const_arg(const char *key, const char *value, char **arg_list) { char *glib_sucks_key = strdup(key); char *glib_sucks_value = strdup(value); append_arg(glib_sucks_key, glib_sucks_value, arg_list); free(glib_sucks_value); free(glib_sucks_key); } static void append_host_specific_args(const char *victim, const char *map, GHashTable * params, char **arg_list) { char *name = NULL; int last = 0, lpc = 0, max = 0; if (map == NULL) { /* The best default there is for now... 
*/ crm_debug("Using default arg map: port=uname"); append_const_arg("port", victim, arg_list); return; } max = strlen(map); crm_debug("Processing arg map: %s", map); for (; lpc < max + 1; lpc++) { if (isalpha(map[lpc])) { /* keep going */ } else if (map[lpc] == '=' || map[lpc] == ':') { free(name); name = calloc(1, 1 + lpc - last); memcpy(name, map + last, lpc - last); crm_debug("Got name: %s", name); last = lpc + 1; } else if (map[lpc] == 0 || map[lpc] == ',' || isspace(map[lpc])) { char *param = NULL; const char *value = NULL; param = calloc(1, 1 + lpc - last); memcpy(param, map + last, lpc - last); last = lpc + 1; crm_debug("Got key: %s", param); if (name == NULL) { crm_err("Misparsed '%s', found '%s' without a name", map, param); free(param); continue; } if (safe_str_eq(param, "uname")) { value = victim; } else { char *key = crm_meta_name(param); value = g_hash_table_lookup(params, key); free(key); } if (value) { crm_debug("Setting '%s'='%s' (%s) for %s", name, value, param, victim); append_const_arg(name, value, arg_list); } else { crm_err("No node attribute '%s' for '%s'", name, victim); } free(name); name = NULL; free(param); if (map[lpc] == 0) { break; } } else if (isspace(map[lpc])) { last = lpc; } } free(name); } static char * make_args(const char *action, const char *victim, GHashTable * device_args, GHashTable * port_map) { char buffer[512]; char *arg_list = NULL; const char *value = NULL; CRM_CHECK(action != NULL, return NULL); if (device_args) { g_hash_table_foreach(device_args, append_arg, &arg_list); } buffer[511] = 0; snprintf(buffer, 511, "pcmk_%s_action", action); if (device_args) { value = g_hash_table_lookup(device_args, buffer); } if (value == NULL && device_args) { /* Legacy support for early 1.1 releases - Remove for 1.4 */ snprintf(buffer, 511, "pcmk_%s_cmd", action); value = g_hash_table_lookup(device_args, buffer); } if (value == NULL && device_args && safe_str_eq(action, "off")) { /* Legacy support for late 1.1 releases - Remove for 1.4 */ value = g_hash_table_lookup(device_args, "pcmk_poweroff_action"); } if (value) { crm_info("Substituting action '%s' for requested operation '%s'", value, action); action = value; } append_const_arg(STONITH_ATTR_ACTION_OP, action, &arg_list); if (victim && device_args) { const char *alias = victim; const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG); if (port_map && g_hash_table_lookup(port_map, victim)) { alias = g_hash_table_lookup(port_map, victim); } /* Always supply the node's name too: * https://fedorahosted.org/cluster/wiki/FenceAgentAPI */ append_const_arg("nodename", victim, &arg_list); /* Check if we need to supply the victim in any other form */ if (param == NULL) { const char *map = g_hash_table_lookup(device_args, STONITH_ATTR_ARGMAP); if (map == NULL) { param = "port"; value = g_hash_table_lookup(device_args, param); } else { /* Legacy handling */ append_host_specific_args(alias, map, device_args, &arg_list); value = map; /* Nothing more to do */ } } else if (safe_str_eq(param, "none")) { value = param; /* Nothing more to do */ } else { value = g_hash_table_lookup(device_args, param); } /* Don't overwrite explictly set values for $param */ if (value == NULL || safe_str_eq(value, "dynamic")) { crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param, alias); append_const_arg(param, alias, &arg_list); } } crm_trace("Calculated: %s", arg_list); return arg_list; } static gboolean st_child_term(gpointer data) { int rc = 0; - async_command_t * track = data; + 
stonith_action_t *track = data; crm_info("Child %d timed out, sending SIGTERM", track->pid); track->timer_sigterm = 0; track->last_timeout_signo = SIGTERM; rc = kill(track->pid, SIGTERM); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't send SIGTERM to %d", track->pid); } return FALSE; } static gboolean st_child_kill(gpointer data) { int rc = 0; - async_command_t * track = data; + stonith_action_t *track = data; crm_info("Child %d timed out, sending SIGKILL", track->pid); track->timer_sigkill = 0; track->last_timeout_signo = SIGKILL; rc = kill(track->pid, SIGKILL); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't send SIGKILL to %d", track->pid); } return FALSE; } -/* Borrowed from libfence and extended */ -int -run_stonith_agent(const char *agent, const char *action, const char *victim, - GHashTable * device_args, GHashTable * port_map, int *agent_result, char **output, - async_command_t * track) +static void +stonith_action_destroy(stonith_action_t *action) +{ + + if (action->timer_sigterm > 0) { + g_source_remove(action->timer_sigterm); + } + if (action->timer_sigkill > 0) { + g_source_remove(action->timer_sigkill); + } + + if (action->fd_stdout) { + close(action->fd_stdout); + } + free(action->agent); + free(action->args); + free(action->output); + free(action->action); + free(action->victim); + free(action); +} + +stonith_action_t * +stonith_action_create(const char *agent, + const char *_action, + const char *victim, + int timeout, + GHashTable * device_args, + GHashTable * port_map) +{ + stonith_action_t *action; + + action = calloc(1, sizeof(stonith_action_t)); + action->args = make_args(_action, victim, device_args, port_map); + action->agent = strdup(agent); + action->action = strdup(_action); + if (victim) { + action->victim = strdup(victim); + } + action->timeout = timeout; + + return action; +} + +#define READ_MAX 500 +static char* +read_output(int fd) +{ + char buffer[READ_MAX]; + char *output = NULL; + int len = 0; + int more = 0; + + if (!fd) { + return NULL; + } + + do { + errno = 0; + memset(&buffer, 0, READ_MAX); + more = read(fd, buffer, READ_MAX-1); + + if (more > 0) { + crm_trace("Got %d more bytes: %s", more, buffer); + output = realloc(output, len + more + 1); + sprintf(output+len, "%s", buffer); + len += more; + } + + } while (more == (READ_MAX-1) || (more < 0 && errno == EINTR)); + + return output; +} + +static void +stonith_action_async_done(GPid pid, gint status, gpointer user_data) +{ + int rc = -pcmk_err_generic; + stonith_action_t *action = user_data; + + if (action->timer_sigterm > 0) { + g_source_remove(action->timer_sigterm); + } + if (action->timer_sigkill > 0) { + g_source_remove(action->timer_sigkill); + } + + if(action->last_timeout_signo) { + rc = -ETIME; + crm_notice("Child process %d performing action '%s' timed out with signal %d", + pid, action->action, action->last_timeout_signo); + } else if(WIFSIGNALED(status)) { + int signo = WTERMSIG(status); + rc = -ECONNABORTED; + crm_notice("Child process %d performing action '%s' timed out with signal %d", + pid, action->action, signo); + } else if(WIFEXITED(status)) { + rc = WEXITSTATUS(status); + crm_debug("Child process %d performing action '%s' exited with rc %d", + pid, action->action, rc); + } + + action->rc = rc; + action->output = read_output(action->fd_stdout); + + if (action->done_cb) { + action->done_cb(pid, action->rc, action->output, action->userdata); + } + + stonith_action_destroy(action); +} + +static int +internal_stonith_action_execute(stonith_action_t *action) { - char *args = make_args(action, 
victim, device_args, port_map); int pid, status, len, rc = -EPROTO; + int ret; + int total = 0; int p_read_fd, p_write_fd; /* parent read/write file descriptors */ int c_read_fd, c_write_fd; /* child read/write file descriptors */ int fd1[2]; int fd2[2]; c_read_fd = c_write_fd = p_read_fd = p_write_fd = -1; - if (args == NULL || agent == NULL) + if (action->args == NULL || action->agent == NULL) goto fail; - len = strlen(args); + len = strlen(action->args); if (pipe(fd1)) goto fail; p_read_fd = fd1[0]; c_write_fd = fd1[1]; if (pipe(fd2)) goto fail; c_read_fd = fd2[0]; p_write_fd = fd2[1]; crm_debug("forking"); pid = fork(); if (pid < 0) { rc = -ECHILD; goto fail; } - if (pid) { - /* parent */ - int ret; - int total = 0; + if (!pid) { + /* child */ - ret = fcntl(p_read_fd, F_SETFL, fcntl(p_read_fd, F_GETFL, 0) | O_NONBLOCK); - if(ret < 0) { - crm_perror(LOG_NOTICE, "Could not change the output of %s to be non-blocking", agent); - } + close(1); + /* coverity[leaked_handle] False positive */ + if (dup(c_write_fd) < 0) + goto fail; + close(2); + /* coverity[leaked_handle] False positive */ + if (dup(c_write_fd) < 0) + goto fail; + close(0); + /* coverity[leaked_handle] False positive */ + if (dup(c_read_fd) < 0) + goto fail; - do { - crm_debug("sending args"); - ret = write(p_write_fd, args + total, len - total); - if (ret > 0) { - total += ret; - } + /* keep c_write_fd open so parent can report all errors. */ + close(c_read_fd); + close(p_read_fd); + close(p_write_fd); - } while (errno == EINTR && total < len); + execlp(action->agent, action->agent, NULL); + exit(EXIT_FAILURE); + } - if (total != len) { - crm_perror(LOG_ERR, "Sent %d not %d bytes", total, len); - if (ret >= 0) { - rc = -EREMOTEIO; - } - goto fail; - } + /* parent */ + action->pid = pid; + ret = fcntl(p_read_fd, F_SETFL, fcntl(p_read_fd, F_GETFL, 0) | O_NONBLOCK); + if(ret < 0) { + crm_perror(LOG_NOTICE, "Could not change the output of %s to be non-blocking", action->agent); + } - close(p_write_fd); + do { + crm_debug("sending args"); + ret = write(p_write_fd, action->args + total, len - total); + if (ret > 0) { + total += ret; + } - if (track && track->done) { - track->fd_stdout = p_read_fd; - g_child_watch_add(pid, track->done, track); - crm_trace("Op: %s on %s, pid: %d, timeout: %ds", action, agent, pid, track->timeout); - track->last_timeout_signo = 0; - if (track->timeout) { - track->pid = pid; - track->timer_sigterm = g_timeout_add(1000*track->timeout, st_child_term, track); - track->timer_sigkill = g_timeout_add(1000*(track->timeout+5), st_child_kill, track); + } while (errno == EINTR && total < len); - } else { - crm_err("No timeout set for stonith operation %s with device %s", action, agent); - } + if (total != len) { + crm_perror(LOG_ERR, "Sent %d not %d bytes", total, len); + if (ret >= 0) { + rc = -EREMOTEIO; + } + goto fail; + } - close(c_write_fd); - close(c_read_fd); - free(args); - return pid; + close(p_write_fd); + /* async */ + if (action->async) { + action->fd_stdout = p_read_fd; + g_child_watch_add(pid, stonith_action_async_done, action); + crm_trace("Op: %s on %s, pid: %d, timeout: %ds", action->action, action->agent, pid, action->timeout); + action->last_timeout_signo = 0; + if (action->timeout) { + action->timer_sigterm = g_timeout_add(1000*action->timeout, st_child_term, action); + action->timer_sigkill = g_timeout_add(1000*(action->timeout+5), st_child_kill, action); } else { - pid_t p; + crm_err("No timeout set for stonith operation %s with device %s", + action->action, action->agent); + } - do { - p 
= waitpid(pid, &status, 0); - } while (p < 0 && errno == EINTR); + close(c_write_fd); + close(c_read_fd); + return 0; - if (p < 0) { - crm_perror(LOG_ERR, "waitpid(%d)", pid); + } else { + /* sync */ + int timeout = action->timeout + 1; + pid_t p = 0; - } else if (p != pid) { - crm_err("Waited for %d, got %d", pid, p); + while (action->timeout < 0 || timeout > 0) { + p = waitpid(pid, &status, WNOHANG); + if (p > 0) { + break; } + sleep(1); + timeout--; + }; - if (output != NULL) { - char *local_copy; - int lpc = 0, last = 0, more; - len = 0; - do { - char buf[500]; - - ret = read(p_read_fd, buf, 500); - if (ret > 0) { - buf[ret] = 0; - *output = realloc(*output, len + ret + 1); - sprintf((*output) + len, "%s", buf); - crm_trace("%d: %s", ret, (*output) + len); - len += ret; - } - - } while (ret == 500 || (ret < 0 && errno == EINTR)); - - if (*output) { - local_copy = strdup(*output); - more = strlen(local_copy); - for(lpc = 0; lpc < more; lpc++) { - if(local_copy[lpc] == '\n' || local_copy[lpc] == 0) { - local_copy[lpc] = 0; - crm_debug("%s: %s", agent, local_copy+last); - last = lpc+1; - } - } - crm_debug("%s: %s (total %d bytes)", agent, local_copy+last, more); - free(local_copy); - } - } + if (timeout == 0) { + int killrc = kill(pid, 9 /*SIGKILL*/); - rc = -ECONNABORTED; - *agent_result = -ECONNABORTED; - if (WIFEXITED(status)) { - crm_debug("result = %d", WEXITSTATUS(status)); - *agent_result = -WEXITSTATUS(status); - rc = 0; + if (killrc && errno != ESRCH) { + crm_err("kill(%d, KILL) failed: %d", pid, errno); + } + } - } else if (WIFSIGNALED(status)) { - crm_err("call %s for %s exited due to signal %d", action, agent, WTERMSIG(status)); + if (p <= 0) { + crm_perror(LOG_ERR, "waitpid(%d)", pid); - } else { - crm_err("call %s for %s exited abnormally. stopped=%d, continued=%d", - action, agent, WIFSTOPPED(status), WIFCONTINUED(status)); - } + } else if (p != pid) { + crm_err("Waited for %d, got %d", pid, p); } - } else { - /* child */ + action->output = read_output(p_read_fd); - close(1); - /* coverity[leaked_handle] False positive */ - if (dup(c_write_fd) < 0) - goto fail; - close(2); - /* coverity[leaked_handle] False positive */ - if (dup(c_write_fd) < 0) - goto fail; - close(0); - /* coverity[leaked_handle] False positive */ - if (dup(c_read_fd) < 0) - goto fail; + action->rc = rc = -ECONNABORTED; + if (timeout == 0) { + action->rc = -ETIME; + } else if (WIFEXITED(status)) { + crm_debug("result = %d", WEXITSTATUS(status)); + action->rc = -WEXITSTATUS(status); + rc = 0; - /* keep c_write_fd open so parent can report all errors. */ - close(c_read_fd); - close(p_read_fd); - close(p_write_fd); + } else if (WIFSIGNALED(status)) { + crm_err("call %s for %s exited due to signal %d", action->action, action->agent, WTERMSIG(status)); - execlp(agent, agent, NULL); - exit(EXIT_FAILURE); + } else { + crm_err("call %s for %s exited abnormally. 
stopped=%d, continued=%d", + action->action, action->agent, WIFSTOPPED(status), WIFCONTINUED(status)); + } } - fail: - free(args); +fail: if (p_read_fd >= 0) { close(p_read_fd); } if (p_write_fd >= 0) { close(p_write_fd); } if (c_read_fd >= 0) { close(c_read_fd); } if (c_write_fd >= 0) { close(c_write_fd); } return rc; } +GPid +stonith_action_execute_async(stonith_action_t *action, + void *userdata, + void (*done)(GPid pid, int rc, const char *output, gpointer user_data)) +{ + int rc = 0; + + if (!action) { + return -1; + } + + action->userdata = userdata; + action->done_cb = done; + action->async = 1; + + rc = internal_stonith_action_execute(action); + + return rc ? rc : action->pid; +} + +int +stonith_action_execute(stonith_action_t *action, + int *agent_result, + char **output) +{ + int rc = 0; + + if (!action) { + return -1; + } + + rc = internal_stonith_action_execute(action); + if (rc) { + /* error */ + return rc; + } + + if (agent_result) { + *agent_result = action->rc; + } + if (output) { + *output = action->output; + action->output = NULL; /* handed it off, do not free */ + } + + stonith_action_destroy(action); + return rc; +} + static int stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace, stonith_key_value_t ** devices, int timeout) { int count = 0; if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } /* Include Heartbeat agents */ if (namespace == NULL || safe_str_eq("heartbeat", namespace)) { #if HAVE_STONITH_STONITH_H static gboolean need_init = TRUE; char **entry = NULL; char **type_list = NULL; static char **(*type_list_fn) (void) = NULL; static void (*type_free_fn) (char **) = NULL; if(need_init) { need_init = FALSE; type_list_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_types", FALSE); type_free_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_free_hostlist", FALSE); } if(type_list_fn) { type_list = (*type_list_fn)(); } for (entry = type_list; entry != NULL && *entry; ++entry) { crm_trace("Added: %s", *entry); *devices = stonith_key_value_add(*devices, NULL, *entry); count++; } if (type_list && type_free_fn) { (*type_free_fn)(type_list); } #else if(namespace != NULL) { return -EINVAL; /* Heartbeat agents not supported */ } #endif } /* Include Red Hat agents, basically: ls -1 @sbin_dir@/fence_* */ if (namespace == NULL || safe_str_eq("redhat", namespace)) { struct dirent **namelist; int file_num = scandir(RH_STONITH_DIR, &namelist, 0, alphasort); if (file_num > 0) { struct stat prop; char buffer[FILENAME_MAX + 1]; while (file_num--) { if ('.' 
== namelist[file_num]->d_name[0]) { free(namelist[file_num]); continue; } else if (0 != strncmp(RH_STONITH_PREFIX, namelist[file_num]->d_name, strlen(RH_STONITH_PREFIX))) { free(namelist[file_num]); continue; } snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, namelist[file_num]->d_name); if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) { *devices = stonith_key_value_add(*devices, NULL, namelist[file_num]->d_name); count++; } free(namelist[file_num]); } free(namelist); } } return count; } static int stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent, const char *namespace, char **output, int timeout) { int rc = 0; char *buffer = NULL; const char *provider = get_stonith_provider(agent, namespace); crm_trace("looking up %s/%s metadata", agent, provider); /* By having this in a library, we can access it from stonith_admin * when neither lrmd or stonith-ng are running * Important for the crm shell's validations... */ if (safe_str_eq(provider, "redhat")) { - - int exec_rc = run_stonith_agent(agent, "metadata", NULL, NULL, NULL, &rc, &buffer, NULL); + stonith_action_t *action = stonith_action_create(agent, "metadata", NULL, 5, NULL, NULL); + int exec_rc = stonith_action_execute(action, &rc, &buffer); if (exec_rc < 0 || rc != 0 || buffer == NULL) { crm_debug("Query failed: %d %d: %s", exec_rc, rc, crm_str(buffer)); free(buffer); /* Just in case */ return -EINVAL; } else { xmlNode *xml = string2xml(buffer); xmlNode *actions = NULL; xmlXPathObject *xpathObj = NULL; xpathObj = xpath_search(xml, "//actions"); if (xpathObj && xpathObj->nodesetval->nodeNr > 0) { actions = getXpathResult(xpathObj, 0); } /* Now fudge the metadata so that the start/stop actions appear */ xpathObj = xpath_search(xml, "//action[@name='stop']"); if (xpathObj == NULL || xpathObj->nodesetval->nodeNr <= 0) { xmlNode *tmp = NULL; tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "stop"); crm_xml_add(tmp, "timeout", "20s"); tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "start"); crm_xml_add(tmp, "timeout", "20s"); } /* Now fudge the metadata so that the port isn't required in the configuration */ xpathObj = xpath_search(xml, "//parameter[@name='port']"); if (xpathObj && xpathObj->nodesetval->nodeNr > 0) { /* We'll fill this in */ xmlNode *tmp = getXpathResult(xpathObj, 0); crm_xml_add(tmp, "required", "0"); } free(buffer); buffer = dump_xml_formatted(xml); free_xml(xml); } } else { #if !HAVE_STONITH_STONITH_H return -EINVAL; /* Heartbeat agents not supported */ #else int bufferlen = 0; static const char *no_parameter_info = ""; Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static const char *(*st_info_fn) (Stonith *, int) = NULL; static void (*st_del_fn) (Stonith *) = NULL; if(need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete", FALSE); st_info_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_get_info", FALSE); } if (lha_agents_lib && st_new_fn && st_del_fn && st_info_fn) { char *xml_meta_longdesc = NULL; char *xml_meta_shortdesc = NULL; char *meta_param = NULL; char *meta_longdesc = NULL; char *meta_shortdesc = NULL; stonith_obj = (*st_new_fn) (agent); if(stonith_obj) { meta_longdesc = strdup((*st_info_fn)(stonith_obj, ST_DEVICEDESCR)); if (meta_longdesc == NULL) { crm_warn("no long 
description in %s's metadata.", agent); meta_longdesc = strdup(no_parameter_info); } meta_shortdesc = strdup((*st_info_fn)(stonith_obj, ST_DEVICEID)); if (meta_shortdesc == NULL) { crm_warn("no short description in %s's metadata.", agent); meta_shortdesc = strdup(no_parameter_info); } meta_param = strdup((*st_info_fn)(stonith_obj, ST_CONF_XML)); if (meta_param == NULL) { crm_warn("no list of parameters in %s's metadata.", agent); meta_param = strdup(no_parameter_info); } (*st_del_fn)(stonith_obj); } else { return -EINVAL; /* Heartbeat agents not supported */ } xml_meta_longdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_longdesc); xml_meta_shortdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc); bufferlen = strlen(META_TEMPLATE) + strlen(agent) + strlen(xml_meta_longdesc) + strlen(xml_meta_shortdesc) + strlen(meta_param) + 1; buffer = calloc(1, bufferlen); snprintf(buffer, bufferlen - 1, META_TEMPLATE, agent, xml_meta_longdesc, xml_meta_shortdesc, meta_param); xmlFree(xml_meta_longdesc); xmlFree(xml_meta_shortdesc); free(meta_shortdesc); free(meta_longdesc); free(meta_param); } #endif } if (output) { *output = buffer; } else { free(buffer); } return rc; } static int stonith_api_query(stonith_t * stonith, int call_options, const char *target, stonith_key_value_t ** devices, int timeout) { int rc = 0, lpc = 0, max = 0; xmlNode *data = NULL; xmlNode *output = NULL; xmlXPathObjectPtr xpathObj = NULL; CRM_CHECK(devices != NULL, return -EINVAL); data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, target); rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout); if (rc < 0) { return rc; } xpathObj = xpath_search(output, "//@agent"); if (xpathObj) { max = xpathObj->nodesetval->nodeNr; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); crm_info("%s[%d] = %s", "//@agent", lpc, xmlGetNodePath(match)); *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID)); } } free_xml(output); free_xml(data); return max; } static int stonith_api_call(stonith_t * stonith, int call_options, const char *id, const char *action, const char *victim, int timeout, xmlNode **output) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_DEVICE, id); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add(data, F_STONITH_TARGET, victim); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout); free_xml(data); return rc; } static int stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = crm_element_value(output, "st_output"); if (list_str) { *list_info = strdup(list_str); } } if (output) { free_xml(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL); } 
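Editor's note: the hunks above replace the old run_stonith_agent() helper with the new stonith_action_* API — stonith_action_create() plus stonith_action_execute() for synchronous callers such as stonith_api_device_metadata(), and stonith_action_execute_async() for the daemon's mainloop-driven path. The sketch below shows how a caller might drive both paths under the signatures introduced by this patch; it is not part of the patch itself, and the agent name ("fence_dummy"), victim, timeouts and the my_fence_done() callback are illustrative assumptions only.

/* Hypothetical usage sketch of the stonith_action_* API (not part of the patch).
 * Assumes the same glib/Pacemaker headers already included by st_client.c. */

static void
my_fence_done(GPid pid, int rc, const char *output, gpointer user_data)
{
    /* rc is a negative pcmk error code when the child timed out or was
     * aborted (e.g. -ETIME, -ECONNABORTED), otherwise it reflects the
     * agent's exit status as recorded by stonith_action_async_done(). */
    crm_info("Agent process %d finished: rc=%d output=%s",
             pid, rc, output ? output : "<none>");
}

static void
example_usage(GHashTable *device_args)
{
    int agent_rc = 0;
    char *output = NULL;
    stonith_action_t *action = NULL;

    /* Synchronous path, mirroring stonith_api_device_metadata() above:
     * build the action, run it, and take ownership of the captured output. */
    action = stonith_action_create("fence_dummy", "metadata", NULL /* victim */,
                                   5 /* timeout (s) */, device_args,
                                   NULL /* port map */);
    if (stonith_action_execute(action, &agent_rc, &output) == 0) {
        crm_debug("metadata (agent rc=%d): %s", agent_rc, output);
        free(output);   /* stonith_action_execute() hands the buffer to the caller */
    }

    /* Asynchronous path, mirroring stonithd's stonith_device_execute():
     * the done callback fires from the mainloop once the child exits,
     * or after the SIGTERM/SIGKILL timers expire on timeout. */
    action = stonith_action_create("fence_dummy", "off", "node1",
                                   30 /* timeout (s) */, device_args, NULL);
    stonith_action_execute_async(action, NULL /* userdata */, my_fence_done);
}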
static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout); rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); free_xml(data); return rc; } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); } rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options | st_opt_sync_call, timeout); free_xml(data); if (rc == 0) { xmlNode *op = NULL; xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_ERR); for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) { stonith_history_t *kvp; kvp = calloc(1, sizeof(stonith_history_t)); kvp->target = crm_element_value_copy(op, F_STONITH_TARGET); kvp->action = crm_element_value_copy(op, F_STONITH_ACTION); kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN); kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE); crm_element_value_int(op, F_STONITH_DATE, &kvp->completed); crm_element_value_int(op, F_STONITH_STATE, &kvp->state); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } return rc; } gboolean is_redhat_agent(const char *agent) { int rc = 0; struct stat prop; char buffer[FILENAME_MAX + 1]; snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, agent); rc = stat(buffer, &prop); if (rc >= 0 && S_ISREG(prop.st_mode)) { return TRUE; } return FALSE; } const char * get_stonith_provider(const char *agent, const char *provider) { /* This function sucks */ if (is_redhat_agent(agent)) { return "redhat"; #if HAVE_STONITH_STONITH_H } else { Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static void (*st_del_fn) (Stonith *) = NULL; if(need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete", FALSE); } if (lha_agents_lib && st_new_fn && st_del_fn) { stonith_obj = (*st_new_fn) (agent); if(stonith_obj) { (*st_del_fn)(stonith_obj); return "heartbeat"; } } #endif } crm_err("No such device: %s", agent); return NULL; } static gint stonithlib_GCompareFunc(gconstpointer a, gconstpointer b) { int rc = 0; const stonith_notify_client_t *a_client = a; const stonith_notify_client_t *b_client = b; CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. 
%p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = create_xml_node(NULL, "stonith_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command"); crm_xml_add(op_msg, F_TYPE, T_STONITH_NG); crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_STONITH_OPERATION, op); crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options); if (data != NULL) { add_message_xml(op_msg, F_STONITH_CALLDATA, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->private; crm_debug("Signing out of the STONITH Service"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if(native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = stonith->private; static struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; crm_trace("Connecting command channel"); stonith->state = stonith_connected_command; if(stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if(native->ipc && crm_ipc_connect(native->ipc)) { *stonith_fd = crm_ipc_get_fd(native->ipc); } else if(native->ipc) { rc = -ENOTCONN; } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { crm_debug("Could not connect to the Stonith API"); rc = -ENOTCONN; } if (rc == pcmk_ok) { xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "stonith_command"); crm_xml_add(hello, F_TYPE, T_STONITH_NG); crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_STONITH_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply); if(rc < 0) { crm_perror(LOG_DEBUG, "Couldn't complete registration with the fencing API: %d", rc); rc = -ECOMM; } else if(reply == NULL) { crm_err("Did not receive registration reply"); rc = -EPROTO; } else { const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION); const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID); if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { crm_err("Invalid registration message: %s", msg_type); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else if (tmp_ticket == NULL) { crm_err("No registration token provided"); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else { crm_trace("Obtained registration token: %s", tmp_ticket); native->token = strdup(tmp_ticket); rc = pcmk_ok; } } free_xml(reply); free_xml(hello); } if (rc == pcmk_ok) { #if HAVE_MSGFROMIPC_TIMEOUT 
stonith->call_timeout = MAX_IPC_DELAY; #endif crm_debug("Connection to STONITH successful"); return pcmk_ok; } crm_debug("Connection to STONITH failed: %s", pcmk_strerror(rc)); stonith->cmds->disconnect(stonith); return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__); stonith_private_t *native = stonith->private; if (stonith->state != stonith_disconnected) { int rc; crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY); if (enabled) { crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL); if(rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } } free_xml(notify_msg); return pcmk_ok; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t *e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; crm_debug("Removing callback for %s events", event); private = stonith->private; new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return pcmk_ok; } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static void set_callback_timeout(stonith_callback_client_t *callback, stonith_t *stonith, int call_id, int timeout) { struct timer_rec_s *async_timer = callback->timer; if (timeout <= 0) { return; } if (!async_timer) { async_timer = calloc(1, sizeof(struct timer_rec_s)); callback->timer = async_timer; } async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; if (async_timer->ref) { g_source_remove(async_timer->ref); } 
async_timer->ref = g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer); } static void update_callback_timeout(int call_id, int timeout, stonith_t *st) { stonith_callback_client_t *callback = NULL; stonith_private_t *private = st->private; callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (!callback || !callback->allow_timeout_updates) { return; } set_callback_timeout(callback, st, call_id, timeout); } static void invoke_callback(stonith_t *st, int call_id, int rc, void *userdata, void (*callback) (stonith_t * st, stonith_callback_data_t *data)) { stonith_callback_data_t data = { 0, }; data.call_id = call_id; data.rc = rc; data.userdata = userdata; callback(st, &data); } static int stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options, void *user_data, const char *callback_name, void (*callback) (stonith_t * st, stonith_callback_data_t *data)) { stonith_callback_client_t *blob = NULL; stonith_private_t *private = NULL; CRM_CHECK(stonith != NULL, return -EINVAL); CRM_CHECK(stonith->private != NULL, return -EINVAL); private = stonith->private; if (call_id == 0) { private->op_callback = callback; } else if (call_id < 0) { if (!(options & st_opt_report_only_success)) { crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id)); invoke_callback(stonith, call_id, call_id, user_data, callback); } else { crm_warn("STONITH call failed: %s", pcmk_strerror(call_id)); } return FALSE; } blob = calloc(1, sizeof(stonith_callback_client_t)); blob->id = callback_name; blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE; blob->user_data = user_data; blob->callback = callback; blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? 
TRUE : FALSE; if (timeout > 0) { set_callback_timeout(blob, stonith, call_id, timeout); } g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob); crm_trace("Added callback to %s for call %d", callback_name, call_id); return TRUE; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); } return pcmk_ok; } static void stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data) { int call = GPOINTER_TO_INT(key); stonith_callback_client_t *blob = value; crm_debug("Call %d (%s): pending", call, crm_str(blob->id)); } void stonith_dump_pending_callbacks(stonith_t * stonith) { stonith_private_t *private = stonith->private; if (private->stonith_op_callback_table == NULL) { return; } return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL); } void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc) { stonith_private_t *private = NULL; stonith_callback_client_t *blob = NULL; stonith_callback_client_t local_blob; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->private != NULL, return); private = stonith->private; local_blob.id = NULL; local_blob.callback = NULL; local_blob.user_data = NULL; local_blob.only_success = FALSE; if (msg != NULL) { crm_element_value_int(msg, F_STONITH_RC, &rc); crm_element_value_int(msg, F_STONITH_CALLID, &call_id); } CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result")); blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (blob != NULL) { local_blob = *blob; blob = NULL; stonith_api_del_callback(stonith, call_id, FALSE); } else { crm_trace("No callback found for call %d", call_id); local_blob.callback = NULL; } if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback); } else if (private->op_callback == NULL && rc != pcmk_ok) { crm_warn("STONITH command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed STONITH Update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); invoke_callback(stonith, call_id, rc, NULL, private->op_callback); } crm_trace("OP callback activated."); } /* */ static stonith_event_t * xml_to_event(xmlNode *msg) { stonith_event_t *event = calloc(1, sizeof(stonith_event_t)); const char *ntype = crm_element_value(msg, F_SUBTYPE); char *data_addr = g_strdup_printf("//%s", ntype); xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG); crm_log_xml_trace(msg, "stonith_notify"); crm_element_value_int(msg, F_STONITH_RC, &(event->result)); if(safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) { event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION); if(data) { event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN); event->target = crm_element_value_copy(data, F_STONITH_TARGET); event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE); event->id = 
crm_element_value_copy(data, F_STONITH_REMOTE); event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME); } else { crm_err("No data for %s event", ntype); crm_log_xml_notice(msg, "BadEvent"); } } g_free(data_addr); return event; } static void event_free(stonith_event_t *event) { free(event->id); free(event->type); free(event->message); free(event->operation); free(event->origin); free(event->target); free(event->executioner); free(event->device); free(event->client_origin); + free(event); } static void stonith_send_notification(gpointer data, gpointer user_data) { struct notify_blob_s *blob = user_data; stonith_notify_client_t *entry = data; stonith_event_t *st_event = NULL; const char *event = NULL; if (blob->xml == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(blob->xml, F_SUBTYPE); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->notify == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (safe_str_neq(entry->event, event)) { crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); event_free(st_event); } int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; enum crm_ipc_flags ipc_flags = crm_ipc_client_none; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = stonith->private; if (stonith->state == stonith_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } if (call_options & st_opt_sync_call) { ipc_flags |= crm_ipc_client_response; } stonith->call_id++; /* prevent call_id from being negative (or zero) and conflicting * with the stonith_errors enum * use 2 because we use it as (stonith->call_id - 1) below */ if (stonith->call_id < 1) { stonith->call_id = 1; } CRM_CHECK(native->token != NULL,;); op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout); crm_trace("Sending %s message to STONITH service, Timeout: %ds", op, timeout); rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000*(timeout + 60), &op_reply); free_xml(op_msg); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); CRM_CHECK(stonith->call_id != 0, return -EPROTO); free_xml(op_reply); return stonith->call_id; } rc = pcmk_ok; crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id); if (reply_id == stonith->call_id) { crm_trace("Syncronous reply %d received", reply_id); if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) { rc = -ENOMSG; } if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } } else if (reply_id <= 0) { crm_err("Recieved bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); free_xml(op_reply); rc = -ENOMSG; } else { crm_err("Recieved bad reply: %d 
(wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); free_xml(op_reply); rc = -ENOMSG; } done: if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("STONITH disconnected"); stonith->state = stonith_disconnected; } free_xml(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->private; while(crm_ipc_ready(private->ipc)) { if(crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if(crm_ipc_connected(private->ipc) == FALSE) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t * st = userdata; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->private; blob.stonith = st; blob.xml = string2xml(buffer); if (blob.xml == NULL) { crm_warn("Received a NULL msg from STONITH service: %s.", buffer); return 0; } /* do callbacks */ type = crm_element_value(blob.xml, F_TYPE); crm_trace("Activating %s callbacks...", type); if (safe_str_eq(type, T_STONITH_NG)) { stonith_perform_callback(st, blob.xml, 0, 0); } else if (safe_str_eq(type, T_STONITH_NOTIFY)) { g_list_foreach(private->notify_list, stonith_send_notification, &blob); } else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) { int call_id = 0; int timeout = 0; crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout); crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id); update_callback_timeout(call_id, timeout, st); } else { crm_err("Unknown message type: %s", type); crm_log_xml_warn(blob.xml, "BadReply"); } free_xml(blob.xml); return 1; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; if (stonith->state != stonith_disconnected) { rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->private; g_hash_table_destroy(private->stonith_op_callback_table); free(private->token); free(stonith->private); free(stonith->cmds); free(stonith); } return rc; } void stonith_api_delete(stonith_t * stonith) { stonith_private_t *private = stonith->private; GList *list = private->notify_list; while (list != NULL) { stonith_notify_client_t *client = g_list_nth_data(list, 0); list = g_list_remove(list, client); free(client); } stonith->cmds->free(stonith); stonith = NULL; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); private = calloc(1, sizeof(stonith_private_t)); new_stonith->private = private; private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); private->notify_list = NULL; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t)); /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = 
stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list; new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->register_level = stonith_api_register_level; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; /* *INDENT-ON* */ return new_stonith; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = calloc(1, sizeof(stonith_key_value_t)); if (key) { p->key = strdup(key); } if (value) { p->value = strdup(value); } end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } int stonith_api_kick(int nodeid, const char *uname, int timeout, bool off) { char *name = NULL; const char *action = "reboot"; int rc = -EPROTO; stonith_t *st = NULL; enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide; st = stonith_api_new(); if (st) { rc = st->cmds->connect(st, "stonith-api", NULL); } if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } if (off) { action = "off"; } if (rc == pcmk_ok) { rc = st->cmds->fence(st, opts, name, action, timeout); } if (st) { st->cmds->disconnect(st); stonith_api_delete(st); } free(name); return rc; } time_t stonith_api_time(int nodeid, const char *uname, bool in_progress) { int rc = 0; char *name = NULL; time_t when = 0; time_t progress = 0; stonith_t *st = NULL; stonith_history_t *history, *hp = NULL; enum stonith_call_options opts = st_opt_sync_call; st = stonith_api_new(); if (st) { rc = st->cmds->connect(st, "stonith-api", NULL); } if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } if (st && rc == pcmk_ok) { st->cmds->history(st, st_opt_sync_call | st_opt_cs_nodeid, name, &history, 120); for (hp = history; hp; hp = hp->next) { if (in_progress) { if (hp->state != st_done && hp->state != st_failed) { progress = time(NULL); } } else if (hp->state == st_done) { when = hp->completed; } } } if (progress) { when = progress; } if (st) { st->cmds->disconnect(st); stonith_api_delete(st); } free(name); return when; } diff --git a/pengine/graph.c b/pengine/graph.c index 4561627536..09e55dabb1 100644 --- a/pengine/graph.c +++ b/pengine/graph.c @@ -1,1004 +1,1003 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include gboolean update_action(action_t * action); gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); static enum pe_action_flags get_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = action->flags; if (action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if (action->rsc->variant >= pe_clone && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, its only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but its logical and appears to work well. */ if (is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid); set_bit(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static char * convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify, gboolean free_original) { int interval = 0; char *uuid = NULL; char *rid = NULL; char *raw_task = NULL; int task = no_action; pe_rsc_trace(rsc, "Processing %s", old_uuid); if (old_uuid == NULL) { return NULL; } else if (strstr(old_uuid, "notify") != NULL) { goto done; /* no conversion */ } else if (rsc->variant < pe_group) { goto done; /* no conversion */ } CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval)); if (interval > 0) { goto done; /* no conversion */ } task = text2task(raw_task); switch (task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: task = no_action; break; default: crm_err("Unknown action: %s", raw_task); task = no_action; break; } if (task != no_action) { if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) { uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1)); } else { uuid = generate_op_key(rid, task2text(task + 1), 0); } pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(old_uuid); } if (free_original) { free(old_uuid); } free(raw_task); free(rid); return uuid; } static action_t * rsc_expand_action(action_t * action) { action_t *result = action; if (action->rsc && action->rsc->variant >= pe_group) { /* Expand 'start' -> 'started' */ char *uuid = NULL; gboolean notify = FALSE; if (action->rsc->parent == NULL) { /* Only outter-most resources have notification actions */ notify = is_set(action->rsc->flags, pe_rsc_notify); } uuid = convert_non_atomic_uuid(action->uuid, action->rsc, notify, FALSE); if (uuid) { pe_rsc_trace(action->rsc, "Converting %s to %s %d", action->uuid, uuid, is_set(action->rsc->flags, pe_rsc_notify)); result = find_first_action(action->rsc->actions, uuid, NULL, NULL); if 
(result == NULL) { crm_err("Couldn't expand %s", action->uuid); result = action; } free(uuid); } } return result; } static enum pe_graph_flags graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; gboolean processed = FALSE; /* TODO: Do as many of these in parallel as possible */ if (type & pe_order_implies_then) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_then); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(then, pe_action_optional | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies right: %s then %s", first->uuid, then->uuid); } } if ((type & pe_order_restart) && then->rsc) { enum pe_action_flags restart = (pe_action_optional | pe_action_runnable); processed = TRUE; changed |= then->rsc->cmds->update_actions(first, then, node, flags, restart, pe_order_restart); if (changed) { pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("restart: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first) { processed = TRUE; if (first->rsc) { changed |= first->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(first, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_first; } } if (changed) { pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_master) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_first_master); } if (changed) { pe_rsc_trace(then->rsc, "implies left when right rsc is Master role: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_one_or_more) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_one_or_more); } else if (is_set(flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_runnable_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_runnable_left); } else if (is_set(flags, pe_action_runnable) == FALSE) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_optional) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, 
pe_order_optional); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_asymmetrical) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_asymmetrical); } if (changed) { pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid); } } if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", first->uuid, then->uuid); update_action_flags(then, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_first_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", then->uuid, first->uuid); update_action_flags(first, pe_action_print_always); /* dont care about changed */ } if (processed == FALSE) { crm_trace("Constraint 0x%.6x not applicable", type); } return changed; } gboolean update_action(action_t * then) { GListPtr lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; crm_trace("Processing %s (%s %s %s)", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : ""); if (is_set(then->flags, pe_action_requires_any)) { clear_bit(then->flags, pe_action_runnable); } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; action_t *first = other->action; node_t *then_node = then->node; node_t *first_node = first->node; enum pe_action_flags then_flags = 0; enum pe_action_flags first_flags = 0; if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node) { crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid); } } if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node) { crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid); } } clear_bit(changed, pe_graph_updated_first); if (first->rsc != then->rsc && first->rsc != NULL && then->rsc != NULL && first->rsc != then->rsc->parent) { first = rsc_expand_action(first); } if (first != other->action) { crm_trace("Ordering %s afer %s instead of %s", then->uuid, first->uuid, other->action->uuid); } first_flags = get_action_flags(first, then_node); then_flags = get_action_flags(then, first_node); crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) 0x%.6x", then->uuid, is_set(then_flags, pe_action_optional) ? "optional" : "required", is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then_flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : "", first->uuid, is_set(first_flags, pe_action_optional) ? "optional" : "required", is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first_flags, pe_action_pseudo) ? "pseudo" : first->node ? 
first->node->details-> uname : "", other->type); if (first == other->action) { clear_bit(first_flags, pe_action_pseudo); changed |= graph_update_action(first, then, then->node, first_flags, other->type); } else if (order_actions(first, then, other->type)) { /* Start again to get the new actions_before list */ changed |= (pe_graph_updated_then | pe_graph_disable); } if (changed & pe_graph_disable) { crm_trace("Disabled constraint %s -> %s", other->action->uuid, then->uuid); clear_bit(changed, pe_graph_disable); other->type = pe_order_none; } if (changed & pe_graph_updated_first) { GListPtr lpc2 = NULL; crm_trace("Updated %s (first %s %s %s), processing dependants ", first->uuid, is_set(first->flags, pe_action_optional) ? "optional" : "required", is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first->flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : ""); for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other = (action_wrapper_t *) lpc2->data; update_action(other->action); } update_action(first); } } if (is_set(then->flags, pe_action_requires_any)) { if (last_flags != then->flags) { changed |= pe_graph_updated_then; } else { clear_bit(changed, pe_graph_updated_then); } } if (changed & pe_graph_updated_then) { crm_trace("Updated %s (then %s %s %s), processing dependants ", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : ""); update_action(then); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; update_action(other->action); } } return FALSE; } gboolean shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set) { /* add the stop to the before lists so it counts as a pre-req * for the shutdown */ GListPtr lpc = NULL; for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) { action_t *action = (action_t *) lpc->data; if (action->rsc == NULL || action->node == NULL) { continue; } else if(action->node->details != node->details) { continue; } else if(is_set(data_set->flags, pe_flag_maintenance_mode)) { pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid); continue; } else if(safe_str_neq(action->task, RSC_STOP)) { continue; } else if(is_not_set(action->rsc->flags, pe_rsc_managed) && is_not_set(action->rsc->flags, pe_rsc_block)) { /* * If another action depends on this one, we may still end up blocking */ pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid); continue; } pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid, node->details->uname); clear_bit(action->flags, pe_action_optional); custom_action_order(action->rsc, NULL, action, NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op, pe_order_optional|pe_order_runnable_left, data_set); } return TRUE; } gboolean stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set) { CRM_CHECK(stonith_op != NULL, return FALSE); /* * Make sure the stonith OP occurs before we start any shared resources */ if (stonith_op != NULL) { GListPtr lpc = NULL; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc_stonith_ordering(rsc, stonith_op, data_set); } } /* add the stonith OP as a stop pre-req and then mark the stop * as a pseudo op -
since its now redundant */ return TRUE; } xmlNode * action2xml(action_t * action, gboolean as_input) { gboolean needs_node_info = TRUE; xmlNode *action_xml = NULL; xmlNode *args_xml = NULL; char *action_id_s = NULL; if (action == NULL) { return NULL; } if (safe_str_eq(action->task, CRM_OP_FENCE)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* needs_node_info = FALSE; */ } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* } else if(safe_str_eq(action->task, RSC_PROBED)) { */ /* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */ } else if (is_set(action->flags, pe_action_pseudo)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT); needs_node_info = FALSE; } else { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); } action_id_s = crm_itoa(action->id); crm_xml_add(action_xml, XML_ATTR_ID, action_id_s); free(action_id_s); crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task); if (action->rsc != NULL && action->rsc->clone_name != NULL) { char *clone_key = NULL; const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); int interval = crm_parse_int(interval_s, "0"); if (safe_str_eq(action->task, RSC_NOTIFY)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_operation"); CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid)); CRM_CHECK(n_task != NULL, crm_err("No notify operation value found for %s", action->uuid)); clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task); } else { clone_key = generate_op_key(action->rsc->clone_name, action->task, interval); } CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid)); crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key); crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid); free(clone_key); } else { crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid); } if (needs_node_info && action->node != NULL) { crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id); } if (is_set(action->flags, pe_action_failure_is_fatal) == FALSE) { add_hash_param(action->meta, XML_ATTR_TE_ALLOWFAIL, XML_BOOLEAN_TRUE); } if (as_input) { return action_xml; } if (action->rsc) { if (is_set(action->flags, pe_action_pseudo) == FALSE) { int lpc = 0; xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml)); const char *attr_list[] = { XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER, XML_ATTR_TYPE }; if(is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) { /* Do not use the 'instance free' name here as that * might interfere with the instance we plan to keep. * Ie. if there are more than two named /anonymous/ * instances on a given node, we need to make sure the * command goes to the right one. 
* * Keep this block, even when everyone is using * 'instance free' anonymous clone names - it means * we'll do the right thing if anyone toggles the * unique flag to 'off' */ crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } else if(is_not_set(action->rsc->flags, pe_rsc_unique)) { const char *xml_id = ID(action->rsc->xml); crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id, action->rsc->clone_name); /* ID is what we'd like client to use * ID_LONG is what they might know it as instead * * ID_LONG is only strictly needed /here/ during the * transition period until all nodes in the cluster * are running the new software /and/ have rebooted * once (meaning that they've only ever spoken to a DC * supporting this feature). * * If anyone toggles the unique flag to 'on', the * 'instance free' name will correspond to an orphan * and fall into the clause above instead */ crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); if(action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); } else { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } } else { CRM_ASSERT(action->rsc->clone_name == NULL); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); } for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { crm_xml_add(rsc_xml, attr_list[lpc], g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); } } } args_xml = create_xml_node(NULL, XML_TAG_ATTRS); crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); g_hash_table_foreach(action->extra, hash2field, args_xml); if (action->rsc != NULL) { g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml); } g_hash_table_foreach(action->meta, hash2metafield, args_xml); if (action->rsc != NULL) { resource_t *parent = action->rsc; while (parent != NULL) { parent->cmds->append_meta(parent, args_xml); parent = parent->parent; } } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml); } sorted_xml(args_xml, action_xml, FALSE); crm_log_xml_trace(action_xml, "dumped action"); free_xml(args_xml); return action_xml; } static gboolean should_dump_action(action_t * action) { CRM_CHECK(action != NULL, return FALSE); if (is_set(action->flags, pe_action_dumped)) { crm_trace( "action %d (%s) was already dumped", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) { GListPtr lpc = NULL; /* This is a horrible but convenient hack * * It minimizes the number of actions with unsatisfied inputs * (ie. not included in the graph) * * This in turn, means we can be more concise when printing * aborted/incomplete graphs.
* * It also makes it obvious which node is preventing * probe_complete from running (presumably because it is only * partially up) * * For these reasons we tolerate such perversions */ for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (is_not_set(wrapper->action->flags, pe_action_runnable)) { /* Only interested in runnable operations */ } else if (safe_str_neq(wrapper->action->task, RSC_START)) { /* Only interested in start operations */ } else if (is_set(wrapper->action->flags, pe_action_dumped)) { crm_trace( "action %d (%s) dependancy of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } else if (should_dump_action(wrapper->action)) { crm_trace( "action %d (%s) dependancy of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } } } if (is_set(action->flags, pe_action_runnable) == FALSE) { crm_trace( "action %d (%s) was not runnable", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_optional) && is_set(action->flags, pe_action_print_always) == FALSE) { crm_trace( "action %d (%s) was optional", action->id, action->uuid); return FALSE; } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) { const char *interval = NULL; interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); /* make sure probes and recurring monitors go through */ if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) { crm_trace( "action %d (%s) was for an unmanaged resource (%s)", action->id, action->uuid, action->rsc->id); return FALSE; } } if (is_set(action->flags, pe_action_pseudo) || safe_str_eq(action->task, CRM_OP_FENCE) || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { /* skip the next checks */ return TRUE; } if (action->node == NULL) { pe_err("action %d (%s) was not allocated", action->id, action->uuid); log_action(LOG_DEBUG, "Unallocated action", action, FALSE); return FALSE; } else if (action->node->details->online == FALSE) { pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for offline node", action, FALSE); return FALSE; #if 0 /* but this would also affect resources that can be safely * migrated before a fencing op */ } else if (action->node->details->unclean == FALSE) { pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for unclean node", action, FALSE); return FALSE; #endif } return TRUE; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const action_wrapper_t *action_wrapper2 = (const action_wrapper_t *)a; const action_wrapper_t *action_wrapper1 = (const action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } return 0; } static gboolean should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) { int type = wrapper->type; type &= ~pe_order_implies_first_printed; type &= ~pe_order_implies_then_printed; type &= ~pe_order_optional; wrapper->state = pe_link_not_dumped; if (last_action == wrapper->action->id) { crm_trace( "Input (%d) %s duplicated for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); wrapper->state = pe_link_dup; return FALSE; } else if (wrapper->type == pe_order_none) { crm_trace( "Input (%d) %s suppressed for %s", 
wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) { crm_trace( "Input (%d) %s optional (ordering) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && (wrapper->type & pe_order_stonith_stop)) { crm_trace( "Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (wrapper->type == pe_order_load) { - crm_trace("check load filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node->details->uname, action->uuid, action->node->details->uname); + crm_trace("check load filter %s.%s -> %s.%s", + wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", + action->uuid, action->node ? action->node->details->uname : ""); if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) { - /* For migrate_to ops, we care about where it has been - * allocated to, not where the action will be executed + /* Remove the orders like : + * "load_stopped_node2" -> "rscA_migrate_to node1" + * which were created from: pengine/native.c: MigrateRsc() + * order_actions(other, then, other_w->type); */ - if(wrapper->action->node == NULL || action->rsc->allocated_to == NULL - || wrapper->action->node->details != action->rsc->allocated_to->details) { - /* Check if the actions are for the same node, ignore otherwise */ - crm_trace("load filter - migrate"); - wrapper->type = pe_order_none; - return FALSE; - } + wrapper->type = pe_order_none; + return FALSE; } else if (wrapper->action->node == NULL || action->node == NULL || wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - node"); wrapper->type = pe_order_none; return FALSE; } else if(is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, ignore if so */ crm_trace("load filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->action->rsc && wrapper->action->rsc != action->rsc && is_set(wrapper->action->rsc->flags, pe_rsc_failed) && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed) && strstr(wrapper->action->uuid, "_stop_0") && action->rsc && action->rsc->variant >= pe_clone) { crm_warn("Ignoring requirement that %s comeplete before %s:" " unmanaged failed resources cannot prevent clone shutdown", wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_dumped) || should_dump_action(wrapper->action)) { crm_trace( "Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #if 0 } else if (is_set(wrapper->action->flags, pe_action_runnable) && is_set(wrapper->action->flags, pe_action_pseudo) && wrapper->action->rsc->variant != pe_native) { crm_crit("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #endif } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) { crm_trace( "Input (%d) %s optional for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); crm_trace( "Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, 
pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type); return FALSE; } dump: crm_trace( "Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid); return TRUE; } void graph_element_from_action(action_t * action, pe_working_set_t * data_set) { GListPtr lpc = NULL; int last_action = -1; int synapse_priority = 0; xmlNode *syn = NULL; xmlNode *set = NULL; xmlNode *in = NULL; xmlNode *input = NULL; xmlNode *xml_action = NULL; if (should_dump_action(action) == FALSE) { return; } set_bit(action->flags, pe_action_dumped); syn = create_xml_node(data_set->graph, "synapse"); set = create_xml_node(syn, "action_set"); in = create_xml_node(syn, "inputs"); crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse); data_set->num_synapse++; if (action->rsc != NULL) { synapse_priority = action->rsc->priority; } if (action->priority > synapse_priority) { synapse_priority = action->priority; } if (synapse_priority > 0) { crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority); } xml_action = action2xml(action, FALSE); add_node_nocopy(set, crm_element_name(xml_action), xml_action); action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (should_dump_input(last_action, action, wrapper) == FALSE) { continue; } wrapper->state = pe_link_dumped; CRM_CHECK(last_action < wrapper->action->id,; ); last_action = wrapper->action->id; input = create_xml_node(in, "trigger"); xml_action = action2xml(wrapper->action, TRUE); add_node_nocopy(input, crm_element_name(xml_action), xml_action); } } diff --git a/pengine/native.c b/pengine/native.c index 148b4dfc59..9e4029988c 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,3165 +1,3161 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh, resource_t * rsc_rh, gboolean update_rh); void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set); void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set); void pe_post_notify(resource_t * rsc, node_t * node, action_t * op, notify_data_t * n_data, pe_working_set_t * data_set); gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set); /* *INDENT-OFF* */ enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, DemoteRsc, RoleError, DemoteRsc, NullOp, }, }; /* *INDENT-ON* */ struct capacity_data { node_t *node; resource_t *rsc; gboolean is_enough; }; static gboolean is_fencing_resource(resource_t *rsc) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_eq(class, "stonith")) { return TRUE; } return FALSE; } static void check_capacity(gpointer key, gpointer value, gpointer user_data) { int 
required = 0; int remaining = 0; struct capacity_data *data = user_data; required = crm_parse_int(value, "0"); remaining = crm_parse_int(g_hash_table_lookup(data->node->details->utilization, key), "0"); if (required > remaining) { pe_rsc_debug(data->rsc, "Node %s does not have enough %s for resource %s: required=%d remaining=%d", data->node->details->uname, (char *)key, data->rsc->id, required, remaining); data->is_enough = FALSE; } } static gboolean have_enough_capacity(node_t * node, resource_t * rsc) { struct capacity_data data; data.node = node; data.rsc = rsc; data.is_enough = TRUE; g_hash_table_foreach(rsc->utilization, check_capacity, &data); return data.is_enough; } static gboolean native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest weight) with the fewest resources 3. remove color.chosen_node from all other colors */ int alloc_details = scores_log_level + 1; GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; gboolean result = FALSE; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (have_enough_capacity(node, rsc) == FALSE) { pe_rsc_debug(rsc, "Resource %s cannot be allocated to node %s: not enough capacity", rsc->id, node->details->uname); resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set); } } dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes); } length = g_hash_table_size(rsc->allowed_nodes); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? TRUE : FALSE; } if (prefer) { chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen && chosen->weight >= 0 && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Using preferred node %s for %s instead of choosing from %d candidates", chosen->details->uname, rsc->id, length); } else if (chosen && chosen->weight < 0) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else if (chosen && can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); } } if (chosen == NULL && rsc->allowed_nodes) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, g_list_nth_data(rsc->running_on, 0)); chosen = g_list_nth_data(nodes, 0); pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ?
chosen->details->uname : "", rsc->id, length); if (chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if (running && can_run_resources(running) == FALSE) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); running = NULL; } for (lpc = 1; lpc < length && running; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if (tmp->weight == chosen->weight) { multiple++; if (tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if (multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. Chose %s.", multiple, score, rsc->id, chosen->details->uname); free(score); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } static int node_list_attr_score(GHashTable * list, const char *attr, const char *value) { GHashTableIter iter; node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { int weight = node->weight; if (can_run_resources(node) == FALSE) { weight = -INFINITY; } if (weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if (safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } } if (safe_str_neq(attr, "#" XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node ? best_node : "", best_score); } return best_score; } static void node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor, gboolean only_positive) { int score = 0; int new_score = 0; GHashTableIter iter; node_t *node = NULL; if (attr == NULL) { attr = "#" XML_ATTR_UNAME; } g_hash_table_iter_init(&iter, list1); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); new_score = merge_weights(factor * score, node->weight); if (factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO - Decide if we want to filter only if weight == -INFINITY * */ crm_trace("%s: Filtering %d + %f*%d (factor * score)", node->details->uname, node->weight, factor, score); } else if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f*%d (node < 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight > 0) { node->weight = INFINITY_HACK; crm_trace("%s: Filtering %d + %f*%d (score > 0)", node->details->uname, node->weight, factor, score); } else if (only_positive && new_score < 0 && node->weight == 0) { crm_trace("%s: Filtering %d + %f*%d (score == 0)", node->details->uname, node->weight, factor, score); } else { crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score); node->weight = new_score; } } } static GHashTable * node_hash_dup(GHashTable * hash) { /* Hack! 
*/ GListPtr list = g_hash_table_get_values(hash); GHashTable *result = node_hash_from_list(list); g_list_free(list); return result; } GHashTable * native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } GHashTable * rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { GHashTable *work = NULL; int multiplier = 1; if (factor < 0) { multiplier = -1; } if (is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); if (is_set(flags, pe_weights_init)) { if (rsc->variant == pe_group && rsc->children) { GListPtr last = rsc->children; while (last->next != NULL) { last = last->next; } pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last); work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags); } else { work = node_hash_dup(rsc->allowed_nodes); } clear_bit(flags, pe_weights_init); } else { pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id); work = node_hash_dup(nodes); node_hash_update(work, rsc->allowed_nodes, attr, factor, is_set(flags, pe_weights_positive)); } if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id); g_hash_table_destroy(work); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if (can_run_any(work)) { GListPtr gIter = NULL; if (is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; } else { gIter = rsc->rsc_cons_lhs; } for (; gIter != NULL; gIter = gIter->next) { resource_t *other = NULL; rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else { other = constraint->rsc_lh; } pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id); work = rsc_merge_weights(other, rhs, work, constraint->node_attribute, multiplier * (float) constraint->score / INFINITY, flags); dump_node_scores(LOG_TRACE, NULL, rhs, work); } } if(is_set(flags, pe_weights_positive)) { node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->weight == INFINITY_HACK) { node->weight = 1; } } } if (nodes) { g_hash_table_destroy(nodes); } clear_bit(rsc->flags, pe_rsc_merging); return work; } node_t * native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr gIter = NULL; int alloc_details = scores_log_level + 1; if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-allloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; GHashTable *archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; 
pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)", rsc->id, constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if (constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_hash_dup(rsc->allowed_nodes); } rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); if (archive && can_run_any(rsc->allowed_nodes) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive) { g_hash_table_destroy(archive); } } dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float) constraint->score / INFINITY, pe_weights_rollback); } for (gIter = rsc->rsc_tickets; gIter != NULL; gIter = gIter->next) { rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data; if (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby) { rsc_ticket_constraint(rsc, rsc_ticket, data_set); } } print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id); /* make sure it doesnt come up again */ resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; rsc->next_role = rsc->role; if (rsc->running_on == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if (is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to ? 
assign_to->details->uname : "'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if (is_set(data_set->flags, pe_flag_stop_everything) && is_fencing_resource(rsc) == FALSE) { pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if (is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set)) { pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if (rsc->allocated_to == NULL) { if (is_not_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); return rsc->allocated_to; } static gboolean is_op_dup(resource_t * rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { value = crm_element_value(operation, "name"); if (safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (value == NULL) { value = "0"; } if (safe_str_neq(value, interval)) { continue; } if (id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err ("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } } } return dup; } void RecurringOp(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; /* Only process for the operations without role="Stopped" */ value = crm_element_value(operation, "role"); if (value && text2role(value) == RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node ? node->details->uname : "n/a"); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s with name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, is_set(start->flags, pe_action_optional) ?
"optional" : "mandatory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s mandatory: not active", key); } else { g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if (is_optional) { char *local_key = strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* it's running: cancel it */ mon = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(mon->task); mon->task = strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if (local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, value ? value : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear); } else if (is_set(mon->flags, pe_action_optional) == FALSE) { pe_rsc_info(rsc, " Start recurring %s (%llus) for %s on %s", mon->task, interval_ms / 1000, rsc->id, crm_str(node_uname)); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } void Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp(rsc, start, node,
operation, data_set); } } } } void RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* TODO: Support of non-unique clone */ if (is_set(rsc->flags, pe_rsc_unique) == FALSE) { return; } /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } pe_rsc_trace(rsc, "Creating recurring actions %s for %s in role %s on nodes where it'll not be running", ID(operation), rsc->id, role2text(rsc->next_role)); if (node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval)) { return; } if (safe_str_eq(name, RSC_STOP) || safe_str_eq(name, RSC_START) || safe_str_eq(name, RSC_DEMOTE) || safe_str_eq(name, RSC_PROMOTE) ) { crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name); return; } key = generate_op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ free(key); return; } /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { action_t *cancel_op = NULL; char *local_key = strdup(key); g_list_free(possible_matches); cancel_op = custom_action(rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); cancel_op->task = strdup(RSC_CANCEL); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs. 
%s) on %s", key, role, role2text(rsc->next_role), crm_str(node_uname)); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *stop_node = (node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; char *stop_op_key = NULL; if (node_uname && safe_str_eq(stop_node_uname, node_uname)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s manditory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_EXECRA_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); probe_complete_ops = find_actions(data_set->actions, CRM_OP_PROBED, NULL); for (local_gIter = probe_complete_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *probe_complete = (action_t *) local_gIter->data; if (probe_complete->node == NULL) { if (is_set(probe_complete->flags, pe_action_optional) == FALSE) { probe_is_optional = FALSE; } if (is_set(probe_complete->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : probe un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(NULL, NULL, probe_complete, NULL, strdup(key), stopped_mon, pe_order_optional, data_set); } break; } } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_op_key = stop_key(rsc); stop_ops = find_actions_exact(rsc->actions, stop_op_key, stop_node); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { action_t *stop = (action_t *) local_gIter->data; if (is_set(stop->flags, pe_action_optional) == FALSE) { stop_is_optional = FALSE; } if (is_set(stop->flags, pe_action_runnable) == FALSE) { crm_debug("%s\t %s (cancelled : stop un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, strdup(stop_op_key), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } free(stop_op_key); if (is_optional == FALSE && probe_is_optional && stop_is_optional && is_set(rsc->flags, pe_rsc_managed) == FALSE) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional); } if (is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), 
stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear); } if (is_set(stopped_mon->flags, pe_action_runnable) && is_set(stopped_mon->flags, pe_action_optional) == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } free(key); } void Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set) { if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { RecurringOp_Stopped(rsc, start, node, operation, data_set); } } } } void native_create_actions(resource_t * rsc, pe_working_set_t * data_set) { action_t *start = NULL; node_t *chosen = NULL; node_t *current = NULL; gboolean need_stop = FALSE; GListPtr gIter = NULL; int num_active_nodes = 0; gboolean fence_device = is_fencing_resource(rsc); enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; chosen = rsc->allocated_to; if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } else if (rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role)); } pe_rsc_trace(rsc, "Processing state transition for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); if(rsc->running_on) { current = rsc->running_on->data; } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; if(fence_device && n->details->unclean) { crm_info("Ignoring %s on %s: fencing resource on an unclean node", rsc->id, n->details->uname); continue; } num_active_nodes++; } get_rsc_attributes(rsc->parameters, rsc, chosen, data_set); for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop = stop_action(rsc, current, FALSE); set_bit(stop->flags, pe_action_dangle); pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s", rsc->id, current->details->uname); if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, FALSE, data_set); } } if (num_active_nodes > 1) { if (num_active_nodes == 2 && chosen && rsc->partial_migration_target && (chosen->details == rsc->partial_migration_target->details)) { /* Here the chosen node is still the migration target from a partial * migration. Attempt to continue the migration instead of recovering * by stopping the resource everywhere and starting it on a single node. 
*/ pe_rsc_trace(rsc, "Will attempt to continue with a partial migration to target %s from %s", rsc->partial_migration_target->details->id, rsc->partial_migration_source->details->id); } else { const char *type = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); pe_proc_err("Resource %s (%s::%s) is active on %d nodes %s", rsc->id, class, type, num_active_nodes, recovery2text(rsc->recovery_type)); crm_warn("See %s for more information.", "http://clusterlabs.org/wiki/FAQ#Resource_is_Too_Active"); if (rsc->recovery_type == recovery_stop_start) { need_stop = TRUE; } /* If by chance a partial migration is in process, * but the migration target is not chosen still, clear all * partial migration data. */ rsc->partial_migration_source = rsc->partial_migration_target = NULL; } } if (is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, chosen, TRUE); set_bit(start->flags, pe_action_print_always); } if(current && chosen && current->details != chosen->details) { pe_rsc_trace(rsc, "Moving %s", rsc->id); need_stop = TRUE; } else if(is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Recovering %s", rsc->id); need_stop = TRUE; } else if(rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) { /* Recovery of a promoted resource */ start = start_action(rsc, chosen, TRUE); if(is_set(start->flags, pe_action_optional) == FALSE) { pe_rsc_trace(rsc, "Forced start %s", rsc->id); need_stop = TRUE; } } pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); role = rsc->role; /* Potentiall optional steps on brining the resource down and back up to the same level */ while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop?" required":""); if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) { break; } role = next_role; } while (rsc->role <= rsc->next_role && role != rsc->role) { next_role = rsc_state_matrix[role][rsc->role]; pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), rsc->id, need_stop?" 
required":""); if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Role: Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { Recurring_Stopped(rsc, NULL, NULL, data_set); } } void native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ resource_t *top = uber_parent(rsc); int type = pe_order_optional | pe_order_implies_then | pe_order_restart; custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, type, data_set); if (top->variant == pe_master) { custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if (is_fencing_resource(rsc) == FALSE) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(all_stopped->task), all_stopped, pe_order_implies_then | pe_order_runnable_left, data_set); } if (g_hash_table_size(rsc->utilization) > 0 && safe_str_neq(data_set->placement_strategy, "default")) { GHashTableIter iter; node_t *next = NULL; GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&next)) { char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = node_copy(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); - custom_action_order(NULL, strdup(load_stopped_task), load_stopped, - rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, - pe_order_load, data_set); - free(load_stopped_task); } } } void native_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * 
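The three role-walk loops above move the resource step by step: rsc_state_matrix names the next intermediate role between the current role and the target role, and rsc_action_matrix supplies the handler (StopRsc, StartRsc, DemoteRsc, PromoteRsc, ...) that schedules the corresponding action. As a rough standalone sketch of that table-driven walk — simplified role set, invented matrices and stub handlers, not Pacemaker's actual tables:

#include <stdio.h>
#include <stdbool.h>

/* Simplified role set; the real enum rsc_role_e has more states. */
enum role { STOPPED, STARTED, SLAVE, MASTER, N_ROLES };

static const char *role_name[N_ROLES] = { "Stopped", "Started", "Slave", "Master" };

/* next_role[current][target]: which intermediate role to move through next.
 * Mirrors the idea of rsc_state_matrix, not its actual contents. */
static const enum role next_role[N_ROLES][N_ROLES] = {
    /* to:  STOPPED    STARTED    SLAVE     MASTER      from:      */
    {       STOPPED,   STARTED,   SLAVE,    SLAVE   },  /* STOPPED */
    {       STOPPED,   STARTED,   SLAVE,    SLAVE   },  /* STARTED */
    {       STOPPED,   STOPPED,   SLAVE,    MASTER  },  /* SLAVE   */
    {       SLAVE,     SLAVE,     SLAVE,    MASTER  },  /* MASTER  */
};

/* Handlers play the part of StopRsc/StartRsc/PromoteRsc/DemoteRsc. */
typedef bool (*transition_fn)(void);
static bool do_stop(void)    { puts("  stop");    return true; }
static bool do_start(void)   { puts("  start");   return true; }
static bool do_promote(void) { puts("  promote"); return true; }
static bool do_demote(void)  { puts("  demote");  return true; }
static bool no_op(void)      { return true; }

static const transition_fn action[N_ROLES][N_ROLES] = {
    /* to:  STOPPED     STARTED    SLAVE      SLAVE       from:      */
    {       no_op,      do_start,  do_start,  do_start  }, /* STOPPED */
    {       do_stop,    no_op,     no_op,     do_promote}, /* STARTED */
    {       do_stop,    do_stop,   no_op,     do_promote}, /* SLAVE   */
    {       do_demote,  do_demote, do_demote, no_op     }, /* MASTER  */
};

int main(void)
{
    enum role role = MASTER, target = STOPPED;

    printf("Transition %s -> %s\n", role_name[role], role_name[target]);
    while (role != target) {
        enum role next = next_role[role][target];

        if (!action[role][next]()) {
            break;              /* handler refused, stop walking */
        }
        role = next;
    }
    return 0;
}

A handler returning FALSE ends the walk early, which is how entries such as NullOp above can short-circuit a transition.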
constraint) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } enum filter_colocation_res { influence_nothing = 0, influence_rsc_location, influence_rsc_priority, }; static enum filter_colocation_res filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { if (constraint->score == 0) { return influence_nothing; } /* rh side must be allocated before we can process constraint */ if (is_set(rsc_rh->flags, pe_rsc_provisional)) { return influence_nothing; } if ((constraint->role_lh >= RSC_ROLE_SLAVE) && rsc_lh->parent && rsc_lh->parent->variant == pe_master && is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* LH and RH resources have already been allocated, place the correct * priority oh LH rsc for the given multistate resource role */ return influence_rsc_priority; } if (is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if ((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return influence_nothing; } details_rh = rsc_rh->allocated_to ? rsc_rh->allocated_to->details : NULL; details_lh = rsc_lh->allocated_to ? rsc_lh->allocated_to->details : NULL; if (constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. %s", rsc_lh->id, rsc_rh->id, details_lh ? details_lh->uname : "n/a", details_rh ? details_rh->uname : "n/a"); } else if (constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh ? 
details_rh->uname : "n/a"); } return influence_nothing; } if (constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { crm_trace( "LH: Skipping constraint: \"%s\" state filter nextrole is %s", role2text(constraint->role_lh), role2text(rsc_lh->next_role)); return influence_nothing; } if (constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { crm_trace( "RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if (constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { crm_trace( "LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_lh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { crm_trace( "RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } return influence_rsc_location; } static void influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *rh_value = NULL; const char *lh_value = NULL; const char *attribute = "#id"; int score_multiplier = 1; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { return; } lh_value = g_hash_table_lookup(rsc_lh->allocated_to->details->attrs, attribute); rh_value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); if (!safe_str_eq(lh_value, rh_value)) { return; } if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) { return; } if (constraint->role_lh == RSC_ROLE_SLAVE) { score_multiplier = -1; } rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority); } static void colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GHashTable *work = NULL; gboolean do_check = FALSE; GHashTableIter iter; node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = g_hash_table_lookup(rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if (constraint->score < 0) { /* nothing to do: * anti-colocation with something thats not running */ return; } work = node_hash_dup(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { tmp = g_hash_table_lookup(node->details->attrs, attribute); if (do_check && safe_str_eq(tmp, value)) { if (constraint->score < INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights(constraint->score, node->weight); } } else if (do_check == FALSE || constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check ? 
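Both influence_priority and colocation_match above turn a colocation score into arithmetic on existing weights via merge_weights, which saturates at plus or minus INFINITY so that a mandatory ban can never be bought back by finite preferences. A small self-contained illustration of that idea — MY_INFINITY, the node list and the attribute values are invented for the example, and the real merge_weights handles more cases:

#include <stdio.h>
#include <string.h>

#define MY_INFINITY 1000000   /* stand-in for Pacemaker's INFINITY score */

/* Saturating score merge: once a weight hits +/-INFINITY it stays there. */
static int merge_weights(int w1, int w2)
{
    long sum = (long)w1 + (long)w2;

    if (w1 <= -MY_INFINITY || w2 <= -MY_INFINITY || sum <= -MY_INFINITY) {
        return -MY_INFINITY;
    }
    if (w1 >= MY_INFINITY || w2 >= MY_INFINITY || sum >= MY_INFINITY) {
        return MY_INFINITY;
    }
    return (int)sum;
}

struct node {
    const char *uname;
    const char *attr_value;   /* value of the colocation node attribute */
    int weight;
};

int main(void)
{
    /* Assume the with-resource runs on a node whose attribute is "rack1"
     * and the colocation score is a finite preference of 200. */
    const char *rh_value = "rack1";
    int score = 200;
    int i;

    struct node allowed[] = {
        { "node1", "rack1", 50 },
        { "node2", "rack2", 80 },
        { "node3", "rack1", -MY_INFINITY },   /* already banned elsewhere */
    };

    for (i = 0; i < (int)(sizeof(allowed) / sizeof(allowed[0])); i++) {
        int is_match = (strcmp(allowed[i].attr_value, rh_value) == 0);

        if (is_match && score < MY_INFINITY) {
            /* preference: pull matching nodes up by the score */
            allowed[i].weight = merge_weights(score, allowed[i].weight);
        } else if (!is_match && score >= MY_INFINITY) {
            /* mandatory colocation: rule out non-matching nodes */
            allowed[i].weight = merge_weights(-score, allowed[i].weight);
        }
        printf("%s: %d\n", allowed[i].uname, allowed[i].weight);
    }
    return 0;
}

Note how node3 stays at -MY_INFINITY despite the positive preference: the saturation is what keeps bans authoritative.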
"failed" : "unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { char *score = score2char(constraint->score); pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); free(score); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { enum filter_colocation_res filter_results; filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0 ? "" : "Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: /*Promotion score will be set to -INFINITY in master_promotion_order() */ if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; crm_warn("Node %s will be fenced for deadman", node->details->uname); node->details->unclean = TRUE; } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (g_list_length(rsc_lh->running_on) > 0) { clear_bit(rsc_lh->flags, pe_rsc_managed); set_bit(rsc_lh->flags, pe_rsc_block); } break; } } else if (rsc_ticket->ticket->granted == FALSE){ if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if 
(rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(action_t * action, node_t * node) { return action->flags; } enum pe_graph_flags native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; if (type & pe_order_asymmetrical) { resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0; if (!then_rsc) { /* ignore */ } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) { /* ignore... if 'then' is supposed to be stopped after 'first', but * then is already stopped, there is nothing to be done when non-symmetrical. */ } else if ((then_rsc_role == RSC_ROLE_STARTED) && safe_str_eq(then->task, RSC_START)) { /* ignore... if 'then' is supposed to be started after 'first', but * then is already started, there is nothing to be done when non-symmetrical. */ } else if (!(first->flags & pe_action_runnable)) { /* prevent 'then' action from happening if 'first' is not runnable and * 'then' has not yet occurred. */ pe_clear_action_bit(then, pe_action_runnable); pe_clear_action_bit(then, pe_action_optional); pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid); } else { /* ignore... then is allowed to start/stop if it wants to. */ } } if (type & pe_order_implies_first) { if ((filter & pe_action_optional) && (flags & pe_action_optional) == 0) { pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid); pe_clear_action_bit(first, pe_action_optional); } } if (type & pe_order_implies_first_master) { if ((filter & pe_action_optional) && ((then->flags & pe_action_optional) == FALSE) && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) { clear_bit(first->flags, pe_action_optional); } } if (is_set(type, pe_order_runnable_left) && is_set(filter, pe_action_runnable) && is_set(then->flags, pe_action_runnable) && is_set(flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); pe_clear_action_bit(then, pe_action_runnable); } if (is_set(type, pe_order_implies_then) && is_set(filter, pe_action_optional) && is_set(then->flags, pe_action_optional) && is_set(flags, pe_action_optional) == FALSE) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); pe_clear_action_bit(then, pe_action_optional); } if (is_set(type, pe_order_restart)) { const char *reason = NULL; CRM_ASSERT(first->rsc && first->rsc->variant == pe_native); CRM_ASSERT(then->rsc && then->rsc->variant == pe_native); if ((filter & pe_action_runnable) && (then->flags & pe_action_runnable) == 0) { reason = "shutdown"; } if ((filter & pe_action_optional) && (then->flags & pe_action_optional) == 0) { reason = "recover"; } if (reason && is_set(first->flags, pe_action_optional) && is_set(first->flags, pe_action_runnable)) { pe_rsc_trace(first->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_clear_action_bit(first, pe_action_optional); } if (reason && is_not_set(first->flags, pe_action_optional) && 
is_not_set(first->flags, pe_action_runnable)) { pe_rsc_trace(then->rsc, "Handling %s: %s -> %s", reason, first->uuid, then->uuid); pe_clear_action_bit(then, pe_action_runnable); } } if (then_flags != then->flags) { changed |= pe_graph_updated_then; pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); } if (first_flags != first->flags) { changed |= pe_graph_updated_first; pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = NULL; GHashTableIter iter; node_t *node = NULL; if (constraint == NULL) { pe_err("Constraint is NULL"); return; } else if (rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if (constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if (is_active(constraint) == FALSE) { pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id); return; } for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *other_node = NULL; other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "%s + %s: %d + %d", node->details->uname, other_node->details->uname, node->weight, other_node->weight); other_node->weight = merge_weights(other_node->weight, node->weight); } else { node_t *new_node = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) new_node->details->id, new_node); } } g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight); } } void native_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } void #define log_change(fmt, args...) 
do { \ if(terminal) { \ printf(" * "fmt"\n", ##args); \ } else { \ crm_notice(fmt, ##args); \ } \ } while(0) LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { node_t *next = NULL; node_t *current = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *demote = NULL; action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { if (g_list_length(rsc->running_on) > 1 && rsc->partial_migration_source) { current = rsc->partial_migration_source; } else { current = rsc->running_on->data; } if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed) ? " unmanaged" : ""); return; } if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } key = start_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = stop_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } key = promote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } key = demote_key(rsc); possible_matches = find_actions(rsc->actions, key, next); free(key); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { key = generate_op_key(rsc->id, RSC_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); free(key); CRM_CHECK(next != NULL,); if (next == NULL) { } else if (possible_matches && current) { log_change("Migrate %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start == NULL || is_set(start->flags, pe_action_optional)) { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (moving && current) { log_change("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { log_change("Stop %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if (rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if (current != NULL) { gboolean 
allowed = FALSE; if (demote != NULL && (demote->flags & pe_action_runnable)) { allowed = TRUE; } log_change("Demote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname, allowed ? "" : " - blocked"); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->next_role > RSC_ROLE_STOPPED) { if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } } } else if (rsc->next_role == RSC_ROLE_STOPPED) { GListPtr gIter = NULL; CRM_CHECK(current != NULL,); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; log_change("Stop %s\t(%s)", rsc->id, node->details->uname); } } if (moving) { log_change("Move %s\t(%s %s -> %s)", rsc->id, role2text(rsc->next_role), current->details->uname, next->details->uname); } if (rsc->role == RSC_ROLE_STOPPED) { gboolean allowed = FALSE; if(start && (start->flags & pe_action_runnable)) { allowed = TRUE; } CRM_CHECK(next != NULL,); if (next != NULL) { log_change("Start %s\t(%s%s)", rsc->id, next->details->uname, allowed?"":" - blocked"); } if(allowed == FALSE) { return; } } if (rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { gboolean allowed = FALSE; CRM_CHECK(next != NULL,); if (stop != NULL && is_not_set(stop->flags, pe_action_optional) && rsc->role > RSC_ROLE_STOPPED) { if (is_set(rsc->flags, pe_rsc_failed)) { log_change("Recover %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(is_set(rsc->flags, pe_rsc_reload)) { log_change("Reload %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { log_change("Restart %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } if (promote && (promote->flags & pe_action_runnable)) { allowed = TRUE; } log_change("Promote %s\t(%s -> %s %s%s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname, allowed ? 
"" : " - blocked"); } } gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "%s", rsc->id); if (rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && is_fencing_resource(rsc)) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order(NULL, strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_optional | pe_order_stonith_stop, data_set); } for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; action_t *stop; if (rsc->partial_migration_target) { if(rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if(is_not_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable|pe_action_clear); } if (is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } } return TRUE; } gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { action_t *start = NULL; pe_rsc_trace(rsc, "%s on %s %d", rsc->id, next?next->details->uname:"N/A", optional); start = start_action(rsc, next, TRUE); if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { update_action_flags(start, pe_action_optional | pe_action_clear); } return TRUE; } gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; pe_rsc_trace(rsc, "%s on %s", rsc->id, next?next->details->uname:"N/A"); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *start = (action_t *) gIter->data; if (is_set(start->flags, pe_action_runnable) == FALSE) { runnable = FALSE; } } g_list_free(action_list); if (runnable) { promote_action(rsc, next, optional); return TRUE; } pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *promote = (action_t *) gIter->data; update_action_flags(promote, pe_action_runnable | pe_action_clear); } g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "%s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { node_t *current = (node_t *) gIter->data; pe_rsc_trace(rsc, "%s on %s", rsc->id, next?next->details->uname:"N/A"); demote_action(rsc, current, optional); } return TRUE; } gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { crm_err("%s on %s", rsc->id, next?next->details->uname:"N/A"); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) { pe_rsc_trace(rsc, "%s", 
rsc->id); return FALSE; } gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? pe_order_implies_then : pe_order_optional, data_set); #if DELETE_THEN_REFRESH refresh = custom_action(NULL, strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; static char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if (last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len - 1; while (complete == FALSE && lpc > 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; last_rsc_id = calloc(1, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len + 1] = 0; complete = TRUE; free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); return NULL; break; } } return last_rsc_id; } static node_t * probe_grouped_clone(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { node_t *running = NULL; resource_t *top = uber_parent(rsc); if (running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. 
* * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while (peer && running == NULL) { running = pe_hash_table_lookup(peer->known_on, node->details->id); if (running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active clone: %s", rsc->id); free(clone_id); return running; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } free(clone_id); } return running; } gboolean native_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { char *key = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_EXECRA_NOT_RUNNING); rc_master = crm_itoa(PCMK_EXECRA_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return any_created; } if (is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } if(node->details->pending && is_fencing_resource(rsc)) { crm_trace("Skipping probe for fencing resource %s on pending node %s", rsc->id, node->details->uname); return FALSE; } running = g_hash_table_lookup(rsc->known_on, node->details->id); if (running == NULL && is_set(rsc->flags, pe_rsc_unique) == FALSE) { /* Anonymous clones */ if (rsc->parent == top) { running = g_hash_table_lookup(rsc->parent->known_on, node->details->id); } else { /* Grouped anonymous clones need extra special handling */ running = probe_grouped_clone(rsc, node, data_set); } } if (force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ pe_rsc_trace(rsc, "Skipping active: %s on %s", rsc->id, node->details->uname); return FALSE; } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear); /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. 
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } pe_rsc_debug(rsc, "Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_then); return TRUE; } static void native_start_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, pe_working_set_t * data_set) { node_t *target = stonith_op ? stonith_op->node : NULL; if (is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); pe_rsc_trace(rsc, "Ordering %s action before stonith events", key); custom_action_order(rsc, key, NULL, NULL, strdup(ready->task), ready, pe_order_optional | pe_order_implies_then, data_set); } else { GListPtr gIter = NULL; action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_optional); } else if (target != NULL && safe_str_eq(action->task, RSC_START) && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { /* if known == NULL, then we dont know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * to with the resource * * its analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explaination is that the * DC died and took its status with it */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_optional | pe_order_runnable_left); } } } } static void native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, pe_working_set_t * data_set) { char *key = NULL; GListPtr gIter = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); free(key); /* add the stonith OP as a stop pre-req and the mark the stop * as a pseudo op - since its now redundant */ for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if (is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); update_action_flags(action, pe_action_implied_by_stonith); if (is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if (is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * 
Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, strdup("notify_stop_resource"), strdup(rsc->id)); g_hash_table_insert(n_data->keys, strdup("notify_stop_uname"), strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* From Bug #1601, successful fencing must be an input to a failed resources stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ } g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); free(key); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->node->details->online == FALSE || action->node->details->unclean == TRUE || is_set(rsc->flags, pe_rsc_failed)) { if (is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is" " implict after %s is fenced", rsc->id, action->node->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ crm_trace("here - 1"); update_action_flags(action, pe_action_pseudo); update_action_flags(action, pe_action_runnable); if (is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } } g_list_free(action_list); } void rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) { gboolean is_stonith = FALSE; if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } return; } if (is_not_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if (stonith_op != NULL && is_fencing_resource(rsc)) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op, is_stonith, data_set); /* 
Stop constraints */ if(stonith_op) { native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t * rsc, resource_t * target, node_t * node, const char *type) { int mode = stack_stable; action_t *active = NULL; if (target->children) { GListPtr gIter = NULL; for (gIter = target->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; mode |= find_clone_activity_on(rsc, child, node, type); } return mode; } active = find_first_action(target->actions, NULL, RSC_START, NULL); if (active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { pe_rsc_debug(rsc, "%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, RSC_STOP, node); if (active && is_set(active->flags, pe_action_optional) == FALSE && is_set(active->flags, pe_action_pseudo) == FALSE) { pe_rsc_debug(rsc, "%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t * rsc, resource_t * other_rsc, const char *type) { resource_t *other_p = uber_parent(other_rsc); if (other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if (other_p->variant == pe_native) { crm_notice("Cannot migrate %s due to dependency on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if (other_rsc == rsc->parent) { int mode = 0; GListPtr gIter = NULL; for (gIter = other_rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; if (constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } } return mode; } else if (other_p->variant == pe_group) { crm_notice("Cannot migrate %s due to dependency on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following constraints already exist in the system. The 'ok' and 'fail' column refers to whether they still hold for migration. 
a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stoping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as-per previous only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t * rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; GListPtr gIter = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); free(key); pe_rsc_trace(rsc, "%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; resource_t *target = constraint->rsc_rh; pe_rsc_trace(rsc, "Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if (constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if (mode & stack_middle) { return FALSE; } else if ((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } } for (gIter = start->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t *) gIter->data; other = other_w->action; if (other_w->type & pe_order_serialize_only) { pe_rsc_trace(rsc, "%s: depends on %s (serialize ordering)", rsc->id, other->uuid); continue; } pe_rsc_trace(rsc, "%s: Checking %s ordering", rsc->id, other->uuid); if (is_set(other->flags, pe_action_optional) == FALSE) { mode |= check_stack_element(rsc, other->rsc, "order"); if (mode & stack_middle) { return FALSE; } else if ((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } } return TRUE; } static action_t * get_first_named_action(resource_t *rsc, const char *action, gboolean only_valid, node_t *current) { action_t *a = NULL; GListPtr action_list = NULL; char *key = generate_op_key(rsc->id, action, 0); action_list = find_actions(rsc->actions, key, current); if (action_list == NULL || action_list->data == NULL) { crm_trace("%s: no %s action", rsc->id, action); free(key); return NULL; } a = action_list->data; g_list_free(action_list); if(only_valid && is_set(a->flags, pe_action_pseudo)) { crm_trace("%s: pseudo", key); a = NULL; } else if(only_valid && is_not_set(a->flags, pe_action_runnable)) { crm_trace("%s: runnable", key); a = NULL; } free(key); return a; } static void MigrateRsc(resource_t * rsc, action_t *stop, action_t *start, pe_working_set_t * data_set, gboolean partial) { action_t *to = NULL; action_t *from = NULL; action_t *then = NULL; action_t *other = NULL; action_t *done = get_pseudo_op(STONITH_DONE, data_set); GListPtr gIter = NULL; const char *value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if (crm_is_true(value) == FALSE) { return; } if (rsc->next_role > RSC_ROLE_SLAVE) { pe_rsc_trace(rsc, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); return; } if(start == NULL || stop == 
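check_stack_element and at_stack_bottom above reduce the activity of every dependency to the stack_* bits and permit migration only when the combined mask is stable, only starting, or only stopping; stack_middle (a dependency on a primitive or group) or simultaneous starting and stopping rules the move out. A compact sketch of that flag-combining decision, with invented dependency inputs:

#include <stdio.h>

/* Same idea as enum stack_activity above. */
enum stack_activity {
    stack_stable   = 0,
    stack_starting = 1,
    stack_stopping = 2,
    stack_middle   = 4,
};

/* OR together what each dependency is doing, then decide. */
static int migration_allowed(const int *dep_modes, int n_deps)
{
    int mode = stack_stable;
    int i;

    for (i = 0; i < n_deps; i++) {
        mode |= dep_modes[i];
    }

    if (mode & stack_middle) {
        return 0;   /* depends on a primitive/group: never migratable */
    }
    if ((mode & stack_starting) && (mode & stack_stopping)) {
        return 0;   /* clone is both starting and stopping: unsafe */
    }
    return 1;       /* stable, only starting, or only stopping */
}

int main(void)
{
    int ok_case[]  = { stack_stable, stack_starting };
    int bad_case[] = { stack_starting, stack_stopping };

    printf("only starting: %s\n", migration_allowed(ok_case, 2) ? "migrate" : "full stop/start");
    printf("start+stop:    %s\n", migration_allowed(bad_case, 2) ? "migrate" : "full stop/start");
    return 0;
}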
NULL) { pe_rsc_trace(rsc, "%s: not exists %p -> %p", rsc->id, stop, start); return; } else if (start->node == NULL || stop->node == NULL) { pe_rsc_trace(rsc, "%s: no node %p -> %p", rsc->id, stop->node, start->node); return; } else if(is_set(stop->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: stop action", rsc->id); return; } else if(is_set(start->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: start action", rsc->id); return; } else if (stop->node->details == start->node->details) { pe_rsc_trace(rsc, "%s: not moving %p -> %p", rsc->id, stop->node, start->node); return; } else if (at_stack_bottom(rsc) == FALSE) { pe_rsc_trace(rsc, "%s: not at stack bottom", rsc->id); return; } pe_rsc_trace(rsc, "%s %s -> %s", rsc->id, stop->node->details->uname, start->node->details->uname); if (partial) { pe_rsc_info(rsc, "Completing partial migration of %s from %s to %s", rsc->id, stop->node ? stop->node->details->uname : "unknown", start->node ? start->node->details->uname : "unknown"); } else { pe_rsc_info(rsc, "Migrating %s from %s to %s", rsc->id, stop->node ? stop->node->details->uname : "unknown", start->node ? start->node->details->uname : "unknown"); } /* Preserve the stop to ensure the end state is sane on that node, * Make the start a pseudo op * Create migrate_to, have it depend on everything the stop did * Create migrate_from * *-> migrate_to -> migrate_from -> stop -> start */ update_action_flags(start, pe_action_pseudo); /* easier than trying to delete it from the graph * but perhaps we should have it run anyway */ if (!partial) { to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, stop->node, FALSE, TRUE, data_set); } from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, start->node, FALSE, TRUE, data_set); /* This is slightly sub-optimal if 'to' fails, but always * run both halves of the migration before terminating the * transition. * * This can be removed if/when we update unpack_rsc_op() to * 'correctly' handle partial migrations. * * Without this, we end up stopping both sides */ from->priority = INFINITY; if (!partial) { order_actions(to, from, pe_order_optional); add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname); add_hash_param(to->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname); } then = to ? 
to : from; order_actions(from, stop, pe_order_optional); order_actions(done, then, pe_order_optional); add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, stop->node->details->uname); add_hash_param(from->meta, XML_LRM_ATTR_MIGRATE_TARGET, start->node->details->uname); /* Create the correct ordering ajustments based on find_clone_activity_on(); */ for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; resource_t *target = constraint->rsc_rh; pe_rsc_info(rsc, "Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if (constraint->score > 0) { int mode = check_stack_element(rsc, target, "coloc"); action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); CRM_ASSERT(clone_stop != NULL); CRM_ASSERT(clone_start != NULL); CRM_ASSERT((mode & stack_middle) == 0); CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0); if (mode & stack_stopping) { #if 0 crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id); order_actions(from, clone_stop, pe_order_optional); #endif GListPtr lpc2 = NULL; for (lpc2 = start->actions_before; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other_w = (action_wrapper_t *) lpc2->data; /* Needed if the clone's started pseudo-action ever gets printed in the graph */ if (other_w->action == clone_start) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, start->uuid); other_w->type = pe_order_none; } } } else if (mode & stack_starting) { #if 0 crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id); order_actions(clone_start, to, pe_order_optional); #endif GListPtr lpc2 = NULL; for (lpc2 = clone_stop->actions_before; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other_w = (action_wrapper_t *) lpc2->data; /* Needed if the clone's stop pseudo-action ever gets printed in the graph */ if (other_w->action == stop) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, clone_stop->uuid); other_w->type = pe_order_none; } } } } } #if 0 /* Implied now that start/stop are not morphed into migrate ops */ /* Anything that needed stop to complete, now also needs start to have completed */ for (gIter = stop->actions_after; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t *) gIter->data; other = other_w->action; if (is_set(other->flags, pe_action_optional) || other->rsc != NULL) { continue; } crm_debug("Ordering %s before %s (stop)", from->uuid, other->uuid); order_actions(from, other, other_w->type); } #endif /* migrate 'then' action also needs anything that the stop needed to have completed too */ for (gIter = stop->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t *) gIter->data; other = other_w->action; if (other->rsc == NULL) { /* nothing */ } else if (is_set(other->flags, pe_action_optional) || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (stop)", other_w->action->uuid, stop->uuid); order_actions(other, then, other_w->type); } /* migrate 'then' action also needs anything that the start needed to have completed too */ for (gIter = start->actions_before; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other_w = (action_wrapper_t *) gIter->data; other = other_w->action; if (other->rsc == NULL) { /* nothing */ } else if (is_set(other->flags, 
pe_action_optional) || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (start)", other_w->action->uuid, stop->uuid); order_actions(other, then, other_w->type); } } static void ReloadRsc(resource_t * rsc, action_t *stop, action_t *start, pe_working_set_t * data_set) { action_t *action = NULL; action_t *rewrite = NULL; if(is_not_set(rsc->flags, pe_rsc_try_reload)) { return; } else if(is_not_set(stop->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: stop action", rsc->id); return; } else if(is_not_set(start->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s: start action", rsc->id); return; } pe_rsc_trace(rsc, "%s on %s", rsc->id, stop->node->details->uname); action = get_first_named_action(rsc, RSC_PROMOTE, TRUE, NULL); if (action && is_set(action->flags, pe_action_optional) == FALSE) { update_action_flags(action, pe_action_pseudo); } action = get_first_named_action(rsc, RSC_DEMOTE, TRUE, NULL); if (action && is_set(action->flags, pe_action_optional) == FALSE) { rewrite = action; update_action_flags(stop, pe_action_pseudo); } else { rewrite = start; } pe_rsc_info(rsc, "Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); set_bit(rsc->flags, pe_rsc_reload); update_action_flags(rewrite, pe_action_optional|pe_action_clear); free(rewrite->uuid); free(rewrite->task); rewrite->task = strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } void rsc_migrate_reload(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; action_t *stop = NULL; action_t *start = NULL; gboolean partial = FALSE; if (rsc->children) { for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; rsc_migrate_reload(child_rsc, data_set); } return; } else if (rsc->variant > pe_native) { return; } pe_rsc_trace(rsc, "Processing %s", rsc->id); if (rsc->partial_migration_target) { start = get_first_named_action(rsc, RSC_START, TRUE, rsc->partial_migration_target); stop = get_first_named_action(rsc, RSC_STOP, TRUE, rsc->partial_migration_source); if (start && stop) { partial = TRUE; } } pe_rsc_trace(rsc, "%s %s %p", rsc->id, partial?"partial":"full", stop); if (!partial) { stop = get_first_named_action(rsc, RSC_STOP, TRUE, rsc->running_on ? 
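MigrateRsc above rewrites an ordinary move into migrate_to on the source node followed by migrate_from on the target, keeps the stop for cleanup on the source, demotes the start to a pseudo-op, and carries the source/target node names as operation attributes. A stripped-down sketch of assembling that ordering chain — the action struct and order() helper are stand-ins for action_t and order_actions, not the code in this patch:

#include <stdio.h>

/* Minimal stand-ins for actions and ordering constraints. */
struct action {
    const char *task;
    const char *node;
    const char *migrate_source;  /* op attribute, only set on migrate ops */
    const char *migrate_target;
    int pseudo;                  /* 1: not executed, only kept for ordering */
};

static void order(const struct action *first, const struct action *then)
{
    printf("order: %s@%s -> %s@%s\n", first->task, first->node, then->task, then->node);
}

int main(void)
{
    const char *source = "node1", *target = "node2";

    struct action stop  = { "stop",  source, NULL, NULL, 0 };
    struct action start = { "start", target, NULL, NULL, 1 };  /* pseudo */
    struct action to    = { "migrate_to",   source, source, target, 0 };
    struct action from  = { "migrate_from", target, source, target, 0 };

    /* migrate_to -> migrate_from -> stop -> start, as in the comment above */
    order(&to, &from);
    order(&from, &stop);
    order(&stop, &start);

    printf("migrate params: source=%s target=%s\n", from.migrate_source, from.migrate_target);
    printf("start is pseudo: %d\n", start.pseudo);
    return 0;
}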
rsc->running_on->data : NULL); start = get_first_named_action(rsc, RSC_START, TRUE, NULL); } if (is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || ((g_list_length(rsc->running_on) != 1) && !partial)) { pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } if(stop == NULL) { return; } else if (is_set(stop->flags, pe_action_optional) && is_set(rsc->flags, pe_rsc_try_reload)) { ReloadRsc(rsc, stop, start, data_set); } else if(is_not_set(stop->flags, pe_action_optional)) { MigrateRsc(rsc, stop, start, data_set, partial); } } void native_append_meta(resource_t * rsc, xmlNode * xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } } diff --git a/pengine/regression.sh b/pengine/regression.sh index 65d508f008..55b5c5cc2c 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,656 +1,657 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # core=`dirname $0` . $core/regression.core.sh create_mode="true" info Generating test outputs for these tests... # do_test file description info Done. 
echo "" info Performing the following tests from $io_dir create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test domain "Failover domains" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" --rc 4 do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (manditory) " do_test order-optional "Order (score=0) " do_test order-required 
"Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependant clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings." do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependant must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node 
Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependancy on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" 
do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Dont shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Dont retry failed demote actions" do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master 
"Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion." echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependancy restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL #994 - with a dependant resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test stonith-4 "Stonith node state" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Dont promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the 
failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test failcount "Ensure failcounts are correctly expired" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure wiht on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is disabled." do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc." do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." 
do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)." do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs." echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progessive) #1" do_test systemhealthp2 "System Health (Progessive) #2" do_test systemhealthp3 "System Health (Progessive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Mirgration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" +do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test stopped-monitor-09 "Stopped Monitor - unmanage started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (targer-role="Stopped")" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped 
Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role="Started")" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo"" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy-stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy-stop, revoked)" do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - 
Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy-stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master (loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master 
(loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test template-rsc-sets-4 "Template - Resource Sets 4" echo "" test_results diff --git a/pengine/test10/load-stopped-loop.dot b/pengine/test10/load-stopped-loop.dot new file mode 100644 index 0000000000..ed8ae48c46 --- /dev/null +++ b/pengine/test10/load-stopped-loop.dot @@ -0,0 +1,62 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange"] +"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style = bold] +"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" -> "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style = bold] +"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"] +"license.anbriz.vds-ok.com-vm_start_0 v03-a" -> "license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold] +"license.anbriz.vds-ok.com-vm_start_0 v03-a" [ 
style=bold color="green" fontcolor="orange"] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "all_stopped" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "load_stopped_v03-b v03-b" [ style = bold] +"license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style=bold color="green" fontcolor="black"] +"load_stopped_mgmt01 mgmt01" [ style=bold color="green" fontcolor="orange"] +"load_stopped_v03-a v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"load_stopped_v03-a v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style = bold] +"load_stopped_v03-a v03-a" [ style=bold color="green" fontcolor="orange"] +"load_stopped_v03-b v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold] +"load_stopped_v03-b v03-b" [ style=bold color="green" fontcolor="orange"] +"stonith-v03-a_monitor_60000 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-a_start_0 v03-b" -> "stonith-v03-a_monitor_60000 v03-b" [ style = bold] +"stonith-v03-a_start_0 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-a_stop_0 v03-b" -> "stonith-v03-a_start_0 v03-b" [ style = bold] +"stonith-v03-a_stop_0 v03-b" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_monitor_60000 v03-a" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_start_0 v03-a" -> "stonith-v03-b_monitor_60000 v03-a" [ style = bold] +"stonith-v03-b_start_0 v03-a" [ style=bold color="green" fontcolor="black"] +"stonith-v03-b_stop_0 v03-a" -> "stonith-v03-b_start_0 v03-a" [ style = bold] +"stonith-v03-b_stop_0 v03-a" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"] +"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style=bold color="green" fontcolor="orange"] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "all_stopped" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "load_stopped_v03-a v03-a" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold] +"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold] +"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style = bold] +"vds-ok-pool-0-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ 
style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style = bold] +"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-0-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style = bold] +"vds-ok-pool-0-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style = bold] +"vds-ok-pool-1-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style = bold] +"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"] +"vds-ok-pool-1-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style = bold] +"vds-ok-pool-1-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"] +} diff --git a/pengine/test10/load-stopped-loop.exp b/pengine/test10/load-stopped-loop.exp new file mode 100644 index 0000000000..75aa2e8687 --- /dev/null +++ b/pengine/test10/load-stopped-loop.exp @@ -0,0 +1,404 @@ [the 404 added lines are the expected XML transition graph for the new load-stopped-loop test; the XML markup was not preserved in this copy of the patch] diff --git a/pengine/test10/load-stopped-loop.scores b/pengine/test10/load-stopped-loop.scores new file mode 100644 index 0000000000..dc9f5f5a3b --- /dev/null +++ b/pengine/test10/load-stopped-loop.scores @@ -0,0 +1,1765 @@ +Allocation scores: +clone_color: cl-clvmd allocation score on mgmt01: 0 +clone_color: cl-clvmd allocation score on v03-a: 0 +clone_color: cl-clvmd allocation score on v03-b: 0 +clone_color: cl-dlm allocation score on mgmt01: 0 +clone_color: cl-dlm allocation score on v03-a: 0 +clone_color: cl-dlm allocation score on v03-b: 0 +clone_color: cl-iscsid allocation score on mgmt01: 0 +clone_color: cl-iscsid allocation score on v03-a: 0 +clone_color: cl-iscsid allocation score on v03-b: 0 +clone_color: cl-libvirt-images-fs allocation score on mgmt01: 0 +clone_color: cl-libvirt-images-fs allocation score on v03-a: 0 +clone_color: cl-libvirt-images-fs allocation score on v03-b: 0 +clone_color: cl-libvirt-images-pool allocation score on mgmt01: -INFINITY +clone_color: cl-libvirt-images-pool allocation score on v03-a: 0 +clone_color: cl-libvirt-images-pool allocation score on v03-b: 0 +clone_color: cl-libvirt-install-fs allocation score on
mgmt01: 0 +clone_color: cl-libvirt-install-fs allocation score on v03-a: 0 +clone_color: cl-libvirt-install-fs allocation score on v03-b: 0 +clone_color: cl-libvirt-qpid allocation score on mgmt01: -INFINITY +clone_color: cl-libvirt-qpid allocation score on v03-a: 0 +clone_color: cl-libvirt-qpid allocation score on v03-b: 0 +clone_color: cl-libvirtd allocation score on mgmt01: -INFINITY +clone_color: cl-libvirtd allocation score on v03-a: 0 +clone_color: cl-libvirtd allocation score on v03-b: 0 +clone_color: cl-mcast-anbriz-net allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-anbriz-net allocation score on v03-a: 0 +clone_color: cl-mcast-anbriz-net allocation score on v03-b: 0 +clone_color: cl-mcast-gleb-net allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-gleb-net allocation score on v03-a: 0 +clone_color: cl-mcast-gleb-net allocation score on v03-b: 0 +clone_color: cl-mcast-test-net allocation score on mgmt01: -INFINITY +clone_color: cl-mcast-test-net allocation score on v03-a: 0 +clone_color: cl-mcast-test-net allocation score on v03-b: 0 +clone_color: cl-multipathd allocation score on mgmt01: 0 +clone_color: cl-multipathd allocation score on v03-a: 0 +clone_color: cl-multipathd allocation score on v03-b: 0 +clone_color: cl-node-params allocation score on mgmt01: -INFINITY +clone_color: cl-node-params allocation score on v03-a: 0 +clone_color: cl-node-params allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-iscsi allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-pool allocation score on mgmt01: -INFINITY +clone_color: cl-vds-ok-pool-0-pool allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-pool allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-0-vg allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-iscsi allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-pool allocation score on mgmt01: -INFINITY +clone_color: cl-vds-ok-pool-1-pool allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-pool allocation score on v03-b: 0 +clone_color: cl-vds-ok-pool-1-vg allocation score on mgmt01: 0 +clone_color: cl-vds-ok-pool-1-vg allocation score on v03-a: 0 +clone_color: cl-vds-ok-pool-1-vg allocation score on v03-b: 0 +clone_color: cl-vlan1-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan1-if allocation score on v03-a: 0 +clone_color: cl-vlan1-if allocation score on v03-b: 0 +clone_color: cl-vlan101-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan101-if allocation score on v03-a: 0 +clone_color: cl-vlan101-if allocation score on v03-b: 0 +clone_color: cl-vlan102-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan102-if allocation score on v03-a: 0 +clone_color: cl-vlan102-if allocation score on v03-b: 0 +clone_color: cl-vlan103-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan103-if allocation score on v03-a: 0 +clone_color: cl-vlan103-if allocation score on v03-b: 0 +clone_color: cl-vlan104-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan104-if allocation score on v03-a: 0 +clone_color: cl-vlan104-if allocation score on v03-b: 0 +clone_color: cl-vlan200-if 
allocation score on mgmt01: -INFINITY +clone_color: cl-vlan200-if allocation score on v03-a: 0 +clone_color: cl-vlan200-if allocation score on v03-b: 0 +clone_color: cl-vlan3-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan3-if allocation score on v03-a: 0 +clone_color: cl-vlan3-if allocation score on v03-b: 0 +clone_color: cl-vlan4-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan4-if allocation score on v03-a: 0 +clone_color: cl-vlan4-if allocation score on v03-b: 0 +clone_color: cl-vlan5-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan5-if allocation score on v03-a: 0 +clone_color: cl-vlan5-if allocation score on v03-b: 0 +clone_color: cl-vlan900-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan900-if allocation score on v03-a: 0 +clone_color: cl-vlan900-if allocation score on v03-b: 0 +clone_color: cl-vlan909-if allocation score on mgmt01: -INFINITY +clone_color: cl-vlan909-if allocation score on v03-a: 0 +clone_color: cl-vlan909-if allocation score on v03-b: 0 +clone_color: clvmd:0 allocation score on mgmt01: 1 +clone_color: clvmd:0 allocation score on v03-a: 0 +clone_color: clvmd:0 allocation score on v03-b: 0 +clone_color: clvmd:1 allocation score on mgmt01: 0 +clone_color: clvmd:1 allocation score on v03-a: 0 +clone_color: clvmd:1 allocation score on v03-b: 1 +clone_color: clvmd:2 allocation score on mgmt01: 0 +clone_color: clvmd:2 allocation score on v03-a: 1 +clone_color: clvmd:2 allocation score on v03-b: 0 +clone_color: clvmd:3 allocation score on mgmt01: 0 +clone_color: clvmd:3 allocation score on v03-a: 0 +clone_color: clvmd:3 allocation score on v03-b: 0 +clone_color: clvmd:4 allocation score on mgmt01: 0 +clone_color: clvmd:4 allocation score on v03-a: 0 +clone_color: clvmd:4 allocation score on v03-b: 0 +clone_color: clvmd:5 allocation score on mgmt01: 0 +clone_color: clvmd:5 allocation score on v03-a: 0 +clone_color: clvmd:5 allocation score on v03-b: 0 +clone_color: clvmd:6 allocation score on mgmt01: 0 +clone_color: clvmd:6 allocation score on v03-a: 0 +clone_color: clvmd:6 allocation score on v03-b: 0 +clone_color: clvmd:7 allocation score on mgmt01: 0 +clone_color: clvmd:7 allocation score on v03-a: 0 +clone_color: clvmd:7 allocation score on v03-b: 0 +clone_color: clvmd:8 allocation score on mgmt01: 0 +clone_color: clvmd:8 allocation score on v03-a: 0 +clone_color: clvmd:8 allocation score on v03-b: 0 +clone_color: dlm:0 allocation score on mgmt01: 1 +clone_color: dlm:0 allocation score on v03-a: 0 +clone_color: dlm:0 allocation score on v03-b: 0 +clone_color: dlm:1 allocation score on mgmt01: 0 +clone_color: dlm:1 allocation score on v03-a: 0 +clone_color: dlm:1 allocation score on v03-b: 1 +clone_color: dlm:2 allocation score on mgmt01: 0 +clone_color: dlm:2 allocation score on v03-a: 1 +clone_color: dlm:2 allocation score on v03-b: 0 +clone_color: dlm:3 allocation score on mgmt01: 0 +clone_color: dlm:3 allocation score on v03-a: 0 +clone_color: dlm:3 allocation score on v03-b: 0 +clone_color: dlm:4 allocation score on mgmt01: 0 +clone_color: dlm:4 allocation score on v03-a: 0 +clone_color: dlm:4 allocation score on v03-b: 0 +clone_color: dlm:5 allocation score on mgmt01: 0 +clone_color: dlm:5 allocation score on v03-a: 0 +clone_color: dlm:5 allocation score on v03-b: 0 +clone_color: dlm:6 allocation score on mgmt01: 0 +clone_color: dlm:6 allocation score on v03-a: 0 +clone_color: dlm:6 allocation score on v03-b: 0 +clone_color: dlm:7 allocation score on mgmt01: 0 +clone_color: dlm:7 allocation score on v03-a: 0 
+[allocation score data: "clone_color" and "native_color" entries of the form "<type>: <resource>:<instance> allocation score on <node>: <score>", with scores of 0, 1 or -INFINITY on nodes mgmt01, v03-a and v03-b, for each clone instance of dlm, iscsid, libvirt-images-fs, libvirt-images-pool, libvirt-install-fs, libvirt-qpid, libvirtd, mcast-anbriz-net, mcast-gleb-net, mcast-test-net, multipathd, node-params, vds-ok-pool-0-iscsi/-pool/-vg, vds-ok-pool-1-iscsi/-pool/-vg and the vlan1/3/4/5/101/102/103/104/200/900/909-if interfaces, plus the clvmd clone and the *-vm and stonith-* primitives]
mgmt01: -INFINITY +native_color: stonith-v02-c allocation score on v03-a: -INFINITY +native_color: stonith-v02-c allocation score on v03-b: -INFINITY +native_color: stonith-v02-d allocation score on mgmt01: -INFINITY +native_color: stonith-v02-d allocation score on v03-a: -INFINITY +native_color: stonith-v02-d allocation score on v03-b: -INFINITY +native_color: stonith-v03-a allocation score on mgmt01: 0 +native_color: stonith-v03-a allocation score on v03-a: -INFINITY +native_color: stonith-v03-a allocation score on v03-b: 0 +native_color: stonith-v03-b allocation score on mgmt01: 0 +native_color: stonith-v03-b allocation score on v03-a: 0 +native_color: stonith-v03-b allocation score on v03-b: -INFINITY +native_color: stonith-v03-c allocation score on mgmt01: -INFINITY +native_color: stonith-v03-c allocation score on v03-a: -INFINITY +native_color: stonith-v03-c allocation score on v03-b: -INFINITY +native_color: stonith-v03-d allocation score on mgmt01: -INFINITY +native_color: stonith-v03-d allocation score on v03-a: -INFINITY +native_color: stonith-v03-d allocation score on v03-b: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: terminal.anbriz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: terminal0.anbriz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: test-01.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: test-01.vds-ok.com-vm allocation score on v03-a: 0 +native_color: test-01.vds-ok.com-vm allocation score on v03-b: 0 +native_color: ubuntu9.10-gotin-vm allocation score on mgmt01: -INFINITY +native_color: ubuntu9.10-gotin-vm allocation score on v03-a: -INFINITY +native_color: ubuntu9.10-gotin-vm allocation score on v03-b: -INFINITY +native_color: vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: 0 +native_color: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: 0 +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: 
vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on mgmt01: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-a: -INFINITY +native_color: vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-0-iscsi:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-iscsi:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-0-iscsi:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on 
mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-iscsi:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:0 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-pool:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:1 allocation score on v03-a: 1 +native_color: vds-ok-pool-0-pool:1 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-pool:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-0-vg:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-0-vg:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:2 allocation 
score on v03-a: 1 +native_color: vds-ok-pool-0-vg:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-0-vg:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-1-iscsi:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-1-iscsi:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-iscsi:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-iscsi:8 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:0 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:0 allocation 
score on v03-b: 1 +native_color: vds-ok-pool-1-pool:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:1 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-pool:1 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-pool:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:0 allocation score on mgmt01: 1 +native_color: vds-ok-pool-1-vg:0 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:0 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:1 allocation score on v03-b: 1 +native_color: vds-ok-pool-1-vg:2 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:2 allocation score on v03-a: 1 +native_color: vds-ok-pool-1-vg:2 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:3 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:4 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:5 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:6 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:7 allocation score on v03-b: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on mgmt01: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on v03-a: -INFINITY +native_color: vds-ok-pool-1-vg:8 allocation score on v03-b: -INFINITY +native_color: vlan1-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:0 allocation 
score on v03-a: -INFINITY +native_color: vlan1-if:0 allocation score on v03-b: 1 +native_color: vlan1-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:1 allocation score on v03-a: 1 +native_color: vlan1-if:1 allocation score on v03-b: 0 +native_color: vlan1-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:2 allocation score on v03-a: -INFINITY +native_color: vlan1-if:2 allocation score on v03-b: -INFINITY +native_color: vlan1-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:3 allocation score on v03-a: -INFINITY +native_color: vlan1-if:3 allocation score on v03-b: -INFINITY +native_color: vlan1-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:4 allocation score on v03-a: -INFINITY +native_color: vlan1-if:4 allocation score on v03-b: -INFINITY +native_color: vlan1-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:5 allocation score on v03-a: -INFINITY +native_color: vlan1-if:5 allocation score on v03-b: -INFINITY +native_color: vlan1-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:6 allocation score on v03-a: -INFINITY +native_color: vlan1-if:6 allocation score on v03-b: -INFINITY +native_color: vlan1-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan1-if:7 allocation score on v03-a: -INFINITY +native_color: vlan1-if:7 allocation score on v03-b: -INFINITY +native_color: vlan101-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:0 allocation score on v03-a: -INFINITY +native_color: vlan101-if:0 allocation score on v03-b: 1 +native_color: vlan101-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:1 allocation score on v03-a: 1 +native_color: vlan101-if:1 allocation score on v03-b: 0 +native_color: vlan101-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:2 allocation score on v03-a: -INFINITY +native_color: vlan101-if:2 allocation score on v03-b: -INFINITY +native_color: vlan101-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:3 allocation score on v03-a: -INFINITY +native_color: vlan101-if:3 allocation score on v03-b: -INFINITY +native_color: vlan101-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:4 allocation score on v03-a: -INFINITY +native_color: vlan101-if:4 allocation score on v03-b: -INFINITY +native_color: vlan101-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:5 allocation score on v03-a: -INFINITY +native_color: vlan101-if:5 allocation score on v03-b: -INFINITY +native_color: vlan101-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:6 allocation score on v03-a: -INFINITY +native_color: vlan101-if:6 allocation score on v03-b: -INFINITY +native_color: vlan101-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan101-if:7 allocation score on v03-a: -INFINITY +native_color: vlan101-if:7 allocation score on v03-b: -INFINITY +native_color: vlan102-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:0 allocation score on v03-a: -INFINITY +native_color: vlan102-if:0 allocation score on v03-b: 1 +native_color: vlan102-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:1 allocation score on v03-a: 1 +native_color: vlan102-if:1 allocation score on v03-b: 0 +native_color: vlan102-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:2 allocation score on v03-a: -INFINITY +native_color: vlan102-if:2 allocation score on v03-b: -INFINITY +native_color: vlan102-if:3 allocation score on 
mgmt01: -INFINITY +native_color: vlan102-if:3 allocation score on v03-a: -INFINITY +native_color: vlan102-if:3 allocation score on v03-b: -INFINITY +native_color: vlan102-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:4 allocation score on v03-a: -INFINITY +native_color: vlan102-if:4 allocation score on v03-b: -INFINITY +native_color: vlan102-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:5 allocation score on v03-a: -INFINITY +native_color: vlan102-if:5 allocation score on v03-b: -INFINITY +native_color: vlan102-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:6 allocation score on v03-a: -INFINITY +native_color: vlan102-if:6 allocation score on v03-b: -INFINITY +native_color: vlan102-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan102-if:7 allocation score on v03-a: -INFINITY +native_color: vlan102-if:7 allocation score on v03-b: -INFINITY +native_color: vlan103-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:0 allocation score on v03-a: -INFINITY +native_color: vlan103-if:0 allocation score on v03-b: 1 +native_color: vlan103-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:1 allocation score on v03-a: 1 +native_color: vlan103-if:1 allocation score on v03-b: 0 +native_color: vlan103-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:2 allocation score on v03-a: -INFINITY +native_color: vlan103-if:2 allocation score on v03-b: -INFINITY +native_color: vlan103-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:3 allocation score on v03-a: -INFINITY +native_color: vlan103-if:3 allocation score on v03-b: -INFINITY +native_color: vlan103-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:4 allocation score on v03-a: -INFINITY +native_color: vlan103-if:4 allocation score on v03-b: -INFINITY +native_color: vlan103-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:5 allocation score on v03-a: -INFINITY +native_color: vlan103-if:5 allocation score on v03-b: -INFINITY +native_color: vlan103-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:6 allocation score on v03-a: -INFINITY +native_color: vlan103-if:6 allocation score on v03-b: -INFINITY +native_color: vlan103-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan103-if:7 allocation score on v03-a: -INFINITY +native_color: vlan103-if:7 allocation score on v03-b: -INFINITY +native_color: vlan104-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:0 allocation score on v03-a: -INFINITY +native_color: vlan104-if:0 allocation score on v03-b: 1 +native_color: vlan104-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:1 allocation score on v03-a: 1 +native_color: vlan104-if:1 allocation score on v03-b: 0 +native_color: vlan104-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:2 allocation score on v03-a: -INFINITY +native_color: vlan104-if:2 allocation score on v03-b: -INFINITY +native_color: vlan104-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:3 allocation score on v03-a: -INFINITY +native_color: vlan104-if:3 allocation score on v03-b: -INFINITY +native_color: vlan104-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:4 allocation score on v03-a: -INFINITY +native_color: vlan104-if:4 allocation score on v03-b: -INFINITY +native_color: vlan104-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:5 allocation score on v03-a: 
-INFINITY +native_color: vlan104-if:5 allocation score on v03-b: -INFINITY +native_color: vlan104-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:6 allocation score on v03-a: -INFINITY +native_color: vlan104-if:6 allocation score on v03-b: -INFINITY +native_color: vlan104-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan104-if:7 allocation score on v03-a: -INFINITY +native_color: vlan104-if:7 allocation score on v03-b: -INFINITY +native_color: vlan200-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:0 allocation score on v03-a: -INFINITY +native_color: vlan200-if:0 allocation score on v03-b: 1 +native_color: vlan200-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:1 allocation score on v03-a: 1 +native_color: vlan200-if:1 allocation score on v03-b: 0 +native_color: vlan200-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:2 allocation score on v03-a: -INFINITY +native_color: vlan200-if:2 allocation score on v03-b: -INFINITY +native_color: vlan200-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:3 allocation score on v03-a: -INFINITY +native_color: vlan200-if:3 allocation score on v03-b: -INFINITY +native_color: vlan200-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:4 allocation score on v03-a: -INFINITY +native_color: vlan200-if:4 allocation score on v03-b: -INFINITY +native_color: vlan200-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:5 allocation score on v03-a: -INFINITY +native_color: vlan200-if:5 allocation score on v03-b: -INFINITY +native_color: vlan200-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:6 allocation score on v03-a: -INFINITY +native_color: vlan200-if:6 allocation score on v03-b: -INFINITY +native_color: vlan200-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan200-if:7 allocation score on v03-a: -INFINITY +native_color: vlan200-if:7 allocation score on v03-b: -INFINITY +native_color: vlan3-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:0 allocation score on v03-a: -INFINITY +native_color: vlan3-if:0 allocation score on v03-b: 1 +native_color: vlan3-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:1 allocation score on v03-a: 1 +native_color: vlan3-if:1 allocation score on v03-b: 0 +native_color: vlan3-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:2 allocation score on v03-a: -INFINITY +native_color: vlan3-if:2 allocation score on v03-b: -INFINITY +native_color: vlan3-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:3 allocation score on v03-a: -INFINITY +native_color: vlan3-if:3 allocation score on v03-b: -INFINITY +native_color: vlan3-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:4 allocation score on v03-a: -INFINITY +native_color: vlan3-if:4 allocation score on v03-b: -INFINITY +native_color: vlan3-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:5 allocation score on v03-a: -INFINITY +native_color: vlan3-if:5 allocation score on v03-b: -INFINITY +native_color: vlan3-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:6 allocation score on v03-a: -INFINITY +native_color: vlan3-if:6 allocation score on v03-b: -INFINITY +native_color: vlan3-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan3-if:7 allocation score on v03-a: -INFINITY +native_color: vlan3-if:7 allocation score on v03-b: -INFINITY +native_color: vlan4-if:0 allocation score on 
mgmt01: -INFINITY +native_color: vlan4-if:0 allocation score on v03-a: -INFINITY +native_color: vlan4-if:0 allocation score on v03-b: 1 +native_color: vlan4-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:1 allocation score on v03-a: 1 +native_color: vlan4-if:1 allocation score on v03-b: 0 +native_color: vlan4-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:2 allocation score on v03-a: -INFINITY +native_color: vlan4-if:2 allocation score on v03-b: -INFINITY +native_color: vlan4-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:3 allocation score on v03-a: -INFINITY +native_color: vlan4-if:3 allocation score on v03-b: -INFINITY +native_color: vlan4-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:4 allocation score on v03-a: -INFINITY +native_color: vlan4-if:4 allocation score on v03-b: -INFINITY +native_color: vlan4-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:5 allocation score on v03-a: -INFINITY +native_color: vlan4-if:5 allocation score on v03-b: -INFINITY +native_color: vlan4-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:6 allocation score on v03-a: -INFINITY +native_color: vlan4-if:6 allocation score on v03-b: -INFINITY +native_color: vlan4-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan4-if:7 allocation score on v03-a: -INFINITY +native_color: vlan4-if:7 allocation score on v03-b: -INFINITY +native_color: vlan5-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:0 allocation score on v03-a: -INFINITY +native_color: vlan5-if:0 allocation score on v03-b: 1 +native_color: vlan5-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:1 allocation score on v03-a: 1 +native_color: vlan5-if:1 allocation score on v03-b: 0 +native_color: vlan5-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:2 allocation score on v03-a: -INFINITY +native_color: vlan5-if:2 allocation score on v03-b: -INFINITY +native_color: vlan5-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:3 allocation score on v03-a: -INFINITY +native_color: vlan5-if:3 allocation score on v03-b: -INFINITY +native_color: vlan5-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:4 allocation score on v03-a: -INFINITY +native_color: vlan5-if:4 allocation score on v03-b: -INFINITY +native_color: vlan5-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:5 allocation score on v03-a: -INFINITY +native_color: vlan5-if:5 allocation score on v03-b: -INFINITY +native_color: vlan5-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:6 allocation score on v03-a: -INFINITY +native_color: vlan5-if:6 allocation score on v03-b: -INFINITY +native_color: vlan5-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan5-if:7 allocation score on v03-a: -INFINITY +native_color: vlan5-if:7 allocation score on v03-b: -INFINITY +native_color: vlan900-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:0 allocation score on v03-a: -INFINITY +native_color: vlan900-if:0 allocation score on v03-b: 1 +native_color: vlan900-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:1 allocation score on v03-a: 1 +native_color: vlan900-if:1 allocation score on v03-b: 0 +native_color: vlan900-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:2 allocation score on v03-a: -INFINITY +native_color: vlan900-if:2 allocation score on v03-b: -INFINITY +native_color: vlan900-if:3 allocation score 
on mgmt01: -INFINITY +native_color: vlan900-if:3 allocation score on v03-a: -INFINITY +native_color: vlan900-if:3 allocation score on v03-b: -INFINITY +native_color: vlan900-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:4 allocation score on v03-a: -INFINITY +native_color: vlan900-if:4 allocation score on v03-b: -INFINITY +native_color: vlan900-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:5 allocation score on v03-a: -INFINITY +native_color: vlan900-if:5 allocation score on v03-b: -INFINITY +native_color: vlan900-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:6 allocation score on v03-a: -INFINITY +native_color: vlan900-if:6 allocation score on v03-b: -INFINITY +native_color: vlan900-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan900-if:7 allocation score on v03-a: -INFINITY +native_color: vlan900-if:7 allocation score on v03-b: -INFINITY +native_color: vlan909-if:0 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:0 allocation score on v03-a: -INFINITY +native_color: vlan909-if:0 allocation score on v03-b: 1 +native_color: vlan909-if:1 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:1 allocation score on v03-a: 1 +native_color: vlan909-if:1 allocation score on v03-b: 0 +native_color: vlan909-if:2 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:2 allocation score on v03-a: -INFINITY +native_color: vlan909-if:2 allocation score on v03-b: -INFINITY +native_color: vlan909-if:3 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:3 allocation score on v03-a: -INFINITY +native_color: vlan909-if:3 allocation score on v03-b: -INFINITY +native_color: vlan909-if:4 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:4 allocation score on v03-a: -INFINITY +native_color: vlan909-if:4 allocation score on v03-b: -INFINITY +native_color: vlan909-if:5 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:5 allocation score on v03-a: -INFINITY +native_color: vlan909-if:5 allocation score on v03-b: -INFINITY +native_color: vlan909-if:6 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:6 allocation score on v03-a: -INFINITY +native_color: vlan909-if:6 allocation score on v03-b: -INFINITY +native_color: vlan909-if:7 allocation score on mgmt01: -INFINITY +native_color: vlan909-if:7 allocation score on v03-a: -INFINITY +native_color: vlan909-if:7 allocation score on v03-b: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on mgmt01: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on v03-a: -INFINITY +native_color: zakaz.transferrus.ru-vm allocation score on v03-b: -INFINITY diff --git a/pengine/test10/load-stopped-loop.summary b/pengine/test10/load-stopped-loop.summary new file mode 100644 index 0000000000..c14e05ddce --- /dev/null +++ b/pengine/test10/load-stopped-loop.summary @@ -0,0 +1,354 @@ + +Current cluster status: +Online: [ mgmt01 v03-a v03-b ] + + stonith-v02-a (stonith:fence_ipmilan): Stopped + stonith-v02-b (stonith:fence_ipmilan): Stopped + stonith-v02-c (stonith:fence_ipmilan): Stopped + stonith-v02-d (stonith:fence_ipmilan): Stopped + stonith-mgmt01 (stonith:fence_xvm): Started v03-b + stonith-mgmt02 (stonith:meatware): Started mgmt01 + stonith-v03-c (stonith:fence_ipmilan): Stopped + stonith-v03-a (stonith:fence_ipmilan): Started v03-b + stonith-v03-b (stonith:fence_ipmilan): Started v03-a + stonith-v03-d (stonith:fence_ipmilan): Stopped + Clone Set: cl-clvmd [clvmd] + Started: [ mgmt01 
v03-a v03-b ] + Stopped: [ clvmd:3 clvmd:4 clvmd:5 clvmd:6 clvmd:7 clvmd:8 ] + Clone Set: cl-dlm [dlm] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ dlm:3 dlm:4 dlm:5 dlm:6 dlm:7 dlm:8 ] + Clone Set: cl-iscsid [iscsid] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ iscsid:3 iscsid:4 iscsid:5 iscsid:6 iscsid:7 iscsid:8 ] + Clone Set: cl-libvirtd [libvirtd] + Started: [ v03-a v03-b ] + Stopped: [ libvirtd:2 libvirtd:3 libvirtd:4 libvirtd:5 libvirtd:6 libvirtd:7 ] + Clone Set: cl-multipathd [multipathd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ multipathd:3 multipathd:4 multipathd:5 multipathd:6 multipathd:7 multipathd:8 ] + Clone Set: cl-node-params [node-params] + Started: [ v03-a v03-b ] + Stopped: [ node-params:2 node-params:3 node-params:4 node-params:5 node-params:6 node-params:7 ] + Clone Set: cl-vlan1-if [vlan1-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan1-if:2 vlan1-if:3 vlan1-if:4 vlan1-if:5 vlan1-if:6 vlan1-if:7 ] + Clone Set: cl-vlan101-if [vlan101-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan101-if:2 vlan101-if:3 vlan101-if:4 vlan101-if:5 vlan101-if:6 vlan101-if:7 ] + Clone Set: cl-vlan102-if [vlan102-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan102-if:2 vlan102-if:3 vlan102-if:4 vlan102-if:5 vlan102-if:6 vlan102-if:7 ] + Clone Set: cl-vlan103-if [vlan103-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan103-if:2 vlan103-if:3 vlan103-if:4 vlan103-if:5 vlan103-if:6 vlan103-if:7 ] + Clone Set: cl-vlan104-if [vlan104-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan104-if:2 vlan104-if:3 vlan104-if:4 vlan104-if:5 vlan104-if:6 vlan104-if:7 ] + Clone Set: cl-vlan3-if [vlan3-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan3-if:2 vlan3-if:3 vlan3-if:4 vlan3-if:5 vlan3-if:6 vlan3-if:7 ] + Clone Set: cl-vlan4-if [vlan4-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan4-if:2 vlan4-if:3 vlan4-if:4 vlan4-if:5 vlan4-if:6 vlan4-if:7 ] + Clone Set: cl-vlan5-if [vlan5-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan5-if:2 vlan5-if:3 vlan5-if:4 vlan5-if:5 vlan5-if:6 vlan5-if:7 ] + Clone Set: cl-vlan900-if [vlan900-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan900-if:2 vlan900-if:3 vlan900-if:4 vlan900-if:5 vlan900-if:6 vlan900-if:7 ] + Clone Set: cl-vlan909-if [vlan909-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan909-if:2 vlan909-if:3 vlan909-if:4 vlan909-if:5 vlan909-if:6 vlan909-if:7 ] + Clone Set: cl-libvirt-images-fs [libvirt-images-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-images-fs:3 libvirt-images-fs:4 libvirt-images-fs:5 libvirt-images-fs:6 libvirt-images-fs:7 libvirt-images-fs:8 ] + Clone Set: cl-libvirt-install-fs [libvirt-install-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-install-fs:3 libvirt-install-fs:4 libvirt-install-fs:5 libvirt-install-fs:6 libvirt-install-fs:7 libvirt-install-fs:8 ] + Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-iscsi:3 vds-ok-pool-0-iscsi:4 vds-ok-pool-0-iscsi:5 vds-ok-pool-0-iscsi:6 vds-ok-pool-0-iscsi:7 vds-ok-pool-0-iscsi:8 ] + Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-vg:3 vds-ok-pool-0-vg:4 vds-ok-pool-0-vg:5 vds-ok-pool-0-vg:6 vds-ok-pool-0-vg:7 vds-ok-pool-0-vg:8 ] + Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-iscsi:3 vds-ok-pool-1-iscsi:4 vds-ok-pool-1-iscsi:5 vds-ok-pool-1-iscsi:6 vds-ok-pool-1-iscsi:7 vds-ok-pool-1-iscsi:8 ] + Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] + Started: [ 
mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-vg:3 vds-ok-pool-1-vg:4 vds-ok-pool-1-vg:5 vds-ok-pool-1-vg:6 vds-ok-pool-1-vg:7 vds-ok-pool-1-vg:8 ] + Clone Set: cl-libvirt-images-pool [libvirt-images-pool] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-images-pool:2 libvirt-images-pool:3 libvirt-images-pool:4 libvirt-images-pool:5 libvirt-images-pool:6 libvirt-images-pool:7 ] + Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-0-pool:2 vds-ok-pool-0-pool:3 vds-ok-pool-0-pool:4 vds-ok-pool-0-pool:5 vds-ok-pool-0-pool:6 vds-ok-pool-0-pool:7 ] + Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-1-pool:2 vds-ok-pool-1-pool:3 vds-ok-pool-1-pool:4 vds-ok-pool-1-pool:5 vds-ok-pool-1-pool:6 vds-ok-pool-1-pool:7 ] + git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + Clone Set: cl-vlan200-if [vlan200-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan200-if:2 vlan200-if:3 vlan200-if:4 vlan200-if:5 vlan200-if:6 vlan200-if:7 ] + lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a + dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped + eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped + maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped + metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped + c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-test-net [mcast-test-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-test-net:2 mcast-test-net:3 mcast-test-net:4 mcast-test-net:5 mcast-test-net:6 mcast-test-net:7 ] + dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped + ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a + cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre01-right.vds-ok.com-vm 
(ocf::vds-ok:VirtualDomain): Started v03-b + lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-anbriz-net:2 mcast-anbriz-net:3 mcast-anbriz-net:4 mcast-anbriz-net:5 mcast-anbriz-net:6 mcast-anbriz-net:7 ] + gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-libvirt-qpid [libvirt-qpid] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-qpid:2 libvirt-qpid:3 libvirt-qpid:4 libvirt-qpid:5 libvirt-qpid:6 libvirt-qpid:7 ] + gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-mcast-gleb-net [mcast-gleb-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-gleb-net:2 mcast-gleb-net:3 mcast-gleb-net:4 mcast-gleb-net:5 mcast-gleb-net:6 mcast-gleb-net:7 ] + +Transition Summary: + * Reload vds-ok-pool-0-iscsi:0 (Started mgmt01) + * Reload vds-ok-pool-0-iscsi:1 (Started v03-b) + * Reload vds-ok-pool-0-iscsi:2 (Started v03-a) + * Reload vds-ok-pool-1-iscsi:0 (Started mgmt01) + * Reload vds-ok-pool-1-iscsi:1 (Started v03-b) + * Reload vds-ok-pool-1-iscsi:2 (Started v03-a) + * Restart stonith-v03-b (Started v03-a) + * Restart stonith-v03-a (Started v03-b) + * Migrate license.anbriz.vds-ok.com-vm (Started v03-b -> v03-a) + * Migrate terminal0.anbriz.vds-ok.com-vm (Started v03-a -> v03-b) + * Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (v03-a) + +Executing cluster transition: + * Resource action: vds-ok-pool-0-iscsi:1 reload on mgmt01 + * Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01 + * Resource action: vds-ok-pool-0-iscsi:0 reload on v03-b + * Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b + * Resource action: vds-ok-pool-0-iscsi:2 reload on v03-a + * Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a + * Resource action: vds-ok-pool-1-iscsi:1 reload on mgmt01 + * Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01 + * Resource action: vds-ok-pool-1-iscsi:0 reload on v03-b + * Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b + * Resource action: vds-ok-pool-1-iscsi:2 reload on v03-a + * Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a + * Resource action: stonith-v03-b stop on v03-a + * Resource action: stonith-v03-b start on v03-a + * Resource action: stonith-v03-b monitor=60000 on v03-a + * Resource action: stonith-v03-a stop on v03-b + * Resource action: stonith-v03-a start on v03-b + * Resource action: stonith-v03-a monitor=60000 on v03-b + * Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a + * Pseudo action: load_stopped_mgmt01 + * Resource action: license.anbriz.vds-ok.com-vm migrate_from on 
v03-a + * Resource action: license.anbriz.vds-ok.com-vm stop on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b + * Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a + * Pseudo action: load_stopped_v03-b + * Pseudo action: load_stopped_v03-a + * Pseudo action: all_stopped + * Pseudo action: license.anbriz.vds-ok.com-vm_start_0 + * Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0 + * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a + * Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a + * Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b + * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a + +Revised cluster status: +Online: [ mgmt01 v03-a v03-b ] + + stonith-v02-a (stonith:fence_ipmilan): Stopped + stonith-v02-b (stonith:fence_ipmilan): Stopped + stonith-v02-c (stonith:fence_ipmilan): Stopped + stonith-v02-d (stonith:fence_ipmilan): Stopped + stonith-mgmt01 (stonith:fence_xvm): Started v03-b + stonith-mgmt02 (stonith:meatware): Started mgmt01 + stonith-v03-c (stonith:fence_ipmilan): Stopped + stonith-v03-a (stonith:fence_ipmilan): Started v03-b + stonith-v03-b (stonith:fence_ipmilan): Started v03-a + stonith-v03-d (stonith:fence_ipmilan): Stopped + Clone Set: cl-clvmd [clvmd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ clvmd:3 clvmd:4 clvmd:5 clvmd:6 clvmd:7 clvmd:8 ] + Clone Set: cl-dlm [dlm] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ dlm:3 dlm:4 dlm:5 dlm:6 dlm:7 dlm:8 ] + Clone Set: cl-iscsid [iscsid] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ iscsid:3 iscsid:4 iscsid:5 iscsid:6 iscsid:7 iscsid:8 ] + Clone Set: cl-libvirtd [libvirtd] + Started: [ v03-a v03-b ] + Stopped: [ libvirtd:2 libvirtd:3 libvirtd:4 libvirtd:5 libvirtd:6 libvirtd:7 ] + Clone Set: cl-multipathd [multipathd] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ multipathd:3 multipathd:4 multipathd:5 multipathd:6 multipathd:7 multipathd:8 ] + Clone Set: cl-node-params [node-params] + Started: [ v03-a v03-b ] + Stopped: [ node-params:2 node-params:3 node-params:4 node-params:5 node-params:6 node-params:7 ] + Clone Set: cl-vlan1-if [vlan1-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan1-if:2 vlan1-if:3 vlan1-if:4 vlan1-if:5 vlan1-if:6 vlan1-if:7 ] + Clone Set: cl-vlan101-if [vlan101-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan101-if:2 vlan101-if:3 vlan101-if:4 vlan101-if:5 vlan101-if:6 vlan101-if:7 ] + Clone Set: cl-vlan102-if [vlan102-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan102-if:2 vlan102-if:3 vlan102-if:4 vlan102-if:5 vlan102-if:6 vlan102-if:7 ] + Clone Set: cl-vlan103-if [vlan103-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan103-if:2 vlan103-if:3 vlan103-if:4 vlan103-if:5 vlan103-if:6 vlan103-if:7 ] + Clone Set: cl-vlan104-if [vlan104-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan104-if:2 vlan104-if:3 vlan104-if:4 vlan104-if:5 vlan104-if:6 vlan104-if:7 ] + Clone Set: cl-vlan3-if [vlan3-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan3-if:2 vlan3-if:3 vlan3-if:4 vlan3-if:5 vlan3-if:6 vlan3-if:7 ] + Clone Set: cl-vlan4-if [vlan4-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan4-if:2 vlan4-if:3 vlan4-if:4 vlan4-if:5 vlan4-if:6 vlan4-if:7 ] + Clone Set: cl-vlan5-if [vlan5-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan5-if:2 vlan5-if:3 vlan5-if:4 vlan5-if:5 vlan5-if:6 vlan5-if:7 ] + Clone Set: cl-vlan900-if [vlan900-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan900-if:2 vlan900-if:3 vlan900-if:4 vlan900-if:5 vlan900-if:6 vlan900-if:7 ] + Clone Set: 
cl-vlan909-if [vlan909-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan909-if:2 vlan909-if:3 vlan909-if:4 vlan909-if:5 vlan909-if:6 vlan909-if:7 ] + Clone Set: cl-libvirt-images-fs [libvirt-images-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-images-fs:3 libvirt-images-fs:4 libvirt-images-fs:5 libvirt-images-fs:6 libvirt-images-fs:7 libvirt-images-fs:8 ] + Clone Set: cl-libvirt-install-fs [libvirt-install-fs] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ libvirt-install-fs:3 libvirt-install-fs:4 libvirt-install-fs:5 libvirt-install-fs:6 libvirt-install-fs:7 libvirt-install-fs:8 ] + Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-iscsi:3 vds-ok-pool-0-iscsi:4 vds-ok-pool-0-iscsi:5 vds-ok-pool-0-iscsi:6 vds-ok-pool-0-iscsi:7 vds-ok-pool-0-iscsi:8 ] + Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-0-vg:3 vds-ok-pool-0-vg:4 vds-ok-pool-0-vg:5 vds-ok-pool-0-vg:6 vds-ok-pool-0-vg:7 vds-ok-pool-0-vg:8 ] + Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-iscsi:3 vds-ok-pool-1-iscsi:4 vds-ok-pool-1-iscsi:5 vds-ok-pool-1-iscsi:6 vds-ok-pool-1-iscsi:7 vds-ok-pool-1-iscsi:8 ] + Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg] + Started: [ mgmt01 v03-a v03-b ] + Stopped: [ vds-ok-pool-1-vg:3 vds-ok-pool-1-vg:4 vds-ok-pool-1-vg:5 vds-ok-pool-1-vg:6 vds-ok-pool-1-vg:7 vds-ok-pool-1-vg:8 ] + Clone Set: cl-libvirt-images-pool [libvirt-images-pool] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-images-pool:2 libvirt-images-pool:3 libvirt-images-pool:4 libvirt-images-pool:5 libvirt-images-pool:6 libvirt-images-pool:7 ] + Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-0-pool:2 vds-ok-pool-0-pool:3 vds-ok-pool-0-pool:4 vds-ok-pool-0-pool:5 vds-ok-pool-0-pool:6 vds-ok-pool-0-pool:7 ] + Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool] + Started: [ v03-a v03-b ] + Stopped: [ vds-ok-pool-1-pool:2 vds-ok-pool-1-pool:3 vds-ok-pool-1-pool:4 vds-ok-pool-1-pool:5 vds-ok-pool-1-pool:6 vds-ok-pool-1-pool:7 ] + git.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + 
f13-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + eu2.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + zakaz.transferrus.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + Clone Set: cl-vlan200-if [vlan200-if] + Started: [ v03-a v03-b ] + Stopped: [ vlan200-if:2 vlan200-if:3 vlan200-if:4 vlan200-if:5 vlan200-if:6 vlan200-if:7 ] + lenny-x32-devel-vm (ocf::vds-ok:VirtualDomain): Started v03-a + dist.express-consult.org-vm (ocf::vds-ok:VirtualDomain): Stopped + eu1.ca-pages.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gotin-bbb-vm (ocf::vds-ok:VirtualDomain): Stopped + maxb-c55-vm (ocf::vds-ok:VirtualDomain): Stopped + metae.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + rodovoepomestie.ru-vm (ocf::vds-ok:VirtualDomain): Stopped + ubuntu9.10-gotin-vm (ocf::vds-ok:VirtualDomain): Stopped + c5-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-test-net [mcast-test-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-test-net:2 mcast-test-net:3 mcast-test-net:4 mcast-test-net:5 mcast-test-net:6 mcast-test-net:7 ] + dist.fly-uni.org-vm (ocf::vds-ok:VirtualDomain): Stopped + ktstudio.net-vm (ocf::vds-ok:VirtualDomain): Started v03-a + cloudsrv.credo-dialogue.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + c6-x64-devel.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre01-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre02-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre03-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre03-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre04-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + lustre04-right.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-anbriz-net:2 mcast-anbriz-net:3 mcast-anbriz-net:4 mcast-anbriz-net:5 mcast-anbriz-net:6 mcast-anbriz-net:7 ] + gw.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + license.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + terminal.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + lustre01-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + lustre02-left.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + test-01.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-a + Clone Set: cl-libvirt-qpid [libvirt-qpid] + Started: [ v03-a v03-b ] + Stopped: [ libvirt-qpid:2 libvirt-qpid:3 libvirt-qpid:4 libvirt-qpid:5 libvirt-qpid:6 libvirt-qpid:7 ] + gw.gleb.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + gw.gotin.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Stopped + terminal0.anbriz.vds-ok.com-vm (ocf::vds-ok:VirtualDomain): Started v03-b + Clone Set: cl-mcast-gleb-net [mcast-gleb-net] + Started: [ v03-a v03-b ] + Stopped: [ mcast-gleb-net:2 mcast-gleb-net:3 mcast-gleb-net:4 mcast-gleb-net:5 mcast-gleb-net:6 mcast-gleb-net:7 ] + diff --git a/pengine/test10/load-stopped-loop.xml b/pengine/test10/load-stopped-loop.xml new file mode 100644 index 0000000000..31bd172f88 --- /dev/null +++ b/pengine/test10/load-stopped-loop.xml @@ -0,0 +1,3959 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
\ No newline at end of file
diff --git a/pengine/test10/migrate-begin.dot b/pengine/test10/migrate-begin.dot
index 36c3acd0b8..b7424a8e5d 100644
--- a/pengine/test10/migrate-begin.dot
+++ b/pengine/test10/migrate-begin.dot
@@ -1,16 +1,15 @@
 digraph "g" {
 "all_stopped" [ style=bold color="green" fontcolor="orange" ]
-"load_stopped_hex-13 hex-13" -> "test-vm_migrate_to_0 hex-14" [ style = bold]
 "load_stopped_hex-13 hex-13" -> "test-vm_start_0 hex-13" [ style = bold]
 "load_stopped_hex-13 hex-13" [ style=bold color="green" fontcolor="orange" ]
 "load_stopped_hex-14 hex-14" [ style=bold color="green" fontcolor="orange" ]
 "test-vm_migrate_from_0 hex-13" -> "test-vm_stop_0 hex-14" [ style = bold]
 "test-vm_migrate_from_0 hex-13" [ style=bold color="green" fontcolor="black" ]
 "test-vm_migrate_to_0 hex-14" -> "test-vm_migrate_from_0 hex-13" [ style = bold]
 "test-vm_migrate_to_0 hex-14" [ style=bold color="green" fontcolor="black" ]
 "test-vm_start_0 hex-13" [ style=bold color="green" fontcolor="orange" ]
 "test-vm_stop_0 hex-14" -> "all_stopped" [ style = bold]
 "test-vm_stop_0 hex-14" -> "load_stopped_hex-14 hex-14" [ style = bold]
 "test-vm_stop_0 hex-14" -> "test-vm_start_0 hex-13" [ style = bold]
 "test-vm_stop_0 hex-14" [ style=bold color="green" fontcolor="black" ]
 }
diff --git a/pengine/test10/migrate-begin.exp b/pengine/test10/migrate-begin.exp
index fa817f3353..a19bb7b09a 100644
--- a/pengine/test10/migrate-begin.exp
+++ b/pengine/test10/migrate-begin.exp
@@ -1,89 +1,85 @@
[XML hunk not preserved in this extract]
diff --git a/pengine/test10/migrate-begin.summary b/pengine/test10/migrate-begin.summary
index b4b58703f1..3dea03a7fb 100644
--- a/pengine/test10/migrate-begin.summary
+++ b/pengine/test10/migrate-begin.summary
@@ -1,27 +1,27 @@
 Current cluster status:
 Online: [ hex-13 hex-14 ]
 test-vm (ocf::heartbeat:Xen): Started hex-14
 Clone Set: c-clusterfs [dlm]
 Started: [ hex-13 hex-14 ]
 Transition Summary:
 * Migrate test-vm (Started hex-14 -> hex-13)
 Executing cluster transition:
- * Pseudo action: load_stopped_hex-13
 * Resource action: test-vm migrate_to on hex-14
+ * Pseudo action: load_stopped_hex-13
 * Resource action: test-vm migrate_from on hex-13
 * Resource action: test-vm stop on hex-14
 * Pseudo action: load_stopped_hex-14
 * Pseudo action: all_stopped
 * Pseudo action: test-vm_start_0
 Revised cluster status:
 Online: [ hex-13 hex-14 ]
 test-vm (ocf::heartbeat:Xen): Started hex-13
 Clone Set: c-clusterfs [dlm]
 Started: [ hex-13 hex-14 ]
diff --git a/pengine/test10/utilization-order3.dot b/pengine/test10/utilization-order3.dot
index 9d9c4d2646..84659beccf 100644
--- a/pengine/test10/utilization-order3.dot
+++ b/pengine/test10/utilization-order3.dot
@@ -1,20 +1,19 @@
 digraph "g" {
 "all_stopped" [ style=bold color="green" fontcolor="orange" ]
 "load_stopped_node1 node1" -> "rsc2_start_0 node1" [ style = bold]
 "load_stopped_node1 node1" [ style=bold color="green" fontcolor="orange" ]
-"load_stopped_node2 node2" -> "rsc1_migrate_to_0 node1" [ style = bold]
 "load_stopped_node2 node2" -> "rsc1_start_0 node2" [ style = bold]
 "load_stopped_node2 node2" [ style=bold color="green" fontcolor="orange" ]
 "probe_complete node1" [ style=bold color="green" fontcolor="black" ]
 "probe_complete node2" [ style=bold color="green" fontcolor="black" ]
 "rsc1_migrate_from_0 node2" -> "rsc1_stop_0 node1" [ style = bold]
 "rsc1_migrate_from_0 node2" [ style=bold color="green" fontcolor="black" ]
 "rsc1_migrate_to_0 node1" -> "rsc1_migrate_from_0 node2" [ style = bold]
 "rsc1_migrate_to_0 node1" [ style=bold color="green" fontcolor="black" ]
 "rsc1_start_0 node2" [ style=bold color="green" fontcolor="orange" ]
 "rsc1_stop_0 node1" -> "all_stopped" [ style = bold]
 "rsc1_stop_0 node1" -> "load_stopped_node1 node1" [ style = bold]
 "rsc1_stop_0 node1" -> "rsc1_start_0 node2" [ style = bold]
 "rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
 "rsc2_start_0 node1" [ style=bold color="green" fontcolor="black" ]
 }
diff --git a/pengine/test10/utilization-order3.exp b/pengine/test10/utilization-order3.exp
index d8e0dfc057..2975e70050 100644
--- a/pengine/test10/utilization-order3.exp
+++ b/pengine/test10/utilization-order3.exp
@@ -1,118 +1,114 @@
[XML hunk not preserved in this extract]
diff --git a/pengine/test10/utilization-order3.summary b/pengine/test10/utilization-order3.summary
index cfeea48924..23411ee578 100644
--- a/pengine/test10/utilization-order3.summary
+++ b/pengine/test10/utilization-order3.summary
@@ -1,27 +1,27 @@
 Current cluster status:
 Online: [ node1 node2 ]
 rsc2 (ocf::pacemaker:Dummy): Stopped
 rsc1 (ocf::pacemaker:Dummy): Started node1
 Transition Summary:
 * Start rsc2 (node1)
 * Migrate rsc1 (Started node1 -> node2)
 Executing cluster transition:
- * Pseudo action: load_stopped_node2
 * Resource action: rsc1 migrate_to on node1
+ * Pseudo action: load_stopped_node2
 * Resource action: rsc1 migrate_from on node2
 * Resource action: rsc1 stop on node1
 * Pseudo action: load_stopped_node1
 * Pseudo action: all_stopped
 * Resource action: rsc2 start on node1
 * Pseudo action: rsc1_start_0
 Revised cluster status:
 Online: [ node1 node2 ]
 rsc2 (ocf::pacemaker:Dummy): Started node1
 rsc1 (ocf::pacemaker:Dummy): Started node2
diff --git a/pengine/test10/utilization-order4.dot b/pengine/test10/utilization-order4.dot
index 8f4ee501e5..7439bd9846 100644
--- a/pengine/test10/utilization-order4.dot
+++ b/pengine/test10/utilization-order4.dot
@@ -1,54 +1,53 @@
 digraph "g" {
 "all_stopped" [ style=bold color="green" fontcolor="orange" ]
 "clone-nfs_stop_0" -> "clone-nfs_stopped_0" [ style = bold]
 "clone-nfs_stop_0" -> "grp-nfs:1_stop_0" [ style = bold]
 "clone-nfs_stop_0" [ style=bold color="green" fontcolor="orange" ]
 "clone-nfs_stopped_0" -> "clone-ping_stop_0" [ style = bold]
 "clone-nfs_stopped_0" [ style=bold color="green" fontcolor="orange" ]
 "clone-ping_stop_0" -> "clone-ping_stopped_0" [ style = bold]
 "clone-ping_stop_0" -> "prim-ping:0_stop_0 deglxen002" [ style = bold]
 "clone-ping_stop_0" [ style=bold color="green" fontcolor="orange" ]
 "clone-ping_stopped_0" [ style=bold color="green" fontcolor="orange" ]
 "degllx61-vm_stop_0 deglxen001" -> "all_stopped" [ style = bold]
 "degllx61-vm_stop_0 deglxen001" -> "clone-nfs_stop_0" [ style = bold]
 "degllx61-vm_stop_0 deglxen001" -> "load_stopped_deglxen001 deglxen001" [ style = bold]
 "degllx61-vm_stop_0 deglxen001" [ style=bold color="green" fontcolor="black" ]
 "degllx62-vm_migrate_from_0 deglxen001" -> "degllx62-vm_stop_0 deglxen002" [ style = bold]
 "degllx62-vm_migrate_from_0 deglxen001" [ style=bold color="green" fontcolor="black" ]
 "degllx62-vm_migrate_to_0 deglxen002" -> "degllx62-vm_migrate_from_0 deglxen001" [ style = bold]
 "degllx62-vm_migrate_to_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 "degllx62-vm_monitor_30000 deglxen001" [ style=bold color="green" fontcolor="black" ]
 "degllx62-vm_start_0 deglxen001" -> "degllx62-vm_monitor_30000 deglxen001" [ style = bold]
 "degllx62-vm_start_0 deglxen001" [ style=bold color="green" fontcolor="orange" ]
 "degllx62-vm_stop_0 deglxen002" -> "all_stopped" [ style = bold]
 "degllx62-vm_stop_0 deglxen002" -> "clone-nfs_stop_0" [ style = bold]
 "degllx62-vm_stop_0 deglxen002" -> "degllx62-vm_start_0 deglxen001" [ style = bold]
 "degllx62-vm_stop_0 deglxen002" -> "load_stopped_deglxen002 deglxen002" [ style = bold]
 "degllx62-vm_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 "grp-nfs:1_stop_0" -> "grp-nfs:1_stopped_0" [ style = bold]
 "grp-nfs:1_stop_0" -> "nfs-xen_config:1_stop_0 deglxen002" [ style = bold]
 "grp-nfs:1_stop_0" -> "nfs-xen_images:1_stop_0 deglxen002" [ style = bold]
 "grp-nfs:1_stop_0" -> "nfs-xen_swapfiles:1_stop_0 deglxen002" [ style = bold]
 "grp-nfs:1_stop_0" [ style=bold color="green" fontcolor="orange" ]
 "grp-nfs:1_stopped_0" -> "clone-nfs_stopped_0" [ style = bold]
 "grp-nfs:1_stopped_0" -> "prim-ping:0_stop_0 deglxen002" [ style = bold]
 "grp-nfs:1_stopped_0" [ style=bold color="green" fontcolor="orange" ]
-"load_stopped_deglxen001 deglxen001" -> "degllx62-vm_migrate_to_0 deglxen002" [ style = bold]
 "load_stopped_deglxen001 deglxen001" -> "degllx62-vm_start_0 deglxen001" [ style = bold]
 "load_stopped_deglxen001 deglxen001" [ style=bold color="green" fontcolor="orange" ]
 "load_stopped_deglxen002 deglxen002" [ style=bold color="green" fontcolor="orange" ]
 "nfs-xen_config:1_stop_0 deglxen002" -> "all_stopped" [ style = bold]
 "nfs-xen_config:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold]
 "nfs-xen_config:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 "nfs-xen_images:1_stop_0 deglxen002" -> "all_stopped" [ style = bold]
 "nfs-xen_images:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold]
 "nfs-xen_images:1_stop_0 deglxen002" -> "nfs-xen_swapfiles:1_stop_0 deglxen002" [ style = bold]
 "nfs-xen_images:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "all_stopped" [ style = bold]
 "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "grp-nfs:1_stopped_0" [ style = bold]
 "nfs-xen_swapfiles:1_stop_0 deglxen002" -> "nfs-xen_config:1_stop_0 deglxen002" [ style = bold]
 "nfs-xen_swapfiles:1_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 "prim-ping:0_stop_0 deglxen002" -> "all_stopped" [ style = bold]
 "prim-ping:0_stop_0 deglxen002" -> "clone-ping_stopped_0" [ style = bold]
 "prim-ping:0_stop_0 deglxen002" [ style=bold color="green" fontcolor="black" ]
 }
diff --git a/pengine/test10/utilization-order4.exp b/pengine/test10/utilization-order4.exp
index 8a3557ecf8..078b5733de 100644
--- a/pengine/test10/utilization-order4.exp
+++ b/pengine/test10/utilization-order4.exp
@@ -1,281 +1,277 @@
[XML hunk not preserved in this extract]
diff --git a/pengine/test10/utilization-order4.summary b/pengine/test10/utilization-order4.summary
index 469bd4bf99..22a9610507 100644
--- a/pengine/test10/utilization-order4.summary
+++ b/pengine/test10/utilization-order4.summary
@@ -1,60 +1,60 @@
 Current cluster status:
 Node deglxen002: standby
 Online: [ deglxen001 ]
 degllx62-vm (ocf::heartbeat:Xen): Started deglxen002
 degllx63-vm (ocf::heartbeat:Xen): Stopped
 degllx61-vm (ocf::heartbeat:Xen): Started deglxen001
 degllx64-vm (ocf::heartbeat:Xen): Stopped
 stonith_sbd (stonith:external/sbd): Started deglxen001
 Clone Set: clone-nfs [grp-nfs]
 Started: [ deglxen001 deglxen002 ]
 Clone Set: clone-ping [prim-ping]
 Started: [ deglxen001 deglxen002 ]
 Transition Summary:
 * Migrate degllx62-vm (Started deglxen002 -> deglxen001)
 * Stop degllx61-vm (deglxen001)
 * Stop nfs-xen_config:1 (deglxen002)
 * Stop nfs-xen_swapfiles:1 (deglxen002)
 * Stop nfs-xen_images:1 (deglxen002)
 * Stop prim-ping:1 (deglxen002)
 Executing cluster transition:
+ * Resource action: degllx62-vm migrate_to on deglxen002
 * Resource action: degllx61-vm stop on deglxen001
 * Pseudo action: load_stopped_deglxen001
- * Resource action: degllx62-vm migrate_to on deglxen002
 * Resource action: degllx62-vm migrate_from on deglxen001
 * Resource action: degllx62-vm stop on deglxen002
 * Pseudo action: clone-nfs_stop_0
 * Pseudo action: load_stopped_deglxen002
 * Pseudo action: degllx62-vm_start_0
 * Pseudo action: grp-nfs:1_stop_0
 * Resource action: nfs-xen_images:1 stop on deglxen002
 * Resource action: degllx62-vm monitor=30000 on deglxen001
 * Resource action: nfs-xen_swapfiles:1 stop on deglxen002
 * Resource action: nfs-xen_config:1 stop on deglxen002
 * Pseudo action: grp-nfs:1_stopped_0
 * Pseudo action: clone-nfs_stopped_0
 * Pseudo action: clone-ping_stop_0
 * Resource action: prim-ping:0 stop on deglxen002
 * Pseudo action: clone-ping_stopped_0
 * Pseudo action: all_stopped
 Revised cluster status:
 Node deglxen002: standby
 Online: [ deglxen001 ]
 degllx62-vm (ocf::heartbeat:Xen): Started deglxen001
 degllx63-vm (ocf::heartbeat:Xen): Stopped
 degllx61-vm (ocf::heartbeat:Xen): Stopped deglxen002
 degllx64-vm (ocf::heartbeat:Xen): Stopped
 stonith_sbd (stonith:external/sbd): Started deglxen001
 Clone Set: clone-nfs [grp-nfs]
 Started: [ deglxen001 ]
 Stopped: [ grp-nfs:1 ]
 Clone Set: clone-ping [prim-ping]
 Started: [ deglxen001 ]
 Stopped: [ prim-ping:1 ]