diff --git a/lib/pengine/container.c b/lib/pengine/container.c
index 175a366e14..118e7169bd 100644
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -1,1099 +1,1104 @@
/*
 * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <crm_internal.h>

#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <unpack.h>
#include <crm/msg_xml.h>

#define VARIANT_CONTAINER 1
#include "./variant.h"

void tuple_free(container_grouping_t *tuple);

static char *
next_ip(const char *last_ip)
{
    unsigned int oct1 = 0;
    unsigned int oct2 = 0;
    unsigned int oct3 = 0;
    unsigned int oct4 = 0;
    int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);

    if (rc != 4) {
        /*@ TODO check for IPv6 */
        return NULL;

    } else if (oct3 > 253) {
        return NULL;

    } else if (oct4 > 253) {
        ++oct3;
        oct4 = 1;

    } else {
        ++oct4;
    }

    return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}

static int
allocate_ip(container_variant_data_t *data, container_grouping_t *tuple,
            char *buffer, int max)
{
    if(data->ip_range_start == NULL) {
        return 0;

    } else if(data->ip_last) {
        tuple->ipaddr = next_ip(data->ip_last);

    } else {
        tuple->ipaddr = strdup(data->ip_range_start);
    }

    data->ip_last = tuple->ipaddr;
#if 0
    return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
                    data->prefix, tuple->offset, tuple->ipaddr,
                    data->prefix, tuple->offset, data->prefix, tuple->offset);
#else
    return snprintf(buffer, max, " --add-host=%s-%d:%s",
                    data->prefix, tuple->offset, tuple->ipaddr);
#endif
}

static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
    xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(rsc, XML_ATTR_ID, name);
    crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, "ocf");
    crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
    crm_xml_add(rsc, XML_ATTR_TYPE, kind);

    return rsc;
}

static void
create_nvp(xmlNode *parent, const char *name, const char *value)
{
    xmlNode *xml_nvp = create_xml_node(parent, XML_CIB_TAG_NVPAIR);

    crm_xml_set_id(xml_nvp, "%s-%s", ID(parent), name);
    crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_NAME, name);
    crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_VALUE, value);
}

static void
create_op(xmlNode *parent, const char *prefix, const char *task, const char *interval)
{
    xmlNode *xml_op = create_xml_node(parent, "op");

    crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval);
    crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval);
    crm_xml_add(xml_op, "name", task);
}

/*!
 * \internal
 * \brief Check whether cluster can manage resource inside container
 *
 * \param[in] data  Container variant data
 *
 * \return TRUE if networking configuration is acceptable, FALSE otherwise
 *
 * \note The resource is manageable if an IP range or control port has been
 *       specified. If a control port is used without an IP range, replicas per
 *       host must be 1.
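 *
 *       For example, either of these (hypothetical) <network> settings in the
 *       bundle XML would satisfy this check:
 *         <network ip-range-start="192.168.122.131"/>
 *         <network control-port="3121"/>  (only valid with replicas-per-host=1)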
*/ static bool valid_network(container_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->replicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->replicas_per_host = 1; /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */ } return TRUE; } return FALSE; } static bool create_ip_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "ip", tuple->ipaddr); if(data->host_network) { create_nvp(xml_obj, "nic", data->host_network); } if(data->host_netmask) { create_nvp(xml_obj, "cidr_netmask", data->host_netmask); } else { create_nvp(xml_obj, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); create_op(xml_obj, ID(xml_ip), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) { return FALSE; } parent->children = g_list_append(parent->children, tuple->ip); } return TRUE; } static bool create_docker_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_docker = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset); crm_xml_sanitize_id(id); xml_docker = create_resource(id, "heartbeat", "docker"); free(id); xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "image", data->image); create_nvp(xml_obj, "allow_pull", "true"); create_nvp(xml_obj, "force_kill", "false"); create_nvp(xml_obj, "reuse", "false"); offset += snprintf(buffer+offset, max-offset, " --restart=no"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. 
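 * E.g. for a hypothetical bundle with prefix "httpd" and ip-range-start
 * "192.168.122.131", replica 0 is run with "-h httpd-0", matching the
 * "--add-host=httpd-0:192.168.122.131" entry produced by allocate_ip(), so
 * the container's hostname resolves to the IP the replica binds to.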
*/ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " -h %s-%d", data->prefix, tuple->offset); } if(data->docker_network) { // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr); offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { container_mount_t *mount = pIter->data; if(mount->flags) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, tuple->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { container_port_t *port = pIter->data; if(tuple->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", tuple->ipaddr, port->source, port->target); } else { offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if(data->docker_run_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options); } if(data->docker_host_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options); } create_nvp(xml_obj, "run_opts", buffer); free(buffer); create_nvp(xml_obj, "mount_points", dbuffer); free(dbuffer); if(tuple->child) { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } else { create_nvp(xml_obj, "run_cmd", SBIN_DIR"/pacemaker_remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd"); * create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_docker, "operations"); create_op(xml_obj, ID(xml_docker), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) { return FALSE; } parent->children = g_list_append(parent->children, tuple->docker); return TRUE; } /*! 
* \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pe_node_t *) match)->weight = -INFINITY; } if (rsc->children) { GListPtr child; for (child = rsc->children; child != NULL; child = child->next) { disallow_node((resource_t *) (child->data), uname); } } } static bool create_remote_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if (tuple->child && valid_network(data)) { GHashTableIter gIter; GListPtr rsc_iter = NULL; node_t *node = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset); const char *uname = NULL; if (remote_id_conflict(id, data_set)) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset); CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE); } xml_remote = create_resource(id, "pacemaker", "remote"); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during data set cleanup to use as * the node ID and uname. */ free(id); id = NULL; uname = ID(xml_remote); xml_obj = create_xml_node(xml_remote, "operations"); create_op(xml_obj, uname, "monitor", "60s"); xml_obj = create_xml_node(xml_remote, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); if(tuple->ipaddr) { create_nvp(xml_obj, "addr", tuple->ipaddr); } else { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside create_nvp(xml_obj, "addr", "#uname"); } if(data->control_port) { create_nvp(xml_obj, "port", data->control_port); } else { char *port_s = crm_itoa(DEFAULT_REMOTE_PORT); create_nvp(xml_obj, "port", port_s); free(port_s); } xml_obj = create_xml_node(xml_remote, XML_TAG_META_SETS); crm_xml_set_id(xml_obj, "%s-meta-%d", data->prefix, tuple->offset); create_nvp(xml_obj, XML_OP_ATTR_ALLOW_MIGRATE, "false"); /* This sets tuple->docker as tuple->remote's container, which is * similar to what happens with guest nodes. This is how the PE knows * that the bundle node is fenced by recovering docker, and that * remote should be ordered relative to docker. */ create_nvp(xml_obj, XML_RSC_ATTR_CONTAINER, tuple->docker->id); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(data_set->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, "remote", "-INFINITY", data_set); } else { node->weight = -INFINITY; } /* unpack_remote_nodes() ensures that each remote node and guest node * has a pe_node_t entry. Ideally, it would do the same for bundle nodes. * Unfortunately, a bundle has to be mostly unpacked before it's obvious * what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when common_unpack() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. 
* * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) { disallow_node((resource_t *) (rsc_iter->data), uname); } tuple->node = node_copy(node); tuple->node->weight = 500; if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) { return FALSE; } g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if(is_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -INFINITY; } } tuple->node->details->remote_rsc = tuple->remote; /* #kind is irrelevant to bundles since it is only used in location * constraint rules, and those don't matter for resources inside * bundles. But just for clarity, a bundle is closer to "container" * (guest node) than the "remote" set by pe_create_node(). */ g_hash_table_insert(tuple->node->details->attrs, strdup("#kind"), strdup("container")); /* One effect of this is that setup_container() will add * tuple->remote to tuple->docker's fillers, which will make * rsc_contains_remote_node() true for tuple->docker. * * tuple->child does NOT get added to tuple->docker's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking tuple->docker's migration * threshold. */ parent->children = g_list_append(parent->children, tuple->remote); } return TRUE; } static bool create_container( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(create_docker_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_ip_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_remote_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(tuple->child && tuple->ipaddr) { add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr); } if(tuple->remote) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the docker container * is active. * * Makes it possible to have remote nodes, running docker * containers with pacemaker_remoted inside in order to start * services inside those containers. 
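 *
 * (Setting pe_rsc_allow_remote_remotes below is what relaxes the usual
 * restriction that a remote connection resource may only run on a full
 * cluster node.)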
*/ set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes); } return FALSE; } static void mount_free(container_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(container_port_t *port) { free(port->source); free(port->target); free(port); } gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; container_variant_data_t *container_data = NULL; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); container_data = calloc(1, sizeof(container_variant_data_t)); rsc->variant_opaque = container_data; container_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, "docker"); if(xml_obj == NULL) { return FALSE; } value = crm_element_value(xml_obj, "masters"); container_data->masters = crm_parse_int(value, "0"); if (container_data->masters < 0) { pe_err("'masters' for %s must be nonnegative integer, using 0", rsc->id); container_data->masters = 0; } value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (container_data->masters > 0)) { container_data->replicas = container_data->masters; } else { container_data->replicas = crm_parse_int(value, "1"); } if (container_data->replicas < 1) { pe_err("'replicas' for %s must be positive integer, using 1", rsc->id); container_data->replicas = 1; } /* * Communication between containers on the same host via the * floating IPs only works if docker is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); container_data->replicas_per_host = crm_parse_int(value, "1"); if (container_data->replicas_per_host < 1) { pe_err("'replicas-per-host' for %s must be positive integer, using 1", rsc->id); container_data->replicas_per_host = 1; } if (container_data->replicas_per_host == 1) { clear_bit(rsc->flags, pe_rsc_unique); } container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command"); container_data->docker_run_options = crm_element_value_copy(xml_obj, "options"); container_data->image = crm_element_value_copy(xml_obj, "image"); container_data->docker_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); container_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); container_data->control_port = crm_element_value_copy(xml_obj, "control-port"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_port_t *port = calloc(1, sizeof(container_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } container_data->ports = g_list_append(container_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_mount_t *mount = 
calloc(1, sizeof(container_mount_t)); mount->source = crm_element_value_copy(xml_child, "source-dir"); if(mount->source == NULL) { mount->source = crm_element_value_copy(xml_child, "source-dir-root"); mount->flags = 1; } mount->target = crm_element_value_copy(xml_child, "target-dir"); mount->options = crm_element_value_copy(xml_child, "options"); if(mount->source && mount->target) { container_data->mounts = g_list_append(container_data->mounts, mount); } else { pe_err("Invalid mount directive %s", ID(xml_child)); mount_free(mount); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(container_data)) { char *value = NULL; xmlNode *xml_set = NULL; if(container_data->masters > 0) { xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER); } else { xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); } crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name); create_nvp(xml_set, XML_RSC_ATTR_ORDERED, "true"); value = crm_itoa(container_data->replicas); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = crm_itoa(container_data->replicas_per_host); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); if(container_data->replicas_per_host > 1) { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "true"); } else { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "false"); } if(container_data->masters) { value = crm_itoa(container_data->masters); create_nvp(xml_set, XML_RSC_ATTR_MASTER_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GListPtr childIter = NULL; resource_t *new_rsc = NULL; container_mount_t *mount = NULL; container_port_t *port = NULL; int offset = 0, max = 1024; char *buffer = NULL; if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", ID(rsc->xml)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } container_data->child = new_rsc; mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->target = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->options = NULL; mount->flags = 0; container_data->mounts = g_list_append(container_data->mounts, mount); mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(CRM_LOG_DIR "/bundles"); mount->target = strdup("/var/log"); mount->options = NULL; mount->flags = 1; container_data->mounts = g_list_append(container_data->mounts, mount); port = calloc(1, sizeof(container_port_t)); if(container_data->control_port) { port->source = strdup(container_data->control_port); } else { port->source = crm_itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); container_data->ports = g_list_append(container_data->ports, port); buffer = calloc(1, max+1); for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->child = childIter->data; tuple->offset = lpc++; + // Ensure the child's notify gets set based on the underlying primitive's value + if(is_set(tuple->child->flags, pe_rsc_notify)) { + 
set_bit(container_data->child->flags, pe_rsc_notify);
+            }
+
            offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
            container_data->tuples = g_list_append(container_data->tuples, tuple);
        }
        container_data->docker_host_options = buffer;

    } else {
        // Just a naked container, no pacemaker-remote
        int offset = 0, max = 1024;
        char *buffer = calloc(1, max+1);

        for(int lpc = 0; lpc < container_data->replicas; lpc++) {
            container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
            tuple->offset = lpc;
            offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
            container_data->tuples = g_list_append(container_data->tuples, tuple);
        }

        container_data->docker_host_options = buffer;
    }

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;
        // TODO: Remove from list if create_container() returns TRUE
        create_container(rsc, container_data, tuple, data_set);
    }

    if(container_data->child) {
        rsc->children = g_list_append(rsc->children, container_data->child);
    }
    return TRUE;
}

static int
tuple_rsc_active(resource_t *rsc, gboolean all)
{
    if (rsc) {
        gboolean child_active = rsc->fns->active(rsc, all);

        if (child_active && !all) {
            return TRUE;
        } else if (!child_active && all) {
            return FALSE;
        }
    }
    return -1;
}

gboolean
container_active(resource_t * rsc, gboolean all)
{
    container_variant_data_t *container_data = NULL;
    GListPtr iter = NULL;

    get_container_variant_data(container_data, rsc);
    for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *)(iter->data);
        int rsc_active;

        rsc_active = tuple_rsc_active(tuple->ip, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->child, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->docker, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }

        rsc_active = tuple_rsc_active(tuple->remote, all);
        if (rsc_active >= 0) {
            return (gboolean) rsc_active;
        }
    }

    /* If "all" is TRUE, we've already checked that no resources were inactive,
     * so return TRUE; if "all" is FALSE, we didn't find any active resources,
     * so return FALSE.
     */
    return all;
}

resource_t *
find_container_child(const char *stem, resource_t * rsc, node_t *node)
{
    container_variant_data_t *container_data = NULL;
    resource_t *parent = uber_parent(rsc);
    CRM_ASSERT(parent->parent);

    parent = parent->parent;
    get_container_variant_data(container_data, parent);

    if (is_not_set(rsc->flags, pe_rsc_unique)) {
        for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
            container_grouping_t *tuple = (container_grouping_t *)gIter->data;

            CRM_ASSERT(tuple);
            if(tuple->node->details == node->details) {
                rsc = tuple->child;
                break;
            }
        }
    }

    if (rsc && safe_str_neq(stem, rsc->id)) {
        free(rsc->clone_name);
        rsc->clone_name = strdup(stem);
    }

    return rsc;
}

static void
print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
{
    if (rsc != NULL) {
        if (options & pe_print_html) {
            status_print("<li>");
        }
        rsc->fns->print(rsc, pre_text, options, print_data);
        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
}

static void
container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (pre_text == NULL) {
        pre_text = "";
    }
    child_text = crm_concat(pre_text, "       ", ' ');

    get_container_variant_data(container_data, rsc);

    status_print("%s<bundle ", pre_text);
    status_print("id=\"%s\" ", rsc->id);
    status_print("type=\"docker\" ");
    status_print("image=\"%s\" ", container_data->image);
    status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print(">\n");

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        status_print("%s    <replica id=\"%d\">\n", pre_text, tuple->offset);
        print_rsc_in_list(tuple->ip, child_text, options, print_data);
        print_rsc_in_list(tuple->child, child_text, options, print_data);
        print_rsc_in_list(tuple->docker, child_text, options, print_data);
        print_rsc_in_list(tuple->remote, child_text, options, print_data);
        status_print("%s    </replica>\n", pre_text);
    }
    status_print("%s</bundle>\n", pre_text);
    free(child_text);
}

static void
tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
{
    node_t *node = NULL;
    resource_t *rsc = tuple->child;

    int offset = 0;
    char buffer[LINE_MAX];

    if(rsc == NULL) {
        rsc = tuple->docker;
    }

    if(tuple->remote) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                           rsc_printable_id(tuple->remote));
    } else {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                           rsc_printable_id(tuple->docker));
    }
    if(tuple->ipaddr) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                           tuple->ipaddr);
    }

    if(tuple->docker && tuple->docker->running_on != NULL) {
        node = tuple->docker->running_on->data;
    } else if (tuple->docker == NULL && rsc->running_on != NULL) {
        node = rsc->running_on->data;
    }
    common_print(rsc, pre_text, buffer, node, options, print_data);
}

void
container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (options & pe_print_xml) {
        container_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_container_variant_data(container_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    status_print("%sDocker container%s: %s [%s]%s%s\n",
                 pre_text, container_data->replicas>1?" set":"", rsc->id, container_data->image,
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    if (options & pe_print_html) {
        status_print("<br />\n<ul>\n");
    }

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if (options & pe_print_html) {
            status_print("<li>");
        }

        if(is_set(options, pe_print_clone_details)) {
            child_text = crm_strdup_printf("     %s", pre_text);
            if(g_list_length(container_data->tuples) > 1) {
                status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
            }
            if (options & pe_print_html) {
                status_print("<br />\n<ul>\n");
            }
            print_rsc_in_list(tuple->ip, child_text, options, print_data);
            print_rsc_in_list(tuple->docker, child_text, options, print_data);
            print_rsc_in_list(tuple->remote, child_text, options, print_data);
            print_rsc_in_list(tuple->child, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</ul>\n");
            }

        } else {
            child_text = crm_strdup_printf("%s  ", pre_text);
            tuple_print(tuple, child_text, options, print_data);
        }
        free(child_text);

        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
}

void
tuple_free(container_grouping_t *tuple)
{
    if(tuple == NULL) {
        return;
    }

    if(tuple->node) {
        free(tuple->node);
        tuple->node = NULL;
    }

    if(tuple->ip) {
        free_xml(tuple->ip->xml);
        tuple->ip->xml = NULL;
        tuple->ip->fns->free(tuple->ip);
        tuple->ip = NULL;
    }
    if(tuple->docker) {
        free_xml(tuple->docker->xml);
        tuple->docker->xml = NULL;
        tuple->docker->fns->free(tuple->docker);
        tuple->docker = NULL;
    }
    if(tuple->remote) {
        free_xml(tuple->remote->xml);
        tuple->remote->xml = NULL;
        tuple->remote->fns->free(tuple->remote);
        tuple->remote = NULL;
    }
    free(tuple->ipaddr);
    free(tuple);
}

void
container_free(resource_t * rsc)
{
    container_variant_data_t *container_data = NULL;
    CRM_CHECK(rsc != NULL, return);

    get_container_variant_data(container_data, rsc);
    pe_rsc_trace(rsc, "Freeing %s", rsc->id);

    free(container_data->prefix);
    free(container_data->image);
    free(container_data->control_port);
    free(container_data->host_network);
    free(container_data->host_netmask);
    free(container_data->ip_range_start);
    free(container_data->docker_network);
    free(container_data->docker_run_options);
    free(container_data->docker_run_command);
    free(container_data->docker_host_options);

    g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
    g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
    g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
    g_list_free(rsc->children);

    if(container_data->child) {
        free_xml(container_data->child->xml);
        container_data->child->xml = NULL;
        container_data->child->fns->free(container_data->child);
    }

    common_free(rsc);
}

enum rsc_role_e
container_resource_state(const resource_t * rsc, gboolean current)
{
    enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
    return container_role;
}
diff --git a/pengine/container.c b/pengine/container.c
index 280a61ab59..1e9b971577 100644
--- a/pengine/container.c
+++ b/pengine/container.c
@@ -1,898 +1,900 @@
/*
 * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <crm_internal.h>

#include <crm/msg_xml.h>

#include <allocate.h>
#include <notif.h>
#include <utils.h>

#define VARIANT_CONTAINER 1
#include <lib/pengine/variant.h>

static bool
is_child_container_node(container_variant_data_t *data, pe_node_t *node)
{
    for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;
        if(node->details == tuple->node->details) {
            return TRUE;
        }
    }
    return FALSE;
}

gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes,
                         int max, int per_host_max, pe_working_set_t * data_set);

static GListPtr
get_container_list(resource_t *rsc)
{
    GListPtr containers = NULL;
    container_variant_data_t *data = NULL;

    if(rsc->variant == pe_container) {
        get_container_variant_data(data, rsc);
        for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) {
            container_grouping_t *tuple = (container_grouping_t *)gIter->data;
            containers = g_list_append(containers, tuple->docker);
        }
    }
    return containers;
}

static GListPtr
get_containers_or_children(resource_t *rsc)
{
    GListPtr containers = NULL;
    container_variant_data_t *data = NULL;

    if(rsc->variant == pe_container) {
        get_container_variant_data(data, rsc);
        for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) {
            container_grouping_t *tuple = (container_grouping_t *)gIter->data;
            containers = g_list_append(containers, tuple->docker);
        }
        return containers;

    } else {
        return rsc->children;
    }
}

node_t *
container_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr containers = NULL;
    GListPtr nodes = NULL;
    container_variant_data_t *container_data = NULL;

    CRM_CHECK(rsc != NULL, return NULL);

    get_container_variant_data(container_data, rsc);
    set_bit(rsc->flags, pe_rsc_allocating);
    containers = get_container_list(rsc);

    dump_node_scores(show_scores ?
0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); containers = g_list_sort_with_data(containers, sort_clone_instance, data_set); distribute_children(rsc, containers, nodes, container_data->replicas, container_data->replicas_per_host, data_set); g_list_free(nodes); g_list_free(containers); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; pe_node_t *docker_host = tuple->docker->allocated_to; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->allocate(tuple->ip, prefer, data_set); } if(tuple->remote && is_remote_node(docker_host)) { /* We need 'nested' connection resources to be on the same * host because pacemaker-remoted only supports a single * active connection */ rsc_colocation_new("child-remote-with-docker-remote", NULL, INFINITY, tuple->remote, docker_host->details->remote_rsc, NULL, NULL, data_set); } if(tuple->remote) { tuple->remote->cmds->allocate(tuple->remote, prefer, data_set); } // Explicitly allocate tuple->child before the container->child if(tuple->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, tuple->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(node->details != tuple->node->details) { node->weight = -INFINITY; } else { node->weight = INFINITY; } } set_bit(tuple->child->parent->flags, pe_rsc_allocating); tuple->child->cmds->allocate(tuple->child, tuple->node, data_set); clear_bit(tuple->child->parent->flags, pe_rsc_allocating); } } if(container_data->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, container_data->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(is_child_container_node(container_data, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } container_data->child->cmds->allocate(container_data->child, prefer, data_set); } clear_bit(rsc->flags, pe_rsc_allocating); clear_bit(rsc->flags, pe_rsc_provisional); return NULL; } void container_create_actions(resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *action = NULL; GListPtr containers = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); containers = get_container_list(rsc); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->create_actions(tuple->ip, data_set); } if(tuple->docker) { tuple->docker->cmds->create_actions(tuple->docker, data_set); } if(tuple->remote) { tuple->remote->cmds->create_actions(tuple->remote, data_set); } } clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set); if(container_data->child) { container_data->child->cmds->create_actions(container_data->child, data_set); if(container_data->child->variant == pe_master) { /* promote */ action = create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set); action->priority = INFINITY; /* demote */ action = create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set); action->priority = INFINITY; } } g_list_free(containers); } void 
container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); if(container_data->child) { new_rsc_order(rsc, RSC_START, container_data->child, RSC_START, pe_order_implies_first_printed, data_set); new_rsc_order(rsc, RSC_STOP, container_data->child, RSC_STOP, pe_order_implies_first_printed, data_set); if(container_data->child->children) { new_rsc_order(container_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } else { new_rsc_order(container_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); CRM_ASSERT(tuple->docker); tuple->docker->cmds->internal_constraints(tuple->docker, data_set); order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed); if(tuple->child) { order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed); } order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed); new_rsc_order(tuple->docker, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(tuple->docker, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if(tuple->ip) { tuple->ip->cmds->internal_constraints(tuple->ip, data_set); // Start ip then docker new_rsc_order(tuple->ip, RSC_START, tuple->docker, RSC_START, pe_order_runnable_left|pe_order_preserve, data_set); new_rsc_order(tuple->docker, RSC_STOP, tuple->ip, RSC_STOP, pe_order_implies_first|pe_order_preserve, data_set); rsc_colocation_new("ip-with-docker", NULL, INFINITY, tuple->ip, tuple->docker, NULL, NULL, data_set); } if(tuple->remote) { /* This handles ordering and colocating remote relative to docker * (via "resource-with-container"). Since IP is also ordered and * colocated relative to docker, we don't need to do anything * explicit here with IP. 
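 *
 * The net effect per replica is the start ordering
 *   ip -> docker -> remote -> child
 * with the corresponding stops in the reverse order.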
*/ tuple->remote->cmds->internal_constraints(tuple->remote, data_set); } if(tuple->child) { CRM_ASSERT(tuple->remote); // Start of the remote then child is implicit in the PE's remote logic } } if(container_data->child) { container_data->child->cmds->internal_constraints(container_data->child, data_set); if(container_data->child->variant == pe_master) { master_promotion_constraints(rsc, data_set); /* child demoted before global demoted */ new_rsc_order(container_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ new_rsc_order(rsc, RSC_DEMOTE, container_data->child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); /* child promoted before global promoted */ new_rsc_order(container_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed, data_set); /* global promote before child promote */ new_rsc_order(rsc, RSC_PROMOTE, container_data->child, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } } else { // int type = pe_order_optional | pe_order_implies_then | pe_order_restart; // custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, // rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); } } static resource_t * find_compatible_tuple_by_node(resource_t * rsc_lh, node_t * candidate, resource_t * rsc, enum rsc_role_e filter, gboolean current) { container_variant_data_t *container_data = NULL; CRM_CHECK(candidate != NULL, return NULL); get_container_variant_data(container_data, rsc); crm_trace("Looking for compatible child from %s for %s on %s", rsc_lh->id, rsc->id, candidate->details->uname); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(is_child_compatible(tuple->docker, candidate, filter, current)) { crm_trace("Pairing %s with %s on %s", rsc_lh->id, tuple->docker->id, candidate->details->uname); return tuple->docker; } } crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id); return NULL; } static resource_t * find_compatible_tuple(resource_t *rsc_lh, resource_t * rsc, enum rsc_role_e filter, gboolean current) { GListPtr scratch = NULL; resource_t *pair = NULL; node_t *active_node_lh = NULL; active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current); if (active_node_lh) { return find_compatible_tuple_by_node(rsc_lh, active_node_lh, rsc, filter, current); } scratch = g_hash_table_get_values(rsc_lh->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_tuple_by_node(rsc_lh, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, rsc->id); done: g_list_free(scratch); return pair; } void container_rsc_colocation_lh(resource_t * rsc, resource_t * rsc_rh, rsc_colocation_t * constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } int copies_per_node(resource_t * rsc) { /* Strictly speaking, there should be a 'copies_per_node' addition * to the resource function table and each case would be a * function. However that would be serious overkill to return an * int. 
In fact, it seems to me that both function tables * could/should be replaced by resources.{c,h} full of * rsc_{some_operation} functions containing a switch as below * which calls out to functions named {variant}_{some_operation} * as needed. */ switch(rsc->variant) { case pe_unknown: return 0; case pe_native: case pe_group: return 1; case pe_clone: case pe_master: { const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); return crm_parse_int(max_clones_node, "1"); } case pe_container: { container_variant_data_t *data = NULL; get_container_variant_data(data, rsc); return data->replicas_per_host; } } return 0; } void container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocation_t * constraint) { GListPtr allocated_rhs = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return); CRM_ASSERT(rsc_lh->variant == pe_native); if (is_set(rsc->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc, "%s is still provisional", rsc->id); return; } else if(constraint->rsc_lh->variant > pe_group) { resource_t *rh_child = find_compatible_tuple(rsc_lh, rsc, RSC_ROLE_UNKNOWN, FALSE); if (rh_child) { pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); } return; } get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc->id, constraint->score); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (constraint->score < INFINITY) { tuple->docker->cmds->rsc_colocation_rh(rsc_lh, tuple->docker, constraint); } else { node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE); if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); allocated_rhs = g_list_prepend(allocated_rhs, chosen); } } } if (constraint->score >= INFINITY) { node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE); } g_list_free(allocated_rhs); } enum pe_action_flags container_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = 0; container_variant_data_t *data = NULL; get_container_variant_data(data, action->rsc); if(data->child) { flags = summary_action_flags(action, data->child->children, node); } else { GListPtr containers = get_container_list(action->rsc); flags = summary_action_flags(action, containers, node); g_list_free(containers); } return flags; } resource_t * find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc, enum rsc_role_e filter, gboolean current) { GListPtr gIter = NULL; GListPtr children = NULL; if (local_node == NULL) { crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, local_node->details->uname); children = get_containers_or_children(rsc); for 
(gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if(is_child_compatible(child_rsc, local_node, filter, current)) { crm_trace("Pairing %s with %s on %s", local_child->id, child_rsc->id, local_node->details->uname); return child_rsc; } } crm_trace("Can't pair %s with %s", local_child->id, rsc->id); if(children != rsc->children) { g_list_free(children); } return NULL; } static container_grouping_t * tuple_for_docker(resource_t *rsc, resource_t *docker, node_t *node) { if(rsc->variant == pe_container) { container_variant_data_t *data = NULL; get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(tuple->child && docker == tuple->docker && node->details == tuple->node->details) { return tuple; } } } return NULL; } static enum pe_graph_flags container_update_interleave_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { GListPtr gIter = NULL; GListPtr children = NULL; gboolean current = FALSE; enum pe_graph_flags changed = pe_graph_none; /* Fix this - lazy */ if (crm_ends_with(first->uuid, "_stopped_0") || crm_ends_with(first->uuid, "_demoted_0")) { current = TRUE; } children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current); if (first_child == NULL && current) { crm_trace("Ignore"); } else if (first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do? * * If there is no-one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) { pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); if(assign_node(then_child, NULL, TRUE)) { changed |= pe_graph_updated_then; } } } else { pe_action_t *first_action = NULL; pe_action_t *then_action = NULL; enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); container_grouping_t *first_tuple = tuple_for_docker(first->rsc, first_child, node); container_grouping_t *then_tuple = tuple_for_docker(then->rsc, then_child, node); if(strstr(first->task, "stop") && first_tuple && first_tuple->child) { /* Except for 'stopped' we should be looking at the * in-container resource, actions for the child will * happen later and are therefor more likely to align * with the user's intent. 
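 *
 * E.g. for an ordering "A_stop_0 then B" (with "A" a hypothetical bundle),
 * the lookup below uses the in-container child's stop action rather than
 * the docker resource's.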
*/ first_action = find_first_action(first_tuple->child->actions, NULL, task2text(task), node); } else { first_action = find_first_action(first_child->actions, NULL, task2text(task), node); } if(strstr(then->task, "mote") && then_tuple && then_tuple->child) { /* Promote/demote actions will never be found for the * docker resource, look in the child instead * * Alternatively treat: * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY' */ then_action = find_first_action(then_tuple->child->actions, NULL, then->task, node); } else { then_action = find_first_action(then_child->actions, NULL, then->task, node); } if (first_action == NULL) { if (is_not_set(first_child->flags, pe_rsc_orphan) && crm_str_eq(first_task, RSC_STOP, TRUE) == FALSE && crm_str_eq(first_task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (first)", first_task, first_child->id); } else { crm_trace("No action found for %s in %s%s (first)", first_task, first_child->id, is_set(first_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } /* We're only interested if 'then' is neither stopping nor being demoted */ if (then_action == NULL) { if (is_not_set(then_child->flags, pe_rsc_orphan) && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE && crm_str_eq(then->task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } else { crm_trace("No action found for %s in %s%s (then)", then->task, then_child->id, is_set(then_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x", first_action->uuid, is_set(first_action->flags, pe_action_optional), then_action->uuid, is_set(then_action->flags, pe_action_optional), type); changed |= (pe_graph_updated_first | pe_graph_updated_then); } if(first_action && then_action) { changed |= then_child->cmds->update_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type); } else { crm_err("Nothing found either for %s (%p) or %s (%p) %s", first_child->id, first_action, then_child->id, then_action, task2text(task)); } } } if(children != then->rsc->children) { g_list_free(children); } return changed; } bool can_interleave_actions(pe_action_t *first, pe_action_t *then) { bool interleave = FALSE; resource_t *rsc = NULL; const char *interleave_s = NULL; if(first->rsc == NULL || then->rsc == NULL) { crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc == then->rsc) { crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) { crm_trace("Not interleaving %s with %s (both sides must be clones, masters, or bundles)", first->uuid, then->uuid); return FALSE; } if (crm_ends_with(then->uuid, "_stop_0") || crm_ends_with(then->uuid, "_demote_0")) { rsc = first->rsc; } else { rsc = then->rsc; } interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? 
"yes" : "no", rsc->id); return interleave; } enum pe_graph_flags container_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; crm_trace("%s -> %s", first->uuid, then->uuid); if(can_interleave_actions(first, then)) { changed = container_update_interleave_actions(first, then, node, flags, filter, type); } else if(then->rsc) { GListPtr gIter = NULL; GListPtr children = NULL; // Handle the 'primitive' ordering case changed |= native_update_actions(first, then, node, flags, filter, type); // Now any children (or containers in the case of a bundle) children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; enum pe_graph_flags then_child_changed = pe_graph_none; action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node); if (then_child_action) { enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node); if (is_set(then_child_flags, pe_action_runnable)) { then_child_changed |= then_child->cmds->update_actions(first, then_child_action, node, flags, filter, type); } changed |= then_child_changed; if (then_child_changed & pe_graph_updated_then) { for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *next = (action_wrapper_t *) lpc->data; update_action(next->action); } } } } if(children != then->rsc->children) { g_list_free(children); } } return changed; } void container_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { container_variant_data_t *container_data = NULL; get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (tuple->docker) { tuple->docker->cmds->rsc_location(tuple->docker, constraint); } if(tuple->ip) { tuple->ip->cmds->rsc_location(tuple->ip, constraint); } } if(container_data->child && (constraint->role_filter == RSC_ROLE_SLAVE || constraint->role_filter == RSC_ROLE_MASTER)) { container_data->child->cmds->rsc_location(container_data->child, constraint); container_data->child->rsc_location = g_list_prepend(container_data->child->rsc_location, constraint); } } void container_expand(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); + + if(container_data->child) { + container_data->child->cmds->expand(container_data->child, data_set); + } + for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if (tuple->docker && tuple->remote && tuple->docker->allocated_to && fix_remote_addr(tuple->remote)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR); g_hash_table_replace(tuple->remote->parameters, strdup("addr"), strdup(tuple->docker->allocated_to->details->uname)); crm_xml_add(nvpair, "value", tuple->docker->allocated_to->details->uname); } if(tuple->ip) { 
tuple->ip->cmds->expand(tuple->ip, data_set); } - if(tuple->child) { - tuple->child->cmds->expand(tuple->child, data_set); - } if(tuple->docker) { tuple->docker->cmds->expand(tuple->docker, data_set); } if(tuple->remote) { tuple->remote->cmds->expand(tuple->remote, data_set); } } } gboolean container_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { bool any_created = FALSE; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return FALSE); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { any_created |= tuple->ip->cmds->create_probe(tuple->ip, node, complete, force, data_set); } if(tuple->child && node->details == tuple->node->details) { any_created |= tuple->child->cmds->create_probe(tuple->child, node, complete, force, data_set); } if(tuple->docker) { bool created = tuple->docker->cmds->create_probe(tuple->docker, node, complete, force, data_set); if(created) { any_created = TRUE; /* If we're limited to one replica per host (due to * the lack of an IP range probably), then we don't * want any of our peer containers starting until * we've established that no other copies are already * running. * * Partly this is to ensure that replicas_per_host is * observed, but also to ensure that the containers * don't fail to start because the necessary port * mappings (which won't include an IP for uniqueness) * are already taken */ for (GListPtr tIter = container_data->tuples; tIter != NULL && container_data->replicas_per_host == 1; tIter = tIter->next) { container_grouping_t *other = (container_grouping_t *)tIter->data; if ((other != tuple) && (other != NULL) && (other->docker != NULL)) { custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_STATUS, 0), NULL, other->docker, generate_op_key(other->docker->id, RSC_START, 0), NULL, pe_order_optional, data_set); } } } } if(FALSE && tuple->remote) { // TODO: Needed? any_created |= tuple->remote->cmds->create_probe(tuple->remote, node, complete, force, data_set); } } return any_created; } void container_append_meta(resource_t * rsc, xmlNode * xml) { } GHashTable * container_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } void container_LogActions( resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { LogActions(tuple->ip, data_set, terminal); } if(tuple->docker) { LogActions(tuple->docker, data_set, terminal); } if(tuple->remote) { LogActions(tuple->remote, data_set, terminal); } if(tuple->child) { LogActions(tuple->child, data_set, terminal); } } }