diff --git a/lib/pengine/container.c b/lib/pengine/container.c index 9df8161f07..45fd62ef80 100644 --- a/lib/pengine/container.c +++ b/lib/pengine/container.c @@ -1,1102 +1,1290 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define VARIANT_CONTAINER 1 #include "./variant.h" void tuple_free(container_grouping_t *tuple); static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static int allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max) { if(data->ip_range_start == NULL) { return 0; } else if(data->ip_last) { tuple->ipaddr = next_ip(data->ip_last); } else { tuple->ipaddr = strdup(data->ip_range_start); } data->ip_last = tuple->ipaddr; #if 0 return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d", data->prefix, tuple->offset, tuple->ipaddr, data->prefix, tuple->offset, data->prefix, tuple->offset); #else - return snprintf(buffer, max, " --add-host=%s-%d:%s", - data->prefix, tuple->offset, tuple->ipaddr); + if (data->type == PE_CONTAINER_TYPE_DOCKER) { + return snprintf(buffer, max, " --add-host=%s-%d:%s", + data->prefix, tuple->offset, tuple->ipaddr); + } else if (data->type == PE_CONTAINER_TYPE_RKT) { + return snprintf(buffer, max, " --hosts-entry=%s=%s-%d", + tuple->ipaddr, data->prefix, tuple->offset); + } else { + return 0; + } #endif } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, "ocf"); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } static void create_nvp(xmlNode *parent, const char *name, const char *value) { xmlNode *xml_nvp = create_xml_node(parent, XML_CIB_TAG_NVPAIR); crm_xml_set_id(xml_nvp, "%s-%s", ID(parent), name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_VALUE, value); } static void create_op(xmlNode *parent, const char *prefix, const char *task, const char *interval) { xmlNode *xml_op = create_xml_node(parent, "op"); crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval); crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval); crm_xml_add(xml_op, "name", task); } /*! 
* \internal * \brief Check whether cluster can manage resource inside container * * \param[in] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. */ static bool valid_network(container_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->replicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->replicas_per_host = 1; /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */ } return TRUE; } return FALSE; } static bool create_ip_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "ip", tuple->ipaddr); if(data->host_network) { create_nvp(xml_obj, "nic", data->host_network); } if(data->host_netmask) { create_nvp(xml_obj, "cidr_netmask", data->host_netmask); } else { create_nvp(xml_obj, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); create_op(xml_obj, ID(xml_ip), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) { return FALSE; } parent->children = g_list_append(parent->children, tuple->ip); } return TRUE; } static bool create_docker_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_docker = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset); crm_xml_sanitize_id(id); xml_docker = create_resource(id, "heartbeat", "docker"); free(id); xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "image", data->image); create_nvp(xml_obj, "allow_pull", "true"); create_nvp(xml_obj, "force_kill", "false"); create_nvp(xml_obj, "reuse", "false"); offset += snprintf(buffer+offset, max-offset, " --restart=no"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. 
*/ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " -h %s-%d", data->prefix, tuple->offset); } if(data->docker_network) { // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr); offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { container_mount_t *mount = pIter->data; if(mount->flags) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, tuple->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { container_port_t *port = pIter->data; if(tuple->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", tuple->ipaddr, port->source, port->target); } else { offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if(data->docker_run_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options); } if(data->docker_host_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options); } create_nvp(xml_obj, "run_opts", buffer); free(buffer); create_nvp(xml_obj, "mount_points", dbuffer); free(dbuffer); if(tuple->child) { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } else { create_nvp(xml_obj, "run_cmd", SBIN_DIR"/pacemaker_remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd"); * create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_docker, "operations"); create_op(xml_obj, ID(xml_docker), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? 
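/* Illustration only, not part of the patch, using hypothetical values: for a
 * bundle whose prefix is "httpd-bundle", with ip-range-start=192.168.122.131
 * and the default control port, replica 0 of the generated docker resource
 * ends up with a run_opts value along the lines of
 *
 *   --restart=no -h httpd-bundle-0 -e PCMK_remote_port=3121
 *   -p 192.168.122.131:3121:3121 --add-host=httpd-bundle-0:192.168.122.131
 *
 * plus one "-v source:target" per configured or implicit mount. The trailing
 * --add-host entry comes from docker_host_options, which allocate_ip() fills
 * in during container_unpack().
 */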
if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) { return FALSE; } parent->children = g_list_append(parent->children, tuple->docker); return TRUE; } +static bool +create_rkt_resource( + resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, + pe_working_set_t * data_set) +{ + int offset = 0, max = 4096; + char *buffer = calloc(1, max+1); + + int doffset = 0, dmax = 1024; + char *dbuffer = calloc(1, dmax+1); + + char *id = NULL; + xmlNode *xml_docker = NULL; + xmlNode *xml_obj = NULL; + + int volid = 0; + + id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset); + crm_xml_sanitize_id(id); + xml_docker = create_resource(id, "heartbeat", "rkt"); + free(id); + + xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS); + crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); + + create_nvp(xml_obj, "image", data->image); + create_nvp(xml_obj, "allow_pull", "true"); + create_nvp(xml_obj, "force_kill", "false"); + create_nvp(xml_obj, "reuse", "false"); + + /* Set a container hostname only if we have an IP to map it to. + * The user can set -h or --uts=host themselves if they want a nicer + * name for logs, but this makes applications happy who need their + * hostname to match the IP they bind to. + */ + if (data->ip_range_start != NULL) { + offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d", + data->prefix, tuple->offset); + } + + if(data->docker_network) { +// offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr); + offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network); + } + + if(data->control_port) { + offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port); + } else { + offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); + } + + for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { + container_mount_t *mount = pIter->data; + + if(mount->flags) { + char *source = crm_strdup_printf( + "%s/%s-%d", mount->source, data->prefix, tuple->offset); + + if(doffset > 0) { + doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); + } + doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); + offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source); + if(mount->options) { + offset += snprintf(buffer+offset, max-offset, ",%s", mount->options); + } + offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target); + free(source); + + } else { + offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source); + if(mount->options) { + offset += snprintf(buffer+offset, max-offset, ",%s", mount->options); + } + offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target); + } + volid++; + } + + for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { + container_port_t *port = pIter->data; + + if(tuple->ipaddr) { + offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s", + port->target, tuple->ipaddr, port->source); + } else { + offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source); + } + } + + if(data->docker_run_options) { + offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options); + } + + if(data->docker_host_options) { + offset += snprintf(buffer+offset, max-offset, " %s", 
data->docker_host_options); + } + + create_nvp(xml_obj, "run_opts", buffer); + free(buffer); + + create_nvp(xml_obj, "mount_points", dbuffer); + free(dbuffer); + + if(tuple->child) { + if(data->docker_run_command) { + create_nvp(xml_obj, "run_cmd", data->docker_run_command); + } else { + create_nvp(xml_obj, "run_cmd", SBIN_DIR"/pacemaker_remoted"); + } + + /* TODO: Allow users to specify their own? + * + * We just want to know if the container is alive, we'll + * monitor the child independently + */ + create_nvp(xml_obj, "monitor_cmd", "/bin/true"); + /* } else if(child && data->untrusted) { + * Support this use-case? + * + * The ability to have resources started/stopped by us, but + * unable to set attributes, etc. + * + * Arguably better to control API access this with ACLs like + * "normal" remote nodes + * + * create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd"); + * create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); + */ + } else { + if(data->docker_run_command) { + create_nvp(xml_obj, "run_cmd", data->docker_run_command); + } + + /* TODO: Allow users to specify their own? + * + * We don't know what's in the container, so we just want + * to know if it is alive + */ + create_nvp(xml_obj, "monitor_cmd", "/bin/true"); + } + + + xml_obj = create_xml_node(xml_docker, "operations"); + create_op(xml_obj, ID(xml_docker), "monitor", "60s"); + + // TODO: Other ops? Timeouts and intervals from underlying resource? + + if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) { + return FALSE; + } + parent->children = g_list_append(parent->children, tuple->docker); + return TRUE; +} + /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pe_node_t *) match)->weight = -INFINITY; } if (rsc->children) { GListPtr child; for (child = rsc->children; child != NULL; child = child->next) { disallow_node((resource_t *) (child->data), uname); } } } static bool create_remote_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if (tuple->child && valid_network(data)) { GHashTableIter gIter; GListPtr rsc_iter = NULL; node_t *node = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset); const char *uname = NULL; if (remote_id_conflict(id, data_set)) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset); CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE); } xml_remote = create_resource(id, "pacemaker", "remote"); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during data set cleanup to use as * the node ID and uname. 
*/ free(id); id = NULL; uname = ID(xml_remote); xml_obj = create_xml_node(xml_remote, "operations"); create_op(xml_obj, uname, "monitor", "60s"); xml_obj = create_xml_node(xml_remote, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); if(tuple->ipaddr) { create_nvp(xml_obj, "addr", tuple->ipaddr); } else { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside create_nvp(xml_obj, "addr", "#uname"); } if(data->control_port) { create_nvp(xml_obj, "port", data->control_port); } else { char *port_s = crm_itoa(DEFAULT_REMOTE_PORT); create_nvp(xml_obj, "port", port_s); free(port_s); } xml_obj = create_xml_node(xml_remote, XML_TAG_META_SETS); crm_xml_set_id(xml_obj, "%s-meta-%d", data->prefix, tuple->offset); create_nvp(xml_obj, XML_OP_ATTR_ALLOW_MIGRATE, "false"); /* This sets tuple->docker as tuple->remote's container, which is * similar to what happens with guest nodes. This is how the PE knows * that the bundle node is fenced by recovering docker, and that * remote should be ordered relative to docker. */ create_nvp(xml_obj, XML_RSC_ATTR_CONTAINER, tuple->docker->id); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(data_set->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, "remote", "-INFINITY", data_set); } else { node->weight = -INFINITY; } /* unpack_remote_nodes() ensures that each remote node and guest node * has a pe_node_t entry. Ideally, it would do the same for bundle nodes. * Unfortunately, a bundle has to be mostly unpacked before it's obvious * what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when common_unpack() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) { disallow_node((resource_t *) (rsc_iter->data), uname); } tuple->node = node_copy(node); tuple->node->weight = 500; if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) { return FALSE; } g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if(is_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -INFINITY; } } tuple->node->details->remote_rsc = tuple->remote; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). */ g_hash_table_insert(tuple->node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); /* One effect of this is that setup_container() will add * tuple->remote to tuple->docker's fillers, which will make * rsc_contains_remote_node() true for tuple->docker. * * tuple->child does NOT get added to tuple->docker's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking tuple->docker's migration * threshold. 
*/ parent->children = g_list_append(parent->children, tuple->remote); } return TRUE; } static bool create_container( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { - if(create_docker_resource(parent, data, tuple, data_set) == FALSE) { + if (data->type == PE_CONTAINER_TYPE_DOCKER && + create_docker_resource(parent, data, tuple, data_set) == FALSE) { + return TRUE; + } + if (data->type == PE_CONTAINER_TYPE_RKT && + create_rkt_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } + if(create_ip_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_remote_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(tuple->child && tuple->ipaddr) { add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr); } if(tuple->remote) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the docker container * is active. * * Makes it possible to have remote nodes, running docker * containers with pacemaker_remoted inside in order to start * services inside those containers. */ set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes); } return FALSE; } static void mount_free(container_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(container_port_t *port) { free(port->source); free(port->target); free(port); } gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; container_variant_data_t *container_data = NULL; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); container_data = calloc(1, sizeof(container_variant_data_t)); rsc->variant_opaque = container_data; container_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, "docker"); - if(xml_obj == NULL) { - return FALSE; + if (xml_obj != NULL) { + container_data->type = PE_CONTAINER_TYPE_DOCKER; + } else { + xml_obj = first_named_child(rsc->xml, "rkt"); + if (xml_obj != NULL) { + container_data->type = PE_CONTAINER_TYPE_RKT; + } else { + return FALSE; + } } value = crm_element_value(xml_obj, "masters"); container_data->masters = crm_parse_int(value, "0"); if (container_data->masters < 0) { pe_err("'masters' for %s must be nonnegative integer, using 0", rsc->id); container_data->masters = 0; } value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (container_data->masters > 0)) { container_data->replicas = container_data->masters; } else { container_data->replicas = crm_parse_int(value, "1"); } if (container_data->replicas < 1) { pe_err("'replicas' for %s must be positive integer, using 1", rsc->id); container_data->replicas = 1; } /* * Communication between containers on the same host via the * floating IPs only works if docker is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); container_data->replicas_per_host = crm_parse_int(value, "1"); if (container_data->replicas_per_host < 1) { pe_err("'replicas-per-host' for %s must be positive integer, using 1", rsc->id); container_data->replicas_per_host = 1; } if (container_data->replicas_per_host == 1) { clear_bit(rsc->flags, pe_rsc_unique); } container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command"); container_data->docker_run_options = crm_element_value_copy(xml_obj, "options"); container_data->image = 
crm_element_value_copy(xml_obj, "image"); container_data->docker_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); container_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); container_data->control_port = crm_element_value_copy(xml_obj, "control-port"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_port_t *port = calloc(1, sizeof(container_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } container_data->ports = g_list_append(container_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_mount_t *mount = calloc(1, sizeof(container_mount_t)); mount->source = crm_element_value_copy(xml_child, "source-dir"); if(mount->source == NULL) { mount->source = crm_element_value_copy(xml_child, "source-dir-root"); mount->flags = 1; } mount->target = crm_element_value_copy(xml_child, "target-dir"); mount->options = crm_element_value_copy(xml_child, "options"); if(mount->source && mount->target) { container_data->mounts = g_list_append(container_data->mounts, mount); } else { pe_err("Invalid mount directive %s", ID(xml_child)); mount_free(mount); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(container_data)) { char *value = NULL; xmlNode *xml_set = NULL; if(container_data->masters > 0) { xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER); } else { xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); } crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name); create_nvp(xml_set, XML_RSC_ATTR_ORDERED, "true"); value = crm_itoa(container_data->replicas); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = crm_itoa(container_data->replicas_per_host); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); if(container_data->replicas_per_host > 1) { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "true"); } else { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "false"); } if(container_data->masters) { value = crm_itoa(container_data->masters); create_nvp(xml_set, XML_RSC_ATTR_MASTER_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GListPtr childIter = NULL; resource_t *new_rsc = NULL; container_mount_t *mount = NULL; container_port_t *port = NULL; int offset = 0, max = 1024; char *buffer = NULL; if (common_unpack(xml_resource, &new_rsc, rsc, 
data_set) == FALSE) { pe_err("Failed unpacking resource %s", ID(rsc->xml)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } container_data->child = new_rsc; mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->target = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->options = NULL; mount->flags = 0; container_data->mounts = g_list_append(container_data->mounts, mount); mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(CRM_LOG_DIR "/bundles"); mount->target = strdup("/var/log"); mount->options = NULL; mount->flags = 1; container_data->mounts = g_list_append(container_data->mounts, mount); port = calloc(1, sizeof(container_port_t)); if(container_data->control_port) { port->source = strdup(container_data->control_port); } else { port->source = crm_itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); container_data->ports = g_list_append(container_data->ports, port); buffer = calloc(1, max+1); for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->child = childIter->data; tuple->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if(is_set(tuple->child->flags, pe_rsc_notify)) { set_bit(container_data->child->flags, pe_rsc_notify); } offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, tuple); } container_data->docker_host_options = buffer; } else { // Just a naked container, no pacemaker-remote int offset = 0, max = 1024; char *buffer = calloc(1, max+1); for(int lpc = 0; lpc < container_data->replicas; lpc++) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->offset = lpc; offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, tuple); } container_data->docker_host_options = buffer; } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; // TODO: Remove from list if create_container() returns TRUE create_container(rsc, container_data, tuple, data_set); } if(container_data->child) { rsc->children = g_list_append(rsc->children, container_data->child); } return TRUE; } static int tuple_rsc_active(resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean container_active(resource_t * rsc, gboolean all) { container_variant_data_t *container_data = NULL; GListPtr iter = NULL; get_container_variant_data(container_data, rsc); for (iter = container_data->tuples; iter != NULL; iter = iter->next) { container_grouping_t *tuple = (container_grouping_t *)(iter->data); int rsc_active; rsc_active = tuple_rsc_active(tuple->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->docker, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return 
TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } resource_t * find_container_child(const char *stem, resource_t * rsc, node_t *node) { container_variant_data_t *container_data = NULL; resource_t *parent = uber_parent(rsc); CRM_ASSERT(parent->parent); parent = parent->parent; get_container_variant_data(container_data, parent); if (is_not_set(rsc->flags, pe_rsc_unique)) { for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->node->details == node->details) { rsc = tuple->child; break; } } } if (rsc && safe_str_neq(stem, rsc->id)) { free(rsc->clone_name); rsc->clone_name = strdup(stem); } return rsc; } static void print_rsc_in_list(resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
<li>"); rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } +static const char* +container_type_as_string(enum container_type t) +{ + if (t == PE_CONTAINER_TYPE_DOCKER) { + return PE_CONTAINER_TYPE_DOCKER_S; + } else if (t == PE_CONTAINER_TYPE_RKT) { + return PE_CONTAINER_TYPE_RKT_S; + } else { + return PE_CONTAINER_TYPE_UNKNOWN_S; + } +} + static void container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_concat(pre_text, " ", ' '); get_container_variant_data(container_data, rsc); status_print("%sid); - status_print("type=\"docker\" "); + status_print("type=\"%s\" ", container_type_as_string(container_data->type)); status_print("image=\"%s\" ", container_data->image); status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print(">\n"); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); status_print("%s \n", pre_text, tuple->offset); print_rsc_in_list(tuple->ip, child_text, options, print_data); print_rsc_in_list(tuple->child, child_text, options, print_data); print_rsc_in_list(tuple->docker, child_text, options, print_data); print_rsc_in_list(tuple->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } static void tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data) { node_t *node = NULL; resource_t *rsc = tuple->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = tuple->docker; } if(tuple->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker)); } if(tuple->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr); } if(tuple->docker && tuple->docker->running_on != NULL) { node = tuple->docker->running_on->data; } else if (tuple->docker == NULL && rsc->running_on != NULL) { node = rsc->running_on->data; } common_print(rsc, pre_text, buffer, node, options, print_data); } void container_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { container_print_xml(rsc, pre_text, options, print_data); return; } get_container_variant_data(container_data, rsc); if (pre_text == NULL) { pre_text = " "; } - status_print("%sDocker container%s: %s [%s]%s%s\n", - pre_text, container_data->replicas>1?" set":"", rsc->id, container_data->image, + status_print("%s%s container%s: %s [%s]%s%s\n", + pre_text, container_type_as_string(container_data->type), + container_data->replicas>1?" set":"", rsc->id, container_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if (options & pe_print_html) { status_print("
<li>"); } if(is_set(options, pe_print_clone_details)) { child_text = crm_strdup_printf(" %s", pre_text); if(g_list_length(container_data->tuples) > 1) { status_print(" %sReplica[%d]\n", pre_text, tuple->offset); } if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } print_rsc_in_list(tuple->ip, child_text, options, print_data); print_rsc_in_list(tuple->docker, child_text, options, print_data); print_rsc_in_list(tuple->remote, child_text, options, print_data); print_rsc_in_list(tuple->child, child_text, options, print_data); if (options & pe_print_html) { status_print("
</ul>\n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); tuple_print(tuple, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("
</li>\n"); } } if (options & pe_print_html) { status_print("
    \n"); } } void tuple_free(container_grouping_t *tuple) { if(tuple == NULL) { return; } if(tuple->node) { free(tuple->node); tuple->node = NULL; } if(tuple->ip) { free_xml(tuple->ip->xml); tuple->ip->xml = NULL; tuple->ip->fns->free(tuple->ip); tuple->ip->xml = NULL; free_xml(tuple->ip->xml); tuple->ip = NULL; } if(tuple->docker) { free_xml(tuple->docker->xml); tuple->docker->xml = NULL; tuple->docker->fns->free(tuple->docker); tuple->docker = NULL; } if(tuple->remote) { free_xml(tuple->remote->xml); tuple->remote->xml = NULL; tuple->remote->fns->free(tuple->remote); tuple->remote = NULL; } free(tuple->ipaddr); free(tuple); } void container_free(resource_t * rsc) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(container_data->prefix); free(container_data->image); free(container_data->control_port); free(container_data->host_network); free(container_data->host_netmask); free(container_data->ip_range_start); free(container_data->docker_network); free(container_data->docker_run_options); free(container_data->docker_run_command); free(container_data->docker_host_options); g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free); g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(container_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); if(container_data->child) { free_xml(container_data->child->xml); container_data->child->xml = NULL; container_data->child->fns->free(container_data->child); } common_free(rsc); } enum rsc_role_e container_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e container_role = RSC_ROLE_UNKNOWN; return container_role; } diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h index c8fe159370..d89298edd3 100644 --- a/lib/pengine/variant.h +++ b/lib/pengine/variant.h @@ -1,154 +1,164 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef PE_VARIANT__H # define PE_VARIANT__H # if VARIANT_CLONE typedef struct clone_variant_data_s { int clone_max; int clone_node_max; int master_max; int master_node_max; int total_clones; int active_clones; int max_nodes; int masters_active; int masters_allocated; gboolean interleave; gboolean ordered; gboolean applied_master_prefs; gboolean merged_master_weights; notify_data_t *stop_notify; notify_data_t *start_notify; notify_data_t *demote_notify; notify_data_t *promote_notify; xmlNode *xml_obj_child; gboolean notify_confirm; } clone_variant_data_t; # define get_clone_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pe_clone || rsc->variant == pe_master); \ data = (clone_variant_data_t *)rsc->variant_opaque; # elif VARIANT_CONTAINER typedef struct { int offset; node_t *node; char *ipaddr; resource_t *ip; resource_t *child; resource_t *docker; resource_t *remote; } container_grouping_t; typedef struct { char *source; char *target; char *options; int flags; } container_mount_t; typedef struct { char *source; char *target; } container_port_t; +enum container_type { + PE_CONTAINER_TYPE_UNKNOWN, + PE_CONTAINER_TYPE_DOCKER, + PE_CONTAINER_TYPE_RKT +}; + +#define PE_CONTAINER_TYPE_UNKNOWN_S "unknown" +#define PE_CONTAINER_TYPE_DOCKER_S "Docker" +#define PE_CONTAINER_TYPE_RKT_S "rkt" + typedef struct container_variant_data_s { int masters; int replicas; int replicas_per_host; char *prefix; char *image; const char *ip_last; char *host_network; char *host_netmask; char *control_port; char *docker_network; char *ip_range_start; char *docker_host_options; char *docker_run_options; char *docker_run_command; resource_t *child; GListPtr tuples; /* container_grouping_t * */ GListPtr ports; /* */ GListPtr mounts; /* */ - + enum container_type type; } container_variant_data_t; # define get_container_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pe_container); \ CRM_ASSERT(rsc->variant_opaque != NULL); \ data = (container_variant_data_t *)rsc->variant_opaque; \ # elif VARIANT_GROUP typedef struct group_variant_data_s { int num_children; resource_t *first_child; resource_t *last_child; gboolean colocated; gboolean ordered; gboolean child_starting; gboolean child_stopping; } group_variant_data_t; # define get_group_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pe_group); \ CRM_ASSERT(rsc->variant_opaque != NULL); \ data = (group_variant_data_t *)rsc->variant_opaque; \ # elif VARIANT_NATIVE typedef struct native_variant_data_s { int dummy; } native_variant_data_t; # define get_native_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pe_native); \ CRM_ASSERT(rsc->variant_opaque != NULL); \ data = (native_variant_data_t *)rsc->variant_opaque; # endif #endif diff --git a/tools/regression.validity.exp b/tools/regression.validity.exp index d5edfe6a4b..2393a1e5b2 100644 --- a/tools/regression.validity.exp +++ b/tools/regression.validity.exp @@ -1,425 +1,439 @@ Created new pacemaker configuration Setting up shadow instance A new shadow instance was created. 
To begin using it paste the following into your shell: CIB_shadow=tools-regression ; export CIB_shadow =#=#=#= Begin test: Try to make resulting CIB invalid (enum violation) =#=#=#= Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid (enum violation) =#=#=#= =#=#=#= End test: Try to make resulting CIB invalid (enum violation) - Update does not conform to the configured schema (203) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid (enum violation) =#=#=#= Begin test: Run crm_simulate with invalid CIB (enum violation) =#=#=#= ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.2' validation (4 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-1.2 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.3' validation (5 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-1.3 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.0' validation (6 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.0 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.1' validation (7 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.1 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.2' validation (8 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.2 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.3' validation (9 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.3 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.4' validation (10 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.4 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.5' validation (11 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.5 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.6' validation (12 of X) 
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.6 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.7' validation (13 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.7 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.8' validation (14 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.8 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.9' validation (15 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-2.9 validation failed -Your current configuration pacemaker-1.2 could not validate with any schema in range [pacemaker-1.2, pacemaker-2.9], cannot upgrade to pacemaker-2.0. +( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.10' validation (16 of X) +element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order +element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order +( schemas.c:NNN ) trace: update_validation: pacemaker-2.10 validation failed +Your current configuration pacemaker-1.2 could not validate with any schema in range [pacemaker-1.2, pacemaker-2.10], cannot upgrade to pacemaker-2.0. 
=#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Required key not available (126) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation) =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= =#=#=#= End test: Try to make resulting CIB invalid (unrecognized validate-with) - Update does not conform to the configured schema (203) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= Begin test: Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#= ( schemas.c:NNN ) debug: update_validation: Unknown validation schema ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-0.6' validation (0 of X) element rsc_order: validity error : Element rsc_order does not carry attribute to element rsc_order: validity error : Element rsc_order does not carry attribute from element rsc_order: validity error : No declaration for attribute first of element rsc_order element rsc_order: validity error : No declaration for attribute first-action of element rsc_order element rsc_order: validity error : No declaration for attribute then of element rsc_order ( schemas.c:NNN ) trace: update_validation: pacemaker-0.6 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'transitional-0.6' validation (1 of X) element rsc_order: validity error : Element rsc_order does not carry attribute to element rsc_order: validity error : Element rsc_order does not carry attribute from element rsc_order: validity error : No declaration for attribute first of element rsc_order element rsc_order: validity error : No declaration for attribute first-action of element rsc_order element rsc_order: validity error : No declaration for attribute then of element rsc_order ( schemas.c:NNN ) trace: update_validation: transitional-0.6 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-0.7' validation (2 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-0.7 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.0' validation (3 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-1.0 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.2' validation (4 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-1.2 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.3' validation (5 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-1.3 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.0' validation (6 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.0 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.1' validation (7 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.1 
validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.2' validation (8 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.2 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.3' validation (9 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.3 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.4' validation (10 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.4 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.5' validation (11 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.5 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.6' validation (12 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.6 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.7' validation (13 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.7 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.8' validation (14 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.8 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.9' validation (15 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ( schemas.c:NNN ) trace: update_validation: pacemaker-2.9 validation failed -Your current configuration pacemaker-9999.0 could not validate with any schema in range [unknown, pacemaker-2.9], cannot upgrade to pacemaker-2.0. +( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.10' validation (16 of X) +element cib: Relax-NG validity error : Invalid attribute validate-with for element cib +( schemas.c:NNN ) trace: update_validation: pacemaker-2.10 validation failed +Your current configuration pacemaker-9999.0 could not validate with any schema in range [unknown, pacemaker-2.10], cannot upgrade to pacemaker-2.0. 
=#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Required key not available (126) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= =#=#=#= End test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) - Update does not conform to the configured schema (203) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= Begin test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#= ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.2' validation (4 of X) element tags: Relax-NG validity error : Element configuration has extra content: tags ( schemas.c:NNN ) trace: update_validation: pacemaker-1.2 validation failed ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-1.3' validation (5 of X) ( schemas.c:NNN ) debug: update_validation: Upgrading pacemaker-1.3-style configuration to pacemaker-2.0 with upgrade-1.3.xsl ( schemas.c:NNN ) info: update_validation: Transformation upgrade-1.3.xsl successful ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.0' validation (6 of X) ( schemas.c:NNN ) debug: update_validation: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.1' validation (7 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.1 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.2' validation (8 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.2 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.3' validation (9 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.3 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.4' validation (10 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.4 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.5' validation (11 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.5 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.6' validation (12 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.6 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.7' validation (13 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.7 ( schemas.c:NNN ) debug: 
update_validation: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.8' validation (14 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.8 ( schemas.c:NNN ) debug: update_validation: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 ( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.9' validation (15 of X) ( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.9 -( schemas.c:NNN ) trace: update_validation: Stopping at pacemaker-2.9 -( schemas.c:NNN ) info: update_validation: Transformed the configuration from pacemaker-1.2 to pacemaker-2.9 +( schemas.c:NNN ) debug: update_validation: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 +( schemas.c:NNN ) debug: update_validation: Testing 'pacemaker-2.10' validation (16 of X) +( schemas.c:NNN ) debug: update_validation: Configuration valid for schema: pacemaker-2.10 +( schemas.c:NNN ) trace: update_validation: Stopping at pacemaker-2.10 +( schemas.c:NNN ) info: update_validation: Transformed the configuration from pacemaker-1.2 to pacemaker-2.10 error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity Current cluster status: dummy1 (ocf::pacemaker:Dummy): Stopped dummy2 (ocf::pacemaker:Dummy): Stopped Transition Summary: error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity Executing cluster transition: Revised cluster status: dummy1 (ocf::pacemaker:Dummy): Stopped dummy2 (ocf::pacemaker:Dummy): Stopped =#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#= element rsc_order: validity error : Element rsc_order does not carry attribute to element rsc_order: validity error : Element rsc_order does not carry attribute from element rsc_order: validity error : No declaration for attribute first of element rsc_order element rsc_order: validity error : No declaration for attribute first-action of element rsc_order element rsc_order: validity error : No declaration for attribute then of element rsc_order element rsc_order: validity error : Element rsc_order does not carry attribute to element rsc_order: validity error : Element rsc_order does not carry attribute from element rsc_order: validity error : No declaration for attribute first of element rsc_order element rsc_order: validity error : No declaration for attribute first-action of element rsc_order element rsc_order: validity error : No declaration for attribute then of element rsc_order =#=#=#= Current cib after: Make resulting CIB valid, although without validate-with attribute =#=#=#= =#=#=#= End test: Make resulting CIB valid, although without validate-with attribute - OK (0) =#=#=#= * Passed: cibadmin - Make resulting CIB valid, although without validate-with attribute =#=#=#= 
=#=#=#= Begin test: Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#=
Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues.
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute to
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute from
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute then of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute to
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute from
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute then of element rsc_order
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
dummy1 (ocf::pacemaker:Dummy): Stopped
dummy2 (ocf::pacemaker:Dummy): Stopped
Transition Summary:
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Executing cluster transition:
Revised cluster status:
dummy1 (ocf::pacemaker:Dummy): Stopped
dummy2 (ocf::pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with valid CIB, but without validate-with attribute
=#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
element rsc_order: validity error : Element rsc_order does not carry attribute to
element rsc_order: validity error : Element rsc_order does not carry attribute from
element rsc_order: validity error : No declaration for attribute first of element rsc_order
element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
element rsc_order: validity error : No declaration for attribute then of element rsc_order
element rsc_order: validity error : Element rsc_order does not carry attribute to
element rsc_order: validity error : Element rsc_order does not carry attribute from
element rsc_order: validity error : No declaration for attribute first of element rsc_order
element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
element rsc_order: validity error : No declaration for attribute then of element rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
=#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
=#=#=#= End test: Make resulting CIB invalid, and without validate-with attribute - OK (0) =#=#=#=
* Passed: cibadmin - Make resulting CIB invalid, and without validate-with attribute
=#=#=#= Begin test: Run crm_simulate with invalid CIB, also without validate-with attribute =#=#=#=
Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues.
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute to
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute from
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute then of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute to
bad-1.2.xml:10: element rsc_order: validity error : Element rsc_order does not carry attribute from
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute first-action of element rsc_order
bad-1.2.xml:10: element rsc_order: validity error : No declaration for attribute then of element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+bad-1.2.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
(constraints.:430 ) error: unpack_simple_rsc_order: Cannot invert rsc_order constraint ord_1-2. Please specify the inverse manually.
Current cluster status:
dummy1 (ocf::pacemaker:Dummy): Stopped
dummy2 (ocf::pacemaker:Dummy): Stopped
Transition Summary:
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Executing cluster transition:
Revised cluster status:
dummy1 (ocf::pacemaker:Dummy): Stopped
dummy2 (ocf::pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB, also without validate-with attribute
diff --git a/xml/crm_mon.rng b/xml/crm_mon.rng
index 731c118615..3d9cd2217d 100644
--- a/xml/crm_mon.rng
+++ b/xml/crm_mon.rng
@@ -1,327 +1,328 @@
[RelaxNG markup lost during extraction; the surviving text shows an existing value list (unknown, member, remote, ping, docker) to which the hunk adds one new value, rkt.]
diff --git a/xml/resources-2.10.rng b/xml/resources-2.10.rng
new file mode 100644
index 0000000000..ac1b11a16a
--- /dev/null
+++ b/xml/resources-2.10.rng
@@ -0,0 +1,329 @@
[New 329-line RelaxNG schema; its markup was lost during extraction. The surviving text content is limited to a value pattern ([0-9\-]+) and the enumerations "Stopped Started Slave Master", "nothing quorum fencing unfencing", "ignore block stop restart standby fence restart-container", and "ocf lsb heartbeat stonith upstart service systemd nagios".]
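For readers of this patch, the point of the new pacemaker-2.10 schema and the crm_mon.rng addition is to let a container-based resource declare rkt rather than docker as its runtime. Since the schema text is not legible above, the following is only a sketch of a CIB fragment that such a schema would presumably accept: the element and attribute names are assumed to mirror the existing docker-based syntax, and the id/image/address values are invented for illustration.

    <!-- illustrative sketch only; not copied from the schema in this patch -->
    <bundle id="httpd-bundle">
      <rkt image="httpd-image" replicas="2" replicas-per-host="1"/>
      <network ip-range-start="192.168.122.131" host-netmask="24"/>
      <primitive id="httpd" class="ocf" provider="heartbeat" type="apache"/>
    </bundle>

The hyphenated attribute names above (replicas-per-host, ip-range-start, host-netmask) correspond to the per-replica options this patch series deals with elsewhere; whether the 2.10 schema spells them exactly this way cannot be confirmed from the stripped file content.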