diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 8175d462fd..60f3e7f888 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,1962 +1,1971 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <ctype.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <pe_status_private.h>
#define PE__VARIANT_BUNDLE 1
#include "./variant.h"
static char *
next_ip(const char *last_ip)
{
unsigned int oct1 = 0;
unsigned int oct2 = 0;
unsigned int oct3 = 0;
unsigned int oct4 = 0;
int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
if (rc != 4) {
/* @TODO check for IPv6 */
return NULL;
} else if (oct3 > 253) {
return NULL;
} else if (oct4 > 253) {
++oct3;
oct4 = 1;
} else {
++oct4;
}
return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}
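/*!
* \internal
* \brief Assign the next address in the bundle's IP range to a replica
*
* \param[in,out] data Bundle variant data (tracks the last assigned address)
* \param[in,out] replica Replica to assign an address to
* \param[out] buffer Where to write the agent-specific host-mapping option
* \param[in] max Size of \p buffer
*
* \return Number of characters written to \p buffer (0 if the bundle has no
* ip-range-start or the container agent is unknown)
*/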
static int
allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
char *buffer, int max)
{
if(data->ip_range_start == NULL) {
return 0;
} else if(data->ip_last) {
replica->ipaddr = next_ip(data->ip_last);
} else {
replica->ipaddr = strdup(data->ip_range_start);
}
data->ip_last = replica->ipaddr;
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
if (data->add_host) {
return snprintf(buffer, max, " --add-host=%s-%d:%s",
data->prefix, replica->offset,
replica->ipaddr);
}
case PE__CONTAINER_AGENT_RKT:
return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
replica->ipaddr, data->prefix, replica->offset);
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return 0;
}
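/*!
* \internal
* \brief Create an XML definition for an implicit OCF primitive
*
* \param[in] name Resource ID
* \param[in] provider OCF provider
* \param[in] kind OCF agent type
*
* \return Newly created XML node (caller is responsible for freeing it)
*/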
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
crm_xml_add(rsc, XML_ATTR_ID, name);
crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
crm_xml_add(rsc, XML_ATTR_TYPE, kind);
return rsc;
}
/*!
* \internal
* \brief Check whether cluster can manage resource inside container
*
* \param[in] data Container variant data
*
* \return TRUE if networking configuration is acceptable, FALSE otherwise
*
* \note The resource is manageable if an IP range or control port has been
* specified. If a control port is used without an IP range, replicas per
* host must be 1.
*/
static bool
valid_network(pe__bundle_variant_data_t *data)
{
if(data->ip_range_start) {
return TRUE;
}
if(data->control_port) {
if(data->nreplicas_per_host > 1) {
pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
data->nreplicas_per_host = 1;
/* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
}
return TRUE;
}
return FALSE;
}
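/*!
* \internal
* \brief Create an implicit ocf:heartbeat:IPaddr2 resource for a replica
*
* If the bundle defines an IP range, build an IPaddr2 resource for the
* replica's address and unpack it as a child of the bundle.
*
* \return TRUE on success (or if the bundle has no IP range), FALSE otherwise
*/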
static bool
create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
if(data->ip_range_start) {
char *id = NULL;
xmlNode *xml_ip = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
crm_xml_sanitize_id(id);
xml_ip = create_resource(id, "heartbeat", "IPaddr2");
free(id);
xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
if(data->host_network) {
crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
}
if(data->host_netmask) {
crm_create_nvpair_xml(xml_obj, NULL,
"cidr_netmask", data->host_netmask);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
}
xml_obj = create_xml_node(xml_ip, "operations");
crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->ip);
}
return TRUE;
}
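/*!
* \internal
* \brief Create the implicit ocf:heartbeat:docker resource for a replica
*
* Builds the docker launch options (hostname, network, port and volume
* mappings, and the remote-port environment variable) and unpacks the
* resulting resource as a child of the bundle.
*
* \return TRUE on success, FALSE otherwise
*/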
static bool
create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_DOCKER_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control this API access with ACLs, like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
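/*!
* \internal
* \brief Create the implicit ocf:heartbeat:podman resource for a replica
*
* Same structure as the docker variant, except that podman has no restart
* policy option.
*
* \return TRUE on success, FALSE otherwise
*/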
static bool
create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_PODMAN_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
// FIXME: (bandini 2018-08) podman has no restart policies
//offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
// podman has no support for --link-local-ip
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control this API access with ACLs, like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent,
data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
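/*!
* \internal
* \brief Create the implicit ocf:heartbeat:rkt resource for a replica
*
* rkt uses named volumes and --hostname/--port syntax instead of the
* docker-style -h/-v/-p options.
*
* \return TRUE on success, FALSE otherwise
*/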
static bool
create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
int volid = 0;
id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_RKT_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
}
volid++;
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset,
" --port=%s:%s:%s", port->target,
replica->ipaddr, port->source);
} else {
offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control this API access with ACLs, like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
/*!
* \brief Ban a node from a resource's (and its children's) allowed nodes list
*
* \param[in,out] rsc Resource to modify
* \param[in] uname Name of node to ban
*/
static void
disallow_node(pe_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
((pe_node_t *) match)->weight = -INFINITY;
((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
}
if (rsc->children) {
GListPtr child;
for (child = rsc->children; child != NULL; child = child->next) {
disallow_node((pe_resource_t *) (child->data), uname);
}
}
}
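/*!
* \internal
* \brief Create the implicit remote connection resource for a replica
*
* If the replica has a child resource and the bundle's networking is valid,
* create a guest node entry and a remote connection resource so the cluster
* can manage services running inside the container.
*
* \return TRUE on success (or if no remote resource is needed), FALSE otherwise
*/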
static bool
create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
GListPtr rsc_iter = NULL;
pe_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
const char *uname = NULL;
const char *connect_name = NULL;
if (pe_find_resource(data_set->resources, id) != NULL) {
free(id);
// The biggest hammer we have
id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
replica->child->id, replica->offset);
//@TODO return false instead of asserting?
CRM_ASSERT(pe_find_resource(data_set->resources, id) == NULL);
}
/* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
* connection does not have its own IP is a magic string that we use to
* support nested remotes (i.e. a bundle running on a remote node).
*/
connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
if (data->control_port == NULL) {
port_s = crm_itoa(DEFAULT_REMOTE_PORT);
}
/* This sets replica->container as replica->remote's container, which is
* similar to what happens with guest nodes. This is how the scheduler
* knows that the bundle node is fenced by recovering the container, and
* that remote should be ordered relative to the container.
*/
xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
NULL, NULL, NULL,
connect_name, (data->control_port?
data->control_port : port_s));
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
* need something that will get freed during data set cleanup to use as
* the node ID and uname.
*/
free(id);
id = NULL;
uname = ID(xml_remote);
/* Ensure a node has been created for the guest (it may have already
* been, if it has a permanent node attribute), and ensure its weight is
* -INFINITY so no other resources can run on it.
*/
node = pe_find_node(data_set->nodes, uname);
if (node == NULL) {
node = pe_create_node(uname, uname, "remote", "-INFINITY",
data_set);
} else {
node->weight = -INFINITY;
}
node->rsc_discover_mode = pe_discover_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
* has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
* Unfortunately, a bundle has to be mostly unpacked before it's obvious
* what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
* likely place for this to happen is when common_unpack() calls
* resource_location() to set a default score in symmetric clusters.
* This adds a node *copy* to each resource's allowed nodes, and these
* copies will have the wrong weight.
*
* As a hacky workaround, fix those copies here.
*
* @TODO Possible alternative: ensure bundles are unpacked before other
* resources, so the weight is correct before any copies are made.
*/
for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
disallow_node((pe_resource_t *) (rsc_iter->data), uname);
}
replica->node = pe__copy_node(node);
replica->node->weight = 500;
replica->node->rsc_discover_mode = pe_discover_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
g_hash_table_destroy(replica->child->allowed_nodes);
}
replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
g_str_equal,
NULL, free);
g_hash_table_insert(replica->child->allowed_nodes,
(gpointer) replica->node->details->id,
pe__copy_node(replica->node));
{
pe_node_t *copy = pe__copy_node(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
}
if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
return FALSE;
}
g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
if (pe__is_guest_or_remote_node(node)) {
/* Remote resources can run only on 'normal' cluster nodes */
node->weight = -INFINITY;
}
}
replica->node->details->remote_rsc = replica->remote;
// Ensure pe__is_guest_node() functions correctly immediately
replica->remote->container = replica->container;
/* A bundle's #kind is closer to "container" (guest node) than the
* "remote" set by pe_create_node().
*/
g_hash_table_insert(replica->node->details->attrs,
strdup(CRM_ATTR_KIND), strdup("container"));
/* One effect of this is that setup_container() will add
* replica->remote to replica->container's fillers, which will make
* pe__resource_contains_guest_node() true for replica->container.
*
* replica->child does NOT get added to replica->container's fillers.
* The only noticeable effect if it did would be for its fail count to
* be taken into account when checking replica->container's migration
* threshold.
*/
parent->children = g_list_append(parent->children, replica->remote);
}
return TRUE;
}
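/*!
* \internal
* \brief Create all implicit resources for one bundle replica
*
* Creates the container resource for the configured agent type, plus the IP
* and remote connection resources when applicable.
*
* \return TRUE on success, FALSE otherwise
*/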
static bool
create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
if (!create_docker_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_PODMAN:
if (!create_podman_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_RKT:
if (!create_rkt_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
default: // PE__CONTAINER_AGENT_UNKNOWN
return FALSE;
}
if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if (replica->child && replica->ipaddr) {
add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
}
if (replica->remote) {
/*
* Allow the remote connection resource to be allocated to a
* different node than the one on which the container is active.
*
* This makes it possible to have Pacemaker Remote nodes running
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes);
}
return TRUE;
}
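/*!
* \internal
* \brief Add a mount definition to a bundle's list of mounts
*
* \param[in,out] bundle_data Bundle to add the mount to
* \param[in] source Source path on the host
* \param[in] target Target path inside the container
* \param[in] options Mount options (may be NULL)
* \param[in] flags Mount flags (pe__bundle_mount_* values)
*/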
static void
mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
const char *target, const char *options, uint32_t flags)
{
pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
mount->source = strdup(source);
mount->target = strdup(target);
if (options) {
mount->options = strdup(options);
}
mount->flags = flags;
bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
}
static void
mount_free(pe__bundle_mount_t *mount)
{
free(mount->source);
free(mount->target);
free(mount->options);
free(mount);
}
static void
port_free(pe__bundle_port_t *port)
{
free(port->source);
free(port->target);
free(port);
}
static pe__bundle_replica_t *
replica_for_remote(pe_resource_t *remote)
{
pe_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
return NULL;
}
while (top->parent != NULL) {
top = top->parent;
}
get_bundle_variant_data(bundle_data, top);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->remote == remote) {
return replica;
}
}
CRM_LOG_ASSERT(FALSE);
return NULL;
}
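/*!
* \brief Check whether a resource is a bundle connection needing the
* "#uname" address substitution (REMOTE_CONTAINER_HACK)
*
* \param[in] rsc Resource to check
*
* \return TRUE if \p rsc is an ocf:pacemaker:remote resource whose address
* parameter is the "#uname" placeholder, FALSE otherwise
*/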
bool
pe__bundle_needs_remote_name(pe_resource_t *rsc)
{
const char *value;
if (rsc == NULL) {
return FALSE;
}
value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
if (safe_str_eq(value, "#uname") == FALSE) {
return FALSE;
} else {
const char *match[3][2] = {
{ XML_ATTR_TYPE, "remote" },
{ XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF },
{ XML_AGENT_ATTR_PROVIDER, "pacemaker" },
};
for (int m = 0; m < 3; m++) {
value = crm_element_value(rsc->xml, match[m][0]);
if (safe_str_neq(value, match[m][1])) {
return FALSE;
}
}
}
return TRUE;
}
const char *
pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
pe_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
return NULL;
}
replica = replica_for_remote(rsc);
if (replica == NULL) {
return NULL;
}
node = replica->container->allocated_to;
if (node == NULL) {
/* If it won't be running anywhere after the
* transition, go with where it's running now.
*/
node = pe__current_node(replica->container);
}
if(node == NULL) {
crm_trace("Cannot determine address for bundle connection %s", rsc->id);
return NULL;
}
crm_trace("Setting address for bundle connection %s to bundle host %s",
rsc->id, node->details->uname);
if(xml != NULL && field != NULL) {
crm_xml_add(xml, field, node->details->uname);
}
return node->details->uname;
}
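/*!
* \internal
* \brief Unpack a bundle resource definition into variant data
*
* Parses the container agent options plus the network and storage sections,
* builds the implicit clone wrapping the bundled primitive (if any), and
* creates the per-replica container, IP, and remote connection resources.
*
* \return TRUE on success, FALSE on any unpacking error
*/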
gboolean
pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_resource = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
bool need_log_mount = TRUE;
CRM_ASSERT(rsc != NULL);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
rsc->variant_opaque = bundle_data;
bundle_data->prefix = strdup(rsc->id);
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
} else {
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
} else {
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
} else {
return FALSE;
}
}
}
value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, "masters");
}
bundle_data->promoted_max = crm_parse_int(value, "0");
if (bundle_data->promoted_max < 0) {
pe_err("%s for %s must be nonnegative integer, using 0",
XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
bundle_data->promoted_max = 0;
}
value = crm_element_value(xml_obj, "replicas");
if ((value == NULL) && bundle_data->promoted_max) {
bundle_data->nreplicas = bundle_data->promoted_max;
} else {
bundle_data->nreplicas = crm_parse_int(value, "1");
}
if (bundle_data->nreplicas < 1) {
pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
bundle_data->nreplicas = 1;
}
/*
* Communication between containers on the same host via the
* floating IPs only works if the container is started with:
* --userland-proxy=false --ip-masq=false
*/
value = crm_element_value(xml_obj, "replicas-per-host");
bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
if (bundle_data->nreplicas_per_host < 1) {
pe_err("'replicas-per-host' for %s must be positive integer, using 1",
rsc->id);
bundle_data->nreplicas_per_host = 1;
}
if (bundle_data->nreplicas_per_host == 1) {
clear_bit(rsc->flags, pe_rsc_unique);
}
bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
bundle_data->image = crm_element_value_copy(xml_obj, "image");
bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
xml_obj = first_named_child(rsc->xml, "network");
if(xml_obj) {
bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
value = crm_element_value(xml_obj, "add-host");
if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
bundle_data->add_host = TRUE;
}
for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
xml_child = __xml_next_element(xml_child)) {
pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
port->source = crm_element_value_copy(xml_child, "port");
if(port->source == NULL) {
port->source = crm_element_value_copy(xml_child, "range");
} else {
port->target = crm_element_value_copy(xml_child, "internal-port");
}
if(port->source != NULL && strlen(port->source) > 0) {
if(port->target == NULL) {
port->target = strdup(port->source);
}
bundle_data->ports = g_list_append(bundle_data->ports, port);
} else {
pe_err("Invalid port directive %s", ID(xml_child));
port_free(port);
}
}
}
xml_obj = first_named_child(rsc->xml, "storage");
for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
xml_child = __xml_next_element(xml_child)) {
const char *source = crm_element_value(xml_child, "source-dir");
const char *target = crm_element_value(xml_child, "target-dir");
const char *options = crm_element_value(xml_child, "options");
int flags = pe__bundle_mount_none;
if (source == NULL) {
source = crm_element_value(xml_child, "source-dir-root");
set_bit(flags, pe__bundle_mount_subdir);
}
if (source && target) {
mount_add(bundle_data, source, target, options, flags);
if (strcmp(target, "/var/log") == 0) {
need_log_mount = FALSE;
}
} else {
pe_err("Invalid mount directive %s", ID(xml_child));
}
}
xml_obj = first_named_child(rsc->xml, "primitive");
if (xml_obj && valid_network(bundle_data)) {
char *value = NULL;
xmlNode *xml_set = NULL;
xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
/* @COMPAT We no longer use the <master> tag, but we need to keep it as
* part of the resource name, so that bundles don't restart in a rolling
* upgrade. (It also avoids needing to change regression tests.)
*/
crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
(bundle_data->promoted_max? "master"
: (const char *)xml_resource->name));
xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
value = crm_itoa(bundle_data->nreplicas);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_INCARNATION_MAX, value);
free(value);
value = crm_itoa(bundle_data->nreplicas_per_host);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_INCARNATION_NODEMAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
(bundle_data->nreplicas_per_host > 1)?
XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
if (bundle_data->promoted_max) {
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
value = crm_itoa(bundle_data->promoted_max);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_PROMOTED_MAX, value);
free(value);
}
//crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
add_node_copy(xml_resource, xml_obj);
} else if(xml_obj) {
pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
rsc->id, ID(xml_obj));
return FALSE;
}
if(xml_resource) {
int lpc = 0;
GListPtr childIter = NULL;
pe_resource_t *new_rsc = NULL;
pe__bundle_port_t *port = NULL;
int offset = 0, max = 1024;
char *buffer = NULL;
if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", ID(rsc->xml));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
return FALSE;
}
bundle_data->child = new_rsc;
/* Currently, we always map the default authentication key location
* into the same location inside the container.
*
* Ideally, we would respect the host's PCMK_authkey_location, but:
* - it may be different on different nodes;
* - the actual connection will do extra checking to make sure the key
* file exists and is readable, that we can't do here on the DC
* - tools such as crm_resource and crm_simulate may not have the same
* environment variables as the cluster, causing operation digests to
* differ
*
* Always using the default location inside the container is fine,
* because we control the pacemaker_remote environment, and it avoids
* having to pass another environment variable to the container.
*
* @TODO A better solution may be to have only pacemaker_remote use the
* environment variable, and have the cluster nodes use a new
* cluster option for key location. This would introduce the limitation
* of the location being the same on all cluster nodes, but that's
* reasonable.
*/
mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
if (need_log_mount) {
mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
pe__bundle_mount_subdir);
}
port = calloc(1, sizeof(pe__bundle_port_t));
if(bundle_data->control_port) {
port->source = strdup(bundle_data->control_port);
} else {
/* If we wanted to respect PCMK_remote_port, we could use
* crm_default_remote_port() here and elsewhere in this file instead
* of DEFAULT_REMOTE_PORT.
*
* However, it gains nothing, since we control both the container
* environment and the connection resource parameters, and the user
* can use a different port if desired by setting control-port.
*/
port->source = crm_itoa(DEFAULT_REMOTE_PORT);
}
port->target = strdup(port->source);
bundle_data->ports = g_list_append(bundle_data->ports, port);
buffer = calloc(1, max+1);
for (childIter = bundle_data->child->children; childIter != NULL;
childIter = childIter->next) {
pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
replica->child = childIter->data;
replica->child->exclusive_discover = TRUE;
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
if (is_set(replica->child->flags, pe_rsc_notify)) {
set_bit(bundle_data->child->flags, pe_rsc_notify);
}
offset += allocate_ip(bundle_data, replica, buffer+offset,
max-offset);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
XML_RSC_ATTR_TARGET);
}
bundle_data->container_host_options = buffer;
if (bundle_data->attribute_target) {
g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
strdup(bundle_data->attribute_target));
g_hash_table_replace(bundle_data->child->meta,
strdup(XML_RSC_ATTR_TARGET),
strdup(bundle_data->attribute_target));
}
} else {
// Just a naked container, no pacemaker-remote
int offset = 0, max = 1024;
char *buffer = calloc(1, max+1);
for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
replica->offset = lpc;
offset += allocate_ip(bundle_data, replica, buffer+offset,
max-offset);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
}
bundle_data->container_host_options = buffer;
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (!create_container(rsc, bundle_data, replica, data_set)) {
pe_err("Failed unpacking resource %s", rsc->id);
rsc->fns->free(rsc);
return FALSE;
}
}
if (bundle_data->child) {
rsc->children = g_list_append(rsc->children, bundle_data->child);
}
return TRUE;
}
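/*!
* \internal
* \brief Check whether a single replica resource determines bundle activeness
*
* \param[in] rsc Resource to check (may be NULL)
* \param[in] all Whether all instances must be active
*
* \return TRUE or FALSE if this resource settles the answer, or -1 if the
* result is inconclusive and checking should continue
*/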
static int
replica_resource_active(pe_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->fns->active(rsc, all);
if (child_active && !all) {
return TRUE;
} else if (!child_active && all) {
return FALSE;
}
}
return -1;
}
gboolean
pe__bundle_active(pe_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GListPtr iter = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
pe__bundle_replica_t *replica = iter->data;
int rsc_active;
rsc_active = replica_resource_active(replica->ip, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->child, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->container, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->remote, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
}
/* If "all" is TRUE, we've already checked that no resources were inactive,
* so return TRUE; if "all" is FALSE, we didn't find any active resources,
* so return FALSE.
*/
return all;
}
/*!
* \internal
* \brief Find the bundle replica corresponding to a given node
*
* \param[in] bundle Top-level bundle resource
* \param[in] node Node to search for
*
* \return Bundle replica if found, NULL otherwise
*/
pe_resource_t *
pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
get_bundle_variant_data(bundle_data, bundle);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica && replica->node);
if (replica->node->details == node->details) {
return replica->child;
}
}
return NULL;
}
static void
print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
if (rsc != NULL) {
if (options & pe_print_html) {
status_print("<li>");
}
rsc->fns->print(rsc, pre_text, options, print_data);
if (options & pe_print_html) {
status_print("</li>\n");
}
}
}
static const char*
container_agent_str(enum pe__container_agent t)
{
switch (t) {
case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S;
case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return PE__CONTAINER_AGENT_UNKNOWN_S;
}
static void
bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
char *child_text = NULL;
CRM_CHECK(rsc != NULL, return);
if (pre_text == NULL) {
pre_text = "";
}
child_text = crm_strdup_printf("%s ", pre_text);
get_bundle_variant_data(bundle_data, rsc);
status_print("%s<bundle ", pre_text);
status_print("id=\"%s\" ", rsc->id);
status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
status_print("image=\"%s\" ", bundle_data->image);
status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
status_print(">\n");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
status_print("%s <replica id=\"%d\">\n", pre_text, replica->offset);
print_rsc_in_list(replica->ip, child_text, options, print_data);
print_rsc_in_list(replica->child, child_text, options, print_data);
print_rsc_in_list(replica->container, child_text, options, print_data);
print_rsc_in_list(replica->remote, child_text, options, print_data);
status_print("%s </replica>\n", pre_text);
}
status_print("%s</bundle>\n", pre_text);
free(child_text);
}
PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
int rc = pcmk_rc_no_output;
+ gboolean printed_header = FALSE;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
- rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6
- , "id", rsc->id
- , "type", container_agent_str(bundle_data->agent_type)
- , "image", bundle_data->image
- , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
- , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
- , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)));
- CRM_ASSERT(rc == pcmk_rc_ok);
-
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
char *id = crm_itoa(replica->offset);
CRM_ASSERT(replica);
if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
continue;
}
+ if (!printed_header) {
+ printed_header = TRUE;
+
+ rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6
+ , "id", rsc->id
+ , "type", container_agent_str(bundle_data->agent_type)
+ , "image", bundle_data->image
+ , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
+ , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
+ , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)));
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
free(id);
CRM_ASSERT(rc == pcmk_rc_ok);
if (replica->ip != NULL) {
out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
}
if (replica->child != NULL) {
out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
}
out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
if (replica->remote != NULL) {
out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
}
pcmk__output_xml_pop_parent(out); // replica
}
- pcmk__output_xml_pop_parent(out); // bundle
+
+ if (printed_header) {
+ pcmk__output_xml_pop_parent(out); // bundle
+ }
+
return rc;
}
static void
pe__bundle_replica_output_html(pcmk__output_t *out, GListPtr only_show,
pe__bundle_replica_t *replica, long options)
{
pe_node_t *node = NULL;
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
node = pe__current_node(replica->container);
if (pcmk__str_in_list(only_show, node->details->uname)) {
pe__common_output_html(out, rsc, buffer, node, options);
}
}
PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
char buffer[LINE_MAX];
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
pcmk__output_create_xml_node(out, "br");
out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
continue;
}
pcmk__output_xml_create_parent(out, "li");
if (is_set(options, pe_print_implicit)) {
if (pcmk__list_of_multiple(bundle_data->replicas)) {
snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
}
pcmk__output_create_xml_node(out, "br");
out->begin_list(out, NULL, NULL, NULL);
if (replica->ip != NULL) {
out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
}
if (replica->child != NULL) {
out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
}
out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
if (replica->remote != NULL) {
out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
}
out->end_list(out);
} else {
pe__bundle_replica_output_html(out, only_show, replica, options);
}
pcmk__output_xml_pop_parent(out);
}
out->end_list(out);
return pcmk_rc_ok;
}
static void
pe__bundle_replica_output_text(pcmk__output_t *out, GListPtr only_show,
pe__bundle_replica_t *replica, long options)
{
pe_node_t *node = NULL;
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
node = pe__current_node(replica->container);
if (pcmk__str_in_list(only_show, node->details->uname)) {
pe__common_output_text(out, rsc, buffer, node, options);
}
}
PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
continue;
}
if (is_set(options, pe_print_implicit)) {
if (pcmk__list_of_multiple(bundle_data->replicas)) {
out->list_item(out, NULL, "Replica[%d]", replica->offset);
}
out->begin_list(out, NULL, NULL, NULL);
if (replica->ip != NULL) {
out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
}
if (replica->child != NULL) {
out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
}
out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
if (replica->remote != NULL) {
out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
}
out->end_list(out);
} else {
pe__bundle_replica_output_text(out, only_show, replica, options);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
long options, void *print_data)
{
pe_node_t *node = NULL;
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
node = pe__current_node(replica->container);
common_print(rsc, pre_text, buffer, node, options, print_data);
}
void
pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
char *child_text = NULL;
CRM_CHECK(rsc != NULL, return);
if (options & pe_print_xml) {
bundle_print_xml(rsc, pre_text, options, print_data);
return;
}
get_bundle_variant_data(bundle_data, rsc);
if (pre_text == NULL) {
pre_text = " ";
}
status_print("%sContainer bundle%s: %s [%s]%s%s\n",
pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
rsc->id, bundle_data->image,
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("<br />\n<ul>\n");
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (options & pe_print_html) {
status_print("<li>");
}
if (is_set(options, pe_print_implicit)) {
child_text = crm_strdup_printf(" %s", pre_text);
if (pcmk__list_of_multiple(bundle_data->replicas)) {
status_print(" %sReplica[%d]\n", pre_text, replica->offset);
}
if (options & pe_print_html) {
status_print("<br />\n<ul>\n");
}
print_rsc_in_list(replica->ip, child_text, options, print_data);
print_rsc_in_list(replica->container, child_text, options, print_data);
print_rsc_in_list(replica->remote, child_text, options, print_data);
print_rsc_in_list(replica->child, child_text, options, print_data);
if (options & pe_print_html) {
status_print("</ul>\n");
}
} else {
child_text = crm_strdup_printf("%s ", pre_text);
print_bundle_replica(replica, child_text, options, print_data);
}
free(child_text);
if (options & pe_print_html) {
status_print("</li>\n");
}
}
if (options & pe_print_html) {
status_print("</ul>\n");
}
}
static void
free_bundle_replica(pe__bundle_replica_t *replica)
{
if (replica == NULL) {
return;
}
if (replica->node) {
free(replica->node);
replica->node = NULL;
}
if (replica->ip) {
free_xml(replica->ip->xml);
replica->ip->xml = NULL;
replica->ip->fns->free(replica->ip);
replica->ip = NULL;
}
if (replica->container) {
free_xml(replica->container->xml);
replica->container->xml = NULL;
replica->container->fns->free(replica->container);
replica->container = NULL;
}
if (replica->remote) {
free_xml(replica->remote->xml);
replica->remote->xml = NULL;
replica->remote->fns->free(replica->remote);
replica->remote = NULL;
}
free(replica->ipaddr);
free(replica);
}
void
pe__free_bundle(pe_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
free(bundle_data->prefix);
free(bundle_data->image);
free(bundle_data->control_port);
free(bundle_data->host_network);
free(bundle_data->host_netmask);
free(bundle_data->ip_range_start);
free(bundle_data->container_network);
free(bundle_data->launcher_options);
free(bundle_data->container_command);
free(bundle_data->container_host_options);
g_list_free_full(bundle_data->replicas,
(GDestroyNotify) free_bundle_replica);
g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
g_list_free(rsc->children);
if(bundle_data->child) {
free_xml(bundle_data->child->xml);
bundle_data->child->xml = NULL;
bundle_data->child->fns->free(bundle_data->child);
}
common_free(rsc);
}
enum rsc_role_e
pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
{
enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
return container_role;
}
/*!
* \brief Get the number of configured replicas in a bundle
*
* \param[in] rsc Bundle resource
*
* \return Number of configured replicas, or 0 on error
*/
int
pe_bundle_replicas(const pe_resource_t *rsc)
{
if ((rsc == NULL) || (rsc->variant != pe_container)) {
return 0;
} else {
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
return bundle_data->nreplicas;
}
}
void
pe__count_bundle(pe_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
pe__bundle_replica_t *replica = item->data;
if (replica->ip) {
replica->ip->fns->count(replica->ip);
}
if (replica->child) {
replica->child->fns->count(replica->child);
}
if (replica->container) {
replica->container->fns->count(replica->container);
}
if (replica->remote) {
replica->remote->fns->count(replica->remote);
}
}
}
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 61beec8531..9f70c2de27 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1077 +1,1087 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
#include <crm/msg_xml.h>
#define VARIANT_CLONE 1
#include "./variant.h"
void
pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
"such as %s can be used only as anonymous clones",
rsc->id, standard, rid);
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
g_list_length(data_set->nodes));
}
}
pe_resource_t *
find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
{
char *child_id = NULL;
pe_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
child_base = ID(clone_data->xml_obj_child);
child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
child = pe_find_resource(rsc->children, child_id);
free(child_id);
return child;
}
pe_resource_t *
pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
pe_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
if (clone_data->total_clones >= clone_data->clone_max) {
// If we've already used all available instances, this is an orphan
as_orphan = TRUE;
}
// Allocate instance numbers in numerical order (starting at 0)
inc_num = crm_itoa(clone_data->total_clones);
inc_max = crm_itoa(clone_data->clone_max);
child_copy = copy_xml(clone_data->xml_obj_child);
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
child_rsc = NULL;
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
CRM_ASSERT(child_rsc);
clone_data->total_clones += 1;
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
set_bit_recursive(child_rsc, pe_rsc_orphan);
}
add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
free(inc_num);
free(inc_max);
return child_rsc;
}
gboolean
clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
if (is_set(rsc->flags, pe_rsc_promotable)) {
const char *promoted_max = NULL;
const char *promoted_node_max = NULL;
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_MAX);
if (promoted_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_MAX);
}
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_NODEMAX);
if (promoted_node_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_NODEMAX);
}
clone_data->promoted_max = crm_parse_int(promoted_max, "1");
clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
if (max_clones) {
clone_data->clone_max = crm_parse_int(max_clones, "1");
} else if (pcmk__list_of_multiple(data_set->nodes)) {
clone_data->clone_max = g_list_length(data_set->nodes);
} else {
clone_data->clone_max = 1; /* Handy during crm_verify */
}
clone_data->ordered = crm_is_true(ordered);
if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
"because anonymous clones support only one instance "
"per node", rsc->id);
clone_data->clone_node_max = 1;
}
pe_rsc_trace(rsc, "Options for %s", rsc->id);
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
pe_rsc_trace(rsc, "\tClone is promotable: %s",
is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false");
// Clones may contain a single group or primitive
for (a_child = __xml_first_child_element(xml_obj); a_child != NULL;
a_child = __xml_next_element(a_child)) {
if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
|| crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) {
clone_data->xml_obj_child = a_child;
break;
}
}
if (clone_data->xml_obj_child == NULL) {
pcmk__config_err("%s has nothing to clone", rsc->id);
return FALSE;
}
/*
* Make clones ever so slightly sticky by default
*
* This helps ensure clone instances are not shuffled around the cluster
* for no benefit in situations when pre-allocation is not appropriate
*/
if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
}
/* This ensures that the globally-unique value always exists for children to
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
if (clone_data->clone_max <= 0) {
        /* Create one child instance so that unpack_find_resource() will hook
         * any orphans up to the parent correctly.
*/
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
}
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
return TRUE;
}
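clone_unpack() resolves the instance limits before creating any children: clone-node-max defaults to 1, clone-max falls back to one instance per known node (or a single instance when only one node is configured, which keeps crm_verify usable on partial CIBs), and the promotable limits default to 1. The following is a small standalone sketch of just that defaulting logic, with a hypothetical parse_or() helper standing in for crm_parse_int().

/* Sketch only: parse_or() is a hypothetical stand-in for crm_parse_int(). */
#include <stdio.h>
#include <stdlib.h>

static int parse_or(const char *value, int dflt)
{
    return (value == NULL) ? dflt : atoi(value);
}

static int default_clone_max(const char *clone_max_meta, int n_nodes)
{
    if (clone_max_meta != NULL) {
        return parse_or(clone_max_meta, 1);
    }
    /* Unset: one instance per node, or a single instance when only
     * one node is known (handy when verifying a partial configuration). */
    return (n_nodes > 1) ? n_nodes : 1;
}

int main(void)
{
    printf("%d\n", default_clone_max(NULL, 3)); /* 3: one per node   */
    printf("%d\n", default_clone_max("5", 3));  /* 5: explicit value */
    printf("%d\n", default_clone_max(NULL, 1)); /* 1: single node    */
    return 0;
}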
gboolean
clone_active(pe_resource_t * rsc, gboolean all)
{
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
return TRUE;
} else if (all && child_active == FALSE) {
return FALSE;
}
}
if (all) {
return TRUE;
} else {
return FALSE;
}
}
static void
short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
{
if(suffix == NULL) {
suffix = "";
}
if (list) {
if (options & pe_print_html) {
status_print("<li>");
}
status_print("%s%s: [%s ]%s", prefix, type, list, suffix);
if (options & pe_print_html) {
status_print("</li>\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
}
}
static const char *
configured_role_str(pe_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
configured_role(pe_resource_t * rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
return RSC_ROLE_UNKNOWN;
}
static void
clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
const char *target_role = configured_role_str(rsc);
GListPtr gIter = rsc->children;
status_print("%s<clone ", pre_text);
status_print("id=\"%s\" ", rsc->id);
status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false");
status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
status_print("failure_ignored=\"%s\" ",
is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s</clone>\n", pre_text);
free(child_text);
}
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
{
GListPtr gIter;
bool all = !any;
if(is_set(rsc->flags, flag)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
if(is_set_recursive(gIter->data, flag, any)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
}
if(all) {
return TRUE;
}
return FALSE;
}
void
clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *list_text = NULL;
char *child_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
clone_print_xml(rsc, pre_text, options, print_data);
return;
}
get_clone_variant_data(clone_data, rsc);
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n<ul>\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
if (options & pe_print_html) {
status_print("<li>\n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("</li>\n");
}
}
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
short_print(list_text, child_text, "Masters", NULL, options, print_data);
g_list_free(master_list);
free(list_text);
list_text = NULL;
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
} else {
short_print(list_text, child_text, "Slaves", NULL, options, print_data);
}
} else {
short_print(list_text, child_text, "Started", NULL, options, print_data);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list); stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
short_print(stopped_list, child_text, state, NULL, options, print_data);
free(stopped_list);
}
if (options & pe_print_html) {
status_print("</ul>\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
- int rc = pe__name_and_nvpairs_xml(out, true, "clone", 7
- , "id", rsc->id
- , "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable))
- , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
- , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
- , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
- , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
- , "target_role", configured_role_str(rsc));
- CRM_ASSERT(rc == pcmk_rc_ok);
+ int rc = pcmk_rc_no_output;
+ gboolean printed_header = FALSE;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
continue;
}
+ if (!printed_header) {
+ printed_header = TRUE;
+
+ rc = pe__name_and_nvpairs_xml(out, true, "clone", 7
+ , "id", rsc->id
+ , "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable))
+ , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
+ , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
+ , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
+ , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
+ , "target_role", configured_role_str(rsc));
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
- pcmk__output_xml_pop_parent(out);
+ if (printed_header) {
+ pcmk__output_xml_pop_parent(out);
+ }
+
return rc;
}
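The hunk above changes pe__clone_xml() so the <clone> element is opened lazily: the header and its attributes are written only once the first child survives the only_show filter, the XML parent is popped only if that header was actually emitted, and a fully filtered clone returns pcmk_rc_no_output rather than leaving an empty element behind. Below is a minimal standalone sketch of the same lazy-open/conditional-close pattern, with hypothetical emit_* helpers in place of the real formatted-output API.

/* Sketch only: emit_open/emit_child/emit_close are hypothetical stand-ins
 * for the pcmk__output_t operations used in the diff. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void emit_open(const char *id)  { printf("<clone id=\"%s\">\n", id); }
static void emit_child(const char *id) { printf("  <resource id=\"%s\"/>\n", id); }
static void emit_close(void)           { printf("</clone>\n"); }

/* Returns true if anything was printed (cf. pcmk_rc_ok vs. pcmk_rc_no_output). */
static bool print_clone(const char *id, const char **children, int n,
                        bool (*wanted)(const char *child))
{
    bool printed_header = false;

    for (int i = 0; i < n; i++) {
        if (!wanted(children[i])) {
            continue;                 /* filtered out, e.g. not on a listed node */
        }
        if (!printed_header) {
            printed_header = true;
            emit_open(id);            /* open the parent only on the first match */
        }
        emit_child(children[i]);
    }

    if (printed_header) {
        emit_close();                 /* close only what was actually opened */
    }
    return printed_header;
}

static bool non_empty(const char *child)
{
    return strlen(child) > 0;
}

int main(void)
{
    const char *kids[] = { "dummy:0", "", "dummy:2" };

    if (!print_clone("dummy-clone", kids, 3, non_empty)) {
        /* nothing matched: no <clone> element was emitted at all */
    }
    return 0;
}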
PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__clone_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
get_clone_variant_data(clone_data, rsc);
out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
continue;
}
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
}
if (is_set(options, pe_print_clone_details)) {
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, NULL, " Masters: [%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, NULL, " Slaves (target-role): [%s ]", list_text);
} else {
out->list_item(out, NULL, " Slaves: [%s ]", list_text);
}
} else {
out->list_item(out, NULL, " Started: [%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, NULL, " %s: [%s ]", state, stopped_list);
free(stopped_list);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__clone_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
get_clone_variant_data(clone_data, rsc);
out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
continue;
}
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
}
if (is_set(options, pe_print_clone_details)) {
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, "Masters", "[%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, "Slaves (target-role)", "[%s ]", list_text);
} else {
out->list_item(out, "Slaves", "[%s ]", list_text);
}
} else {
out->list_item(out, "Started", "[%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, state, "[%s ]", stopped_list);
free(stopped_list);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
void
clone_free(pe_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
free_xml(child_rsc->xml);
child_rsc->xml = NULL;
/* There could be a saved unexpanded xml */
free_xml(child_rsc->orig_xml);
child_rsc->orig_xml = NULL;
child_rsc->fns->free(child_rsc);
}
g_list_free(rsc->children);
if (clone_data) {
CRM_ASSERT(clone_data->demote_notify == NULL);
CRM_ASSERT(clone_data->stop_notify == NULL);
CRM_ASSERT(clone_data->start_notify == NULL);
CRM_ASSERT(clone_data->promote_notify == NULL);
}
common_free(rsc);
}
enum rsc_role_e
clone_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
clone_role = a_role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
return clone_role;
}
/*!
* \internal
* \brief Check whether a clone has an instance for every node
*
* \param[in] rsc Clone to check
* \param[in] data_set Cluster state
*/
bool
pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->clone_max == g_list_length(data_set->nodes)) {
return TRUE;
}
}
return FALSE;
}
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index c850b3b9e5..d855a6b743 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,295 +1,305 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <pe_status_private.h>
#define VARIANT_GROUP 1
#include "./variant.h"
gboolean
group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
group_variant_data_t *group_data = NULL;
const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
const char *clone_id = NULL;
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
group_data = calloc(1, sizeof(group_variant_data_t));
group_data->num_children = 0;
group_data->first_child = NULL;
group_data->last_child = NULL;
rsc->variant_opaque = group_data;
// We don't actually need the null checks, but they speed up the common case
if ((group_ordered == NULL)
|| (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
group_data->ordered = TRUE;
}
if ((group_colocated == NULL)
|| (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
group_data->colocated = TRUE;
}
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
for (xml_native_rsc = __xml_first_child_element(xml_obj); xml_native_rsc != NULL;
xml_native_rsc = __xml_next_element(xml_native_rsc)) {
if (crm_str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, TRUE)) {
pe_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
continue;
}
group_data->num_children++;
rsc->children = g_list_append(rsc->children, new_rsc);
if (group_data->first_child == NULL) {
group_data->first_child = new_rsc;
}
group_data->last_child = new_rsc;
pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
}
}
if (group_data->num_children == 0) {
pcmk__config_warn("Group %s does not have any children", rsc->id);
return TRUE; // Allow empty groups, children can be added later
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
return TRUE;
}
gboolean
group_active(pe_resource_t * rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
} else {
c_all = FALSE;
}
}
if (c_any == FALSE) {
return FALSE;
} else if (all && c_all == FALSE) {
return FALSE;
}
return TRUE;
}
static void
group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
GListPtr gIter = rsc->children;
char *child_text = crm_strdup_printf("%s ", pre_text);
status_print("%s<group id=\"%s\" ", pre_text, rsc->id);
status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s</group>\n", pre_text);
free(child_text);
}
void
group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = NULL;
GListPtr gIter = rsc->children;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
group_print_xml(rsc, pre_text, options, print_data);
return;
}
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
if (options & pe_print_html) {
status_print("\n<ul>\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
if (options & pe_print_brief) {
print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
} else {
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("<li>\n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("</li>\n");
}
}
}
if (options & pe_print_html) {
status_print("</ul>\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show G_GNUC_UNUSED = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
char *count = crm_itoa(g_list_length(gIter));
- int rc = pe__name_and_nvpairs_xml(out, true, "group", 2
- , "id", rsc->id
- , "number_resources", count);
- free(count);
- CRM_ASSERT(rc == pcmk_rc_ok);
+ int rc = pcmk_rc_no_output;
+ gboolean printed_header = FALSE;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ if (!printed_header) {
+ printed_header = TRUE;
+
+ rc = pe__name_and_nvpairs_xml(out, true, "group", 2
+ , "id", rsc->id
+ , "number_resources", count);
+ free(count);
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
- pcmk__output_xml_pop_parent(out);
+ if (printed_header) {
+ pcmk__output_xml_pop_parent(out);
+ }
+
return rc;
}
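The pe__group_xml() hunk above applies the same deferred-header approach as the clone formatter: the <group> element and its number_resources attribute are written only when the first child is reached, and pcmk__output_xml_pop_parent() is called only if that header was emitted. As a result, a childless group no longer leaves an empty <group> element in the XML output, and the function reports pcmk_rc_no_output in that case, matching the clone behaviour.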
PCMK__OUTPUT_ARGS("group", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__group_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show G_GNUC_UNUSED = va_arg(args, GListPtr);
out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
if (options & pe_print_brief) {
pe__rscs_brief_output(out, rsc->children, options, TRUE);
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "struct pe_resource_t *", "GListPtr")
int
pe__group_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_show G_GNUC_UNUSED = va_arg(args, GListPtr);
out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
if (options & pe_print_brief) {
pe__rscs_brief_output(out, rsc->children, options, TRUE);
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
void
group_free(pe_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
child_rsc->fns->free(child_rsc);
}
pe_rsc_trace(rsc, "Freeing child list");
g_list_free(rsc->children);
common_free(rsc);
}
enum rsc_role_e
group_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
group_role = role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
return group_role;
}