Page MenuHomeClusterLabs Projects

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am
index 6981586c59..7c1c090129 100644
--- a/include/crm/common/Makefile.am
+++ b/include/crm/common/Makefile.am
@@ -1,18 +1,18 @@
#
# Copyright 2004-2019 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
MAINTAINERCLEANFILES = Makefile.in
headerdir=$(pkgincludedir)/crm/common
header_HEADERS = xml.h ipc.h util.h iso8601.h mainloop.h logging.h results.h \
nvpair.h
noinst_HEADERS = cib_secrets.h ipcs.h internal.h alerts_internal.h \
iso8601_internal.h remote_internal.h xml_internal.h \
- ipc_internal.h output.h cmdline_internal.h
+ ipc_internal.h output.h cmdline_internal.h curses_internal.h
diff --git a/include/crm/common/curses_internal.h b/include/crm/common/curses_internal.h
new file mode 100644
index 0000000000..cb773ded35
--- /dev/null
+++ b/include/crm/common/curses_internal.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015-2019 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef CURSES_INTERNAL_H
+# define CURSES_INTERNAL_H
+
+# include <stdio.h>
+
+# include <config.h>
+# include <crm/common/logging.h>
+
+/*
+ * The man pages for both curses and ncurses suggest inclusion of "curses.h".
+ * We believe the following to be acceptable and portable.
+ */
+
+# if defined(HAVE_LIBNCURSES) || defined(HAVE_LIBCURSES)
+# if defined(HAVE_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
+# include <ncurses.h>
+# define CURSES_ENABLED 1
+# elif defined(HAVE_NCURSES_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
+# include <ncurses/ncurses.h>
+# define CURSES_ENABLED 1
+# elif defined(HAVE_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
+# include <curses.h>
+# define CURSES_ENABLED 1
+# elif defined(HAVE_CURSES_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
+# include <curses/curses.h>
+# define CURSES_ENABLED 1
+# else
+# define CURSES_ENABLED 0
+# endif
+# else
+# define CURSES_ENABLED 0
+# endif
+
+# if CURSES_ENABLED
+# define status_printw(fmt, args...) printw(fmt, ##args)
+# else
+# define status_printw(fmt, args...) \
+ crm_err("printw support requires ncurses to be available during configure"); \
+ do_crm_log(LOG_WARNING, fmt, ##args);
+# endif
+
+# define status_print(fmt, args...) \
+ if(options & pe_print_html) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_ncurses) { \
+ status_printw(fmt, ##args); \
+ } else if(options & pe_print_printf) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_xml) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_log) { \
+ int log_level = *(int*)print_data; \
+ do_crm_log(log_level, fmt, ##args); \
+ }
+
+#endif
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index eeaf99ebc3..e12842fd84 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,1893 +1,1893 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <ctype.h>
+#include <crm/common/curses_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
-#include <unpack.h>
#include <crm/msg_xml.h>
#define PE__VARIANT_BUNDLE 1
#include "./variant.h"
/*
 * Compute the IPv4 address following last_ip, advancing only the last
 * two octets (each capped at 253).  Returns a newly allocated string
 * that the caller must free, or NULL if last_ip cannot be parsed or
 * the usable range is exhausted.
 */
static char *
next_ip(const char *last_ip)
{
    unsigned int octet[4] = { 0, 0, 0, 0 };

    if (sscanf(last_ip, "%u.%u.%u.%u",
               &octet[0], &octet[1], &octet[2], &octet[3]) != 4) {
        /*@ TODO check for IPv6 */
        return NULL;
    }
    if (octet[2] > 253) {
        // Third octet exhausted: no further addresses available
        return NULL;
    }
    if (octet[3] > 253) {
        // Roll the fourth octet over into the third
        octet[2]++;
        octet[3] = 1;
    } else {
        octet[3]++;
    }
    return crm_strdup_printf("%u.%u.%u.%u",
                             octet[0], octet[1], octet[2], octet[3]);
}
/*
 * Assign the next address from the bundle's IP range to a replica
 * (storing it in replica->ipaddr) and format the matching container
 * runtime option into buffer.  Returns the number of characters
 * written, or 0 when no IP range is configured or the agent type is
 * unknown.
 */
static int
allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
char *buffer, int max)
{
if(data->ip_range_start == NULL) {
return 0;
} else if(data->ip_last) {
replica->ipaddr = next_ip(data->ip_last);
} else {
replica->ipaddr = strdup(data->ip_range_start);
}
// Track the last address handed out so the next replica advances from it
data->ip_last = replica->ipaddr;
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
if (data->add_host) {
return snprintf(buffer, max, " --add-host=%s-%d:%s",
data->prefix, replica->offset,
replica->ipaddr);
}
/* fall through: when --add-host is not requested, emit the
 * rkt-style --hosts-entry option instead
 */
case PE__CONTAINER_AGENT_RKT:
return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
replica->ipaddr, data->prefix, replica->offset);
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return 0;
}
/*
 * Build an XML <primitive> element describing an OCF resource with the
 * given id, provider and agent type.  The caller owns the returned node.
 */
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
    xmlNode *primitive = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(primitive, XML_ATTR_ID, name);
    crm_xml_add(primitive, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
    crm_xml_add(primitive, XML_AGENT_ATTR_PROVIDER, provider);
    crm_xml_add(primitive, XML_ATTR_TYPE, kind);
    return primitive;
}
/*!
* \internal
* \brief Check whether cluster can manage resource inside container
*
* \param[in] data Container variant data
*
* \return TRUE if networking configuration is acceptable, FALSE otherwise
*
* \note The resource is manageable if an IP range or control port has been
* specified. If a control port is used without an IP range, replicas per
* host must be 1.
*/
static bool
valid_network(pe__bundle_variant_data_t *data)
{
    // An IP range always makes the bundle manageable
    if (data->ip_range_start != NULL) {
        return TRUE;
    }
    // Without an IP range, a control port is required
    if (data->control_port == NULL) {
        return FALSE;
    }
    // A shared control port only works with one replica per host
    if (data->nreplicas_per_host > 1) {
        pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
        data->nreplicas_per_host = 1;
        /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
    }
    return TRUE;
}
/*
 * When the bundle has an IP range, create the implicit
 * ocf:heartbeat:IPaddr2 resource for one replica's address and add it
 * as a child of the bundle.  Returns FALSE only if the generated
 * resource definition fails to unpack; TRUE otherwise (including the
 * no-IP-range no-op case).
 */
static bool
create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
if(data->ip_range_start) {
char *id = NULL;
xmlNode *xml_ip = NULL;
xmlNode *xml_obj = NULL;
// Resource id embeds the replica's address; sanitize in case the
// address contains characters invalid in XML ids
id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
crm_xml_sanitize_id(id);
xml_ip = create_resource(id, "heartbeat", "IPaddr2");
free(id);
xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
if(data->host_network) {
crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
}
if(data->host_netmask) {
crm_create_nvpair_xml(xml_obj, NULL,
"cidr_netmask", data->host_netmask);
} else {
// Default to a host route when no netmask was configured
crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
}
xml_obj = create_xml_node(xml_ip, "operations");
crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->ip);
}
return TRUE;
}
/*
 * Create the implicit ocf:heartbeat:docker resource that launches one
 * replica's container, and add it as a child of the bundle.  The docker
 * run options are assembled into a bounded buffer from the bundle's
 * network, port, mount and launcher settings.  Returns FALSE if the
 * generated resource definition fails to unpack.
 */
static bool
create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_DOCKER_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
// Disable the container runtime's own restart handling
offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this makes applications happy who need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
// Translate configured mounts into -v options; per-replica subdirectory
// mounts get a "-<offset>" suffixed source and are also collected into
// dbuffer for the agent's mount_points parameter
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
// Map configured ports, binding to the replica's IP when one exists
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
// Bundle wraps a resource: run pacemaker-remoted inside the container
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
/*
 * Create the implicit ocf:heartbeat:podman resource that launches one
 * replica's container, and add it as a child of the bundle.  Mirrors
 * create_docker_resource() except for podman-specific differences
 * (no restart policy, no --link-local-ip).  Returns FALSE if the
 * generated resource definition fails to unpack.
 */
static bool
create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_PODMAN_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
// FIXME: (bandini 2018-08) podman has no restart policies
//offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this makes applications happy who need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
// podman has no support for --link-local-ip
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
// Translate configured mounts into -v options; per-replica subdirectory
// mounts get a "-<offset>" suffixed source and are also collected into
// dbuffer for the agent's mount_points parameter
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
// Map configured ports, binding to the replica's IP when one exists
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
// Bundle wraps a resource: run pacemaker-remoted inside the container
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent,
data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
/*
 * Create the implicit ocf:heartbeat:rkt resource that launches one
 * replica's container, and add it as a child of the bundle.  rkt uses
 * numbered --volume/--mount option pairs (volid) instead of docker-style
 * -v options.  Returns FALSE if the generated resource definition fails
 * to unpack.
 */
static bool
create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
int volid = 0;
id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_RKT_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this makes applications happy who need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
// Each mount becomes a numbered --volume/--mount pair; per-replica
// subdirectory mounts get a "-<offset>" suffixed source and are also
// collected into dbuffer for the agent's mount_points parameter
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
}
volid++;
}
// Map configured ports, binding to the replica's IP when one exists
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset,
" --port=%s:%s:%s", port->target,
replica->ipaddr, port->source);
} else {
offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
// Bundle wraps a resource: run pacemaker-remoted inside the container
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
/*!
* \brief Ban a node from a resource's (and its children's) allowed nodes list
*
* \param[in,out] rsc Resource to modify
* \param[in] uname Name of node to ban
*/
static void
disallow_node(resource_t *rsc, const char *uname)
{
    pe_node_t *found = g_hash_table_lookup(rsc->allowed_nodes, uname);

    if (found != NULL) {
        found->weight = -INFINITY;
        found->rsc_discover_mode = pe_discover_never;
    }
    // Recurse so the ban applies to every descendant resource too
    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        disallow_node((resource_t *) (iter->data), uname);
    }
}
/*
 * For a replica whose bundle wraps a resource and has manageable
 * networking, create the implicit remote connection resource and the
 * guest node it connects to, then add the connection as a child of the
 * bundle.  A no-op (returning TRUE) otherwise.  Returns FALSE only if
 * the generated remote resource fails to unpack.
 */
static bool
create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
GListPtr rsc_iter = NULL;
node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
const char *uname = NULL;
const char *connect_name = NULL;
if (remote_id_conflict(id, data_set)) {
free(id);
// The biggest hammer we have
id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
replica->child->id, replica->offset);
CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
}
/* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
* connection does not have its own IP is a magic string that we use to
* support nested remotes (i.e. a bundle running on a remote node).
*/
connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
if (data->control_port == NULL) {
port_s = crm_itoa(DEFAULT_REMOTE_PORT);
}
/* This sets replica->container as replica->remote's container, which is
* similar to what happens with guest nodes. This is how the PE knows
* that the bundle node is fenced by recovering the container, and that
* remote should be ordered relative to the container.
*/
xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
NULL, NULL, NULL,
connect_name, (data->control_port?
data->control_port : port_s));
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
* need something that will get freed during data set cleanup to use as
* the node ID and uname.
*/
free(id);
id = NULL;
uname = ID(xml_remote);
/* Ensure a node has been created for the guest (it may have already
* been, if it has a permanent node attribute), and ensure its weight is
* -INFINITY so no other resources can run on it.
*/
node = pe_find_node(data_set->nodes, uname);
if (node == NULL) {
node = pe_create_node(uname, uname, "remote", "-INFINITY",
data_set);
} else {
node->weight = -INFINITY;
}
node->rsc_discover_mode = pe_discover_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
* has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
* Unfortunately, a bundle has to be mostly unpacked before it's obvious
* what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
* likely place for this to happen is when common_unpack() calls
* resource_location() to set a default score in symmetric clusters.
* This adds a node *copy* to each resource's allowed nodes, and these
* copies will have the wrong weight.
*
* As a hacky workaround, fix those copies here.
*
* @TODO Possible alternative: ensure bundles are unpacked before other
* resources, so the weight is correct before any copies are made.
*/
for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
disallow_node((resource_t *) (rsc_iter->data), uname);
}
// The replica itself prefers its own node (positive weight), with
// exclusive discovery
replica->node = node_copy(node);
replica->node->weight = 500;
replica->node->rsc_discover_mode = pe_discover_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
g_hash_table_destroy(replica->child->allowed_nodes);
}
replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
g_str_equal,
NULL, free);
g_hash_table_insert(replica->child->allowed_nodes,
(gpointer) replica->node->details->id,
node_copy(replica->node));
{
// The child's parent, however, must never run on the bundle node
node_t *copy = node_copy(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
}
if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
return FALSE;
}
g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
if (pe__is_guest_or_remote_node(node)) {
/* Remote resources can only run on 'normal' cluster node */
node->weight = -INFINITY;
}
}
replica->node->details->remote_rsc = replica->remote;
// Ensure pe__is_guest_node() functions correctly immediately
replica->remote->container = replica->container;
/* A bundle's #kind is closer to "container" (guest node) than the
* "remote" set by pe_create_node().
*/
g_hash_table_insert(replica->node->details->attrs,
strdup(CRM_ATTR_KIND), strdup("container"));
/* One effect of this is that setup_container() will add
* replica->remote to replica->container's fillers, which will make
* pe__resource_contains_guest_node() true for replica->container.
*
* replica->child does NOT get added to replica->container's fillers.
* The only noticeable effect if it did would be for its fail count to
* be taken into account when checking replica->container's migration
* threshold.
*/
parent->children = g_list_append(parent->children, replica->remote);
}
return TRUE;
}
/*
 * Create all implicit per-replica resources (container, optional IP
 * address, optional remote connection) appropriate to the bundle's
 * container agent type.  Returns FALSE if any of them cannot be
 * created or unpacked.
 */
static bool
create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
if (!create_docker_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_PODMAN:
if (!create_podman_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_RKT:
if (!create_rkt_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
default: // PE__CONTAINER_AGENT_UNKNOWN
return FALSE;
}
if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if (replica->child && replica->ipaddr) {
// Expose the replica's floating IP to the wrapped resource
add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
}
if (replica->remote) {
/*
* Allow the remote connection resource to be allocated to a
* different node than the one on which the container is active.
*
* This makes it possible to have Pacemaker Remote nodes running
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes);
}
return TRUE;
}
/*
 * Append a new mount entry (copies of source/target/options plus flags)
 * to the bundle's mount list.
 */
static void
mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
          const char *target, const char *options, uint32_t flags)
{
    pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));

    mount->source = strdup(source);
    mount->target = strdup(target);
    mount->options = (options != NULL)? strdup(options) : NULL;
    mount->flags = flags;
    bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
}
/*
 * Free a mount entry and its owned strings.  NULL-safe, so it can be
 * used directly as a list-destroy callback.
 */
static void
mount_free(pe__bundle_mount_t *mount)
{
    if (mount == NULL) {
        return;
    }
    free(mount->source);
    free(mount->target);
    free(mount->options);
    free(mount);
}
/*
 * Free a port-mapping entry and its owned strings.  NULL-safe, so it
 * can be used directly as a list-destroy callback (consistent with
 * mount_free()).
 */
static void
port_free(pe__bundle_port_t *port)
{
    if (port == NULL) {
        return;
    }
    free(port->source);
    free(port->target);
    free(port);
}
/*
 * Given a remote connection resource, find the bundle replica it
 * belongs to by walking up to the top-level bundle and scanning its
 * replica list.  Returns NULL (with a logged assertion) if no replica
 * matches.
 */
static pe__bundle_replica_t *
replica_for_remote(pe_resource_t *remote)
{
    resource_t *bundle = remote;
    pe__bundle_variant_data_t *bundle_data = NULL;

    if (bundle == NULL) {
        return NULL;
    }
    // Climb to the top-level resource, which is the bundle itself
    while (bundle->parent != NULL) {
        bundle = bundle->parent;
    }
    get_bundle_variant_data(bundle_data, bundle);
    for (GList *iter = bundle_data->replicas; iter != NULL;
         iter = iter->next) {
        pe__bundle_replica_t *replica = iter->data;

        if (replica->remote == remote) {
            return replica;
        }
    }
    CRM_LOG_ASSERT(FALSE);
    return NULL;
}
/*
 * Check whether rsc is a bundle's implicit remote connection whose
 * address must be filled in at runtime: an ocf:pacemaker:remote
 * resource whose configured address is the "#uname" magic string.
 */
bool
pe__bundle_needs_remote_name(pe_resource_t *rsc)
{
    const char *value = NULL;

    if (rsc == NULL) {
        return FALSE;
    }
    value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
    if (!safe_str_eq(value, "#uname")) {
        return FALSE;
    }
    // Require exactly an ocf:pacemaker:remote primitive
    {
        const char *expected[3][2] = {
            { XML_ATTR_TYPE, "remote" },
            { XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF },
            { XML_AGENT_ATTR_PROVIDER, "pacemaker" },
        };

        for (int m = 0; m < 3; m++) {
            if (safe_str_neq(crm_element_value(rsc->xml, expected[m][0]),
                             expected[m][1])) {
                return FALSE;
            }
        }
    }
    return TRUE;
}
/*
 * If rsc is a bundle remote connection needing its address filled in
 * (see pe__bundle_needs_remote_name()), resolve the host the replica's
 * container is (or will be) running on, optionally store its uname into
 * the given XML field, and return the uname.  Returns NULL when no
 * address can be determined.
 */
const char *
pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
pe_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
return NULL;
}
replica = replica_for_remote(rsc);
if (replica == NULL) {
return NULL;
}
// Prefer where the container is being allocated to
node = replica->container->allocated_to;
if (node == NULL) {
/* If it won't be running anywhere after the
* transition, go with where it's running now.
*/
node = pe__current_node(replica->container);
}
if(node == NULL) {
crm_trace("Cannot determine address for bundle connection %s", rsc->id);
return NULL;
}
crm_trace("Setting address for bundle connection %s to bundle host %s",
rsc->id, node->details->uname);
if(xml != NULL && field != NULL) {
crm_xml_add(xml, field, node->details->uname);
}
return node->details->uname;
}
/*!
 * \internal
 * \brief Unpack a bundle resource definition from its CIB XML
 *
 * Parses the bundle's container element (docker, rkt, or podman), its
 * optional network, port-mapping, storage, and primitive children into a
 * newly allocated pe__bundle_variant_data_t stored in rsc->variant_opaque,
 * then builds one replica entry per requested copy.
 *
 * \param[in,out] rsc       Bundle resource to unpack
 * \param[in,out] data_set  Cluster working set containing \p rsc
 *
 * \return TRUE on success, FALSE if the configuration was invalid
 */
gboolean
pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    const char *value = NULL;
    xmlNode *xml_obj = NULL;
    xmlNode *xml_resource = NULL;
    pe__bundle_variant_data_t *bundle_data = NULL;
    bool need_log_mount = TRUE;

    CRM_ASSERT(rsc != NULL);
    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
    rsc->variant_opaque = bundle_data;
    bundle_data->prefix = strdup(rsc->id);

    // Detect the container technology from which child element is present
    xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
    if (xml_obj != NULL) {
        bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
    } else {
        xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
        if (xml_obj != NULL) {
            bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
        } else {
            xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
            if (xml_obj != NULL) {
                bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
            } else {
                // No recognized container element: not a valid bundle
                return FALSE;
            }
        }
    }

    value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
    if (value == NULL) {
        // @COMPAT deprecated since 2.0.0
        value = crm_element_value(xml_obj, "masters");
    }
    bundle_data->promoted_max = crm_parse_int(value, "0");
    if (bundle_data->promoted_max < 0) {
        pe_err("%s for %s must be nonnegative integer, using 0",
               XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
        bundle_data->promoted_max = 0;
    }

    // Replica count defaults to promoted-max when that is set, otherwise 1
    value = crm_element_value(xml_obj, "replicas");
    if ((value == NULL) && bundle_data->promoted_max) {
        bundle_data->nreplicas = bundle_data->promoted_max;
    } else {
        bundle_data->nreplicas = crm_parse_int(value, "1");
    }
    if (bundle_data->nreplicas < 1) {
        pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
        bundle_data->nreplicas = 1;
    }

    /*
     * Communication between containers on the same host via the
     * floating IPs only works if the container is started with:
     * --userland-proxy=false --ip-masq=false
     */
    value = crm_element_value(xml_obj, "replicas-per-host");
    bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
    if (bundle_data->nreplicas_per_host < 1) {
        pe_err("'replicas-per-host' for %s must be positive integer, using 1",
               rsc->id);
        bundle_data->nreplicas_per_host = 1;
    }
    // With a single replica per host, instances are treated as anonymous
    if (bundle_data->nreplicas_per_host == 1) {
        clear_bit(rsc->flags, pe_rsc_unique);
    }

    bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
    bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
    bundle_data->image = crm_element_value_copy(xml_obj, "image");
    bundle_data->container_network = crm_element_value_copy(xml_obj, "network");

    // Optional <network> element: addressing options plus port mappings
    xml_obj = first_named_child(rsc->xml, "network");
    if(xml_obj) {
        bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
        bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
        bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
        bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
        value = crm_element_value(xml_obj, "add-host");
        // add-host defaults to TRUE when absent or not a parseable boolean
        if (check_boolean(value) == FALSE) {
            bundle_data->add_host = TRUE;
        } else {
            crm_str_to_boolean(value, &bundle_data->add_host);
        }

        for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
             xml_child = __xml_next_element(xml_child)) {

            pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));

            // Each mapping names either a single "port" or a "range"
            port->source = crm_element_value_copy(xml_child, "port");

            if(port->source == NULL) {
                port->source = crm_element_value_copy(xml_child, "range");
            } else {
                port->target = crm_element_value_copy(xml_child, "internal-port");
            }

            if(port->source != NULL && strlen(port->source) > 0) {
                if(port->target == NULL) {
                    // Default: same port inside and outside the container
                    port->target = strdup(port->source);
                }
                bundle_data->ports = g_list_append(bundle_data->ports, port);
            } else {
                pe_err("Invalid port directive %s", ID(xml_child));
                port_free(port);
            }
        }
    }

    // Optional <storage> element: each child becomes a mount
    xml_obj = first_named_child(rsc->xml, "storage");
    for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
         xml_child = __xml_next_element(xml_child)) {

        const char *source = crm_element_value(xml_child, "source-dir");
        const char *target = crm_element_value(xml_child, "target-dir");
        const char *options = crm_element_value(xml_child, "options");
        int flags = pe__bundle_mount_none;

        if (source == NULL) {
            // source-dir-root mounts get a per-replica subdirectory
            source = crm_element_value(xml_child, "source-dir-root");
            set_bit(flags, pe__bundle_mount_subdir);
        }

        if (source && target) {
            mount_add(bundle_data, source, target, options, flags);
            if (strcmp(target, "/var/log") == 0) {
                // User supplied a log mount; skip the default one below
                need_log_mount = FALSE;
            }
        } else {
            pe_err("Invalid mount directive %s", ID(xml_child));
        }
    }

    // Optional <primitive>: the resource run inside each container
    xml_obj = first_named_child(rsc->xml, "primitive");
    if (xml_obj && valid_network(bundle_data)) {
        char *value = NULL; // NOTE(review): shadows the outer "value" variable
        xmlNode *xml_set = NULL;

        // Wrap the primitive in an implicit clone definition
        xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);

        /* @COMPAT We no longer use the <master> tag, but we need to keep it as
         * part of the resource name, so that bundles don't restart in a rolling
         * upgrade. (It also avoids needing to change regression tests.)
         */
        crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));

        xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
        crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);

        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);

        value = crm_itoa(bundle_data->nreplicas);
        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_INCARNATION_MAX, value);
        free(value);

        value = crm_itoa(bundle_data->nreplicas_per_host);
        crm_create_nvpair_xml(xml_set, NULL,
                              XML_RSC_ATTR_INCARNATION_NODEMAX, value);
        free(value);

        crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                              (bundle_data->nreplicas_per_host > 1)?
                              XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);

        if (bundle_data->promoted_max) {
            crm_create_nvpair_xml(xml_set, NULL,
                                  XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);

            value = crm_itoa(bundle_data->promoted_max);
            crm_create_nvpair_xml(xml_set, NULL,
                                  XML_RSC_ATTR_PROMOTED_MAX, value);
            free(value);
        }

        //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
        add_node_copy(xml_resource, xml_obj);

    } else if(xml_obj) {
        // A primitive without usable networking can't be managed remotely
        pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
               rsc->id, ID(xml_obj));
        return FALSE;
    }

    if(xml_resource) {
        // Bundle contains a primitive: unpack the implicit clone and
        // create one replica per clone instance
        int lpc = 0;
        GListPtr childIter = NULL;
        resource_t *new_rsc = NULL;
        pe__bundle_port_t *port = NULL;

        int offset = 0, max = 1024;
        char *buffer = NULL;

        if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
            pe_err("Failed unpacking resource %s", ID(rsc->xml));
            if (new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
            return FALSE;
        }

        bundle_data->child = new_rsc;

        /* Currently, we always map the default authentication key location
         * into the same location inside the container.
         *
         * Ideally, we would respect the host's PCMK_authkey_location, but:
         * - it may be different on different nodes;
         * - the actual connection will do extra checking to make sure the key
         *   file exists and is readable, that we can't do here on the DC
         * - tools such as crm_resource and crm_simulate may not have the same
         *   environment variables as the cluster, causing operation digests to
         *   differ
         *
         * Always using the default location inside the container is fine,
         * because we control the pacemaker_remote environment, and it avoids
         * having to pass another environment variable to the container.
         *
         * @TODO A better solution may be to have only pacemaker_remote use the
         * environment variable, and have the cluster nodes use a new
         * cluster option for key location. This would introduce the limitation
         * of the location being the same on all cluster nodes, but that's
         * reasonable.
         */
        mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                  DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);

        if (need_log_mount) {
            mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                      pe__bundle_mount_subdir);
        }

        // Map the Pacemaker Remote connection port into the container
        port = calloc(1, sizeof(pe__bundle_port_t));
        if(bundle_data->control_port) {
            port->source = strdup(bundle_data->control_port);
        } else {
            /* If we wanted to respect PCMK_remote_port, we could use
             * crm_default_remote_port() here and elsewhere in this file instead
             * of DEFAULT_REMOTE_PORT.
             *
             * However, it gains nothing, since we control both the container
             * environment and the connection resource parameters, and the user
             * can use a different port if desired by setting control-port.
             */
            port->source = crm_itoa(DEFAULT_REMOTE_PORT);
        }
        port->target = strdup(port->source);
        bundle_data->ports = g_list_append(bundle_data->ports, port);

        buffer = calloc(1, max+1);
        for (childIter = bundle_data->child->children; childIter != NULL;
             childIter = childIter->next) {

            pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));

            replica->child = childIter->data;
            replica->child->exclusive_discover = TRUE;
            replica->offset = lpc++;

            // Ensure the child's notify gets set based on the underlying primitive's value
            if (is_set(replica->child->flags, pe_rsc_notify)) {
                set_bit(bundle_data->child->flags, pe_rsc_notify);
            }

            // allocate_ip() appends this replica's host options into buffer
            offset += allocate_ip(bundle_data, replica, buffer+offset,
                                  max-offset);
            bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                  replica);
            bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                XML_RSC_ATTR_TARGET);
        }
        bundle_data->container_host_options = buffer;
        if (bundle_data->attribute_target) {
            // Propagate container-attribute-target to bundle and clone meta
            g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                 strdup(bundle_data->attribute_target));
            g_hash_table_replace(bundle_data->child->meta,
                                 strdup(XML_RSC_ATTR_TARGET),
                                 strdup(bundle_data->attribute_target));
        }

    } else {
        // Just a naked container, no pacemaker-remote
        int offset = 0, max = 1024;
        char *buffer = calloc(1, max+1);

        for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
            pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));

            replica->offset = lpc;
            offset += allocate_ip(bundle_data, replica, buffer+offset,
                                  max-offset);
            bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                  replica);
        }
        bundle_data->container_host_options = buffer;
    }

    // Create the IP/container/remote resources for each replica
    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;

        if (!create_container(rsc, bundle_data, replica, data_set)) {
            pe_err("Failed unpacking resource %s", rsc->id);
            // NOTE(review): this frees rsc itself on failure — confirm
            // callers do not use rsc after a FALSE return
            rsc->fns->free(rsc);
            return FALSE;
        }
    }

    if (bundle_data->child) {
        rsc->children = g_list_append(rsc->children, bundle_data->child);
    }
    return TRUE;
}
/* Check one replica member's activity.
 *
 * Returns TRUE or FALSE when this member alone decides the bundle's
 * "active" answer for the given mode, or -1 when the caller must keep
 * checking other members (including when rsc is NULL).
 */
static int
replica_resource_active(pe_resource_t *rsc, gboolean all)
{
    gboolean active = FALSE;

    if (rsc == NULL) {
        return -1;
    }

    active = rsc->fns->active(rsc, all);
    if (all) {
        // "All" mode: a single inactive member settles the answer as FALSE
        return active? -1 : FALSE;
    }
    // "Any" mode: a single active member settles the answer as TRUE
    return active? TRUE : -1;
}
gboolean
pe__bundle_active(pe_resource_t *rsc, gboolean all)
{
    pe__bundle_variant_data_t *bundle_data = NULL;

    get_bundle_variant_data(bundle_data, rsc);
    for (GListPtr iter = bundle_data->replicas; iter != NULL;
         iter = iter->next) {
        pe__bundle_replica_t *replica = iter->data;
        pe_resource_t *members[] = {
            replica->ip, replica->child, replica->container, replica->remote,
        };

        // The first member with a decisive answer determines the result
        for (int lpc = 0; lpc < 4; lpc++) {
            int decisive = replica_resource_active(members[lpc], all);

            if (decisive >= 0) {
                return (gboolean) decisive;
            }
        }
    }

    /* If "all" is TRUE, we've already checked that no resources were inactive,
     * so return TRUE; if "all" is FALSE, we didn't find any active resources,
     * so return FALSE.
     */
    return all;
}
/*!
* \internal
* \brief Find the bundle replica corresponding to a given node
*
* \param[in] bundle Top-level bundle resource
* \param[in] node Node to search for
*
* \return Bundle replica if found, NULL otherwise
*/
pe_resource_t *
pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
{
    pe__bundle_variant_data_t *bundle_data = NULL;
    GList *iter = NULL;

    CRM_ASSERT(bundle && node);
    get_bundle_variant_data(bundle_data, bundle);

    for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
        pe__bundle_replica_t *replica = iter->data;

        CRM_ASSERT(replica && replica->node);
        if (replica->node->details == node->details) {
            // Same underlying node object means this replica runs here
            return replica->child;
        }
    }
    return NULL; // No replica assigned to this node
}
/* Print one resource as a list entry, wrapped in <li> tags when HTML
 * output was requested. A NULL resource is silently skipped.
 */
static void
print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
{
    if (rsc == NULL) {
        return;
    }

    if (options & pe_print_html) {
        status_print("<li>");
    }
    rsc->fns->print(rsc, pre_text, options, print_data);
    if (options & pe_print_html) {
        status_print("</li>\n");
    }
}
/* Map a container agent enum value to its display string,
 * falling back to the "unknown" label for unrecognized values.
 */
static const char*
container_agent_str(enum pe__container_agent t)
{
    if (t == PE__CONTAINER_AGENT_DOCKER) {
        return PE__CONTAINER_AGENT_DOCKER_S;
    }
    if (t == PE__CONTAINER_AGENT_RKT) {
        return PE__CONTAINER_AGENT_RKT_S;
    }
    if (t == PE__CONTAINER_AGENT_PODMAN) {
        return PE__CONTAINER_AGENT_PODMAN_S;
    }
    return PE__CONTAINER_AGENT_UNKNOWN_S; // PE__CONTAINER_AGENT_UNKNOWN
}
/* Print bundle status in the legacy XML format (pe_print_xml option).
 *
 * Emits a <bundle> element with id/type/image/unique/managed/failed
 * attributes, then one <replica> element per replica containing the
 * replica's member resources (NULL members are skipped by
 * print_rsc_in_list()).
 */
static void
bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                 void *print_data)
{
    pe__bundle_variant_data_t *bundle_data = NULL;
    char *child_text = NULL;

    CRM_CHECK(rsc != NULL, return);

    if (pre_text == NULL) {
        pre_text = "";
    }
    // Member resources are printed one indent level deeper
    child_text = crm_concat(pre_text, " ", ' ');

    get_bundle_variant_data(bundle_data, rsc);

    status_print("%s<bundle ", pre_text);
    status_print("id=\"%s\" ", rsc->id);
    status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
    status_print("image=\"%s\" ", bundle_data->image);
    status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print(">\n");

    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;

        CRM_ASSERT(replica);
        status_print("%s <replica id=\"%d\">\n", pre_text, replica->offset);
        print_rsc_in_list(replica->ip, child_text, options, print_data);
        print_rsc_in_list(replica->child, child_text, options, print_data);
        print_rsc_in_list(replica->container, child_text, options, print_data);
        print_rsc_in_list(replica->remote, child_text, options, print_data);
        status_print("%s </replica>\n", pre_text);
    }
    status_print("%s</bundle>\n", pre_text);
    free(child_text);
}
/* Format a bundle for the new output API in XML form.
 *
 * \param[in,out] out   Output object
 * \param[in]     args  (long) print options, (resource_t *) bundle resource
 *
 * \return Return code of the last pe__name_and_nvpairs_xml() call
 */
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);
    pe__bundle_variant_data_t *bundle_data = NULL;
    int rc = 0;

    CRM_ASSERT(rsc != NULL);

    get_bundle_variant_data(bundle_data, rsc);
    rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6
             , "id", rsc->id
             , "type", container_agent_str(bundle_data->agent_type)
             , "image", bundle_data->image
             , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
             , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
             , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)));
    CRM_ASSERT(rc == 0);

    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;
        char *id = NULL;

        // Bug fix: assert before dereferencing replica (was after)
        CRM_ASSERT(replica);
        id = crm_itoa(replica->offset);
        rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
        free(id);
        CRM_ASSERT(rc == 0);

        /* Bug fix: a replica is not guaranteed all four members — a bundle
         * without ip-range-start has no IP resource, and one without a
         * primitive has no child or remote — so guard each optional member
         * before dereferencing its ->xml. Only the container always exists.
         */
        if (replica->ip != NULL) {
            out->message(out, crm_element_name(replica->ip->xml), options, replica->ip);
        }
        if (replica->child != NULL) {
            out->message(out, crm_element_name(replica->child->xml), options, replica->child);
        }
        out->message(out, crm_element_name(replica->container->xml), options, replica->container);
        if (replica->remote != NULL) {
            out->message(out, crm_element_name(replica->remote->xml), options, replica->remote);
        }
        pcmk__output_xml_pop_parent(out); // replica
    }
    pcmk__output_xml_pop_parent(out); // bundle
    return rc;
}
/* Print one bundle replica as a single HTML line.
 *
 * The line is labeled with the remote connection's printable ID when one
 * exists, otherwise the container's, with the replica's IP address (if any)
 * appended in parentheses; the status shown is that of the child when
 * present, otherwise the container.
 */
static void
pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                               long options)
{
    char desc[LINE_MAX];
    int len = 0;
    pe_resource_t *status_rsc = replica->child;
    pe_resource_t *label_rsc = NULL;

    if (status_rsc == NULL) {
        status_rsc = replica->container;
    }
    label_rsc = (replica->remote != NULL)? replica->remote : replica->container;

    len = snprintf(desc, LINE_MAX, "%s", rsc_printable_id(label_rsc));
    if (replica->ipaddr) {
        len += snprintf(desc + len, LINE_MAX - len, " (%s)",
                        replica->ipaddr);
    }

    pe__common_output_html(out, status_rsc, desc,
                           pe__current_node(replica->container), options);
}
/* Format a bundle for the new output API in HTML form.
 *
 * \param[in,out] out   Output object
 * \param[in]     args  (long) print options, (resource_t *) bundle resource
 *
 * \return 0
 */
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);
    pe__bundle_variant_data_t *bundle_data = NULL;
    char buffer[LINE_MAX];

    CRM_ASSERT(rsc != NULL);

    get_bundle_variant_data(bundle_data, rsc);

    snprintf(buffer, LINE_MAX, "Container bundle%s: %s [%s]%s%s",
             ((bundle_data->nreplicas > 1)? " set" : ""),
             rsc->id, bundle_data->image,
             is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
             is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    pcmk__output_create_xml_node(out, "br");
    out->begin_list(out, buffer, NULL, NULL);

    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;

        CRM_ASSERT(replica);
        pcmk__output_xml_create_parent(out, "li");
        if (is_set(options, pe_print_implicit)) {
            // Verbose mode: list each member resource individually
            if (g_list_length(bundle_data->replicas) > 1) {
                snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
                xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
            }
            pcmk__output_create_xml_node(out, "br");
            out->begin_list(out, NULL, NULL, NULL);

            /* Bug fix: ip, child, and remote may be NULL (bundle without
             * ip-range-start or without a primitive); guard before
             * dereferencing ->xml. Only the container always exists.
             */
            if (replica->ip != NULL) {
                out->message(out, crm_element_name(replica->ip->xml), options, replica->ip);
            }
            if (replica->child != NULL) {
                out->message(out, crm_element_name(replica->child->xml), options, replica->child);
            }
            out->message(out, crm_element_name(replica->container->xml), options, replica->container);
            if (replica->remote != NULL) {
                out->message(out, crm_element_name(replica->remote->xml), options, replica->remote);
            }
            out->end_list(out);
        } else {
            // Compact mode: one summary line per replica
            pe__bundle_replica_output_html(out, replica, options);
        }
        pcmk__output_xml_pop_parent(out);
    }

    out->end_list(out);
    return 0;
}
/* Print one bundle replica as a single text line.
 *
 * The line is labeled with the remote connection's printable ID when one
 * exists, otherwise the container's, with the replica's IP address (if any)
 * appended in parentheses; the status shown is that of the child when
 * present, otherwise the container.
 */
static void
pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                               const char *pre_text, long options)
{
    char desc[LINE_MAX];
    int len = 0;
    pe_resource_t *status_rsc = replica->child;
    pe_resource_t *label_rsc = NULL;

    if (status_rsc == NULL) {
        status_rsc = replica->container;
    }
    label_rsc = (replica->remote != NULL)? replica->remote : replica->container;

    len = snprintf(desc, LINE_MAX, "%s", rsc_printable_id(label_rsc));
    if (replica->ipaddr) {
        len += snprintf(desc + len, LINE_MAX - len, " (%s)",
                        replica->ipaddr);
    }

    pe__common_output_text(out, status_rsc, pre_text, desc,
                           pe__current_node(replica->container), options);
}
/* Format a bundle for the new output API in plain-text form.
 *
 * \param[in,out] out   Output object
 * \param[in]     args  (long) print options, (resource_t *) bundle resource,
 *                      (const char *) line prefix (may be NULL)
 *
 * \return 0
 */
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);
    const char *pre_text = va_arg(args, char *);
    pe__bundle_variant_data_t *bundle_data = NULL;
    char *child_text = NULL;

    CRM_ASSERT(rsc != NULL);

    get_bundle_variant_data(bundle_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    fprintf(out->dest, "%sContainer bundle%s: %s [%s]%s%s\n",
            pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
            rsc->id, bundle_data->image,
            is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
            is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");

    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;

        CRM_ASSERT(replica);
        if (is_set(options, pe_print_implicit)) {
            // Verbose mode: print each member resource individually
            child_text = crm_strdup_printf(" %s", pre_text);
            if(g_list_length(bundle_data->replicas) > 1) {
                fprintf(out->dest, " %sReplica[%d]\n", pre_text, replica->offset);
            }

            /* Bug fix: ip, child, and remote may be NULL (bundle without
             * ip-range-start or without a primitive); guard before
             * dereferencing ->xml. Only the container always exists.
             */
            if (replica->ip != NULL) {
                out->message(out, crm_element_name(replica->ip->xml), options, replica->ip, child_text);
            }
            if (replica->child != NULL) {
                out->message(out, crm_element_name(replica->child->xml), options, replica->child, child_text);
            }
            out->message(out, crm_element_name(replica->container->xml), options, replica->container, child_text);
            if (replica->remote != NULL) {
                out->message(out, crm_element_name(replica->remote->xml), options, replica->remote, child_text);
            }
        } else {
            // Compact mode: one summary line per replica
            child_text = crm_strdup_printf("%s ", pre_text);
            pe__bundle_replica_output_text(out, replica, child_text, options);
        }
        free(child_text);
    }
    return 0;
}
/* Print one bundle replica as a single line via the legacy printer.
 *
 * The line is labeled with the remote connection's printable ID when one
 * exists, otherwise the container's, with the replica's IP address (if any)
 * appended in parentheses; the status shown is that of the child when
 * present, otherwise the container.
 */
static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                     long options, void *print_data)
{
    char desc[LINE_MAX];
    int len = 0;
    pe_resource_t *status_rsc = replica->child;
    pe_resource_t *label_rsc = NULL;

    if (status_rsc == NULL) {
        status_rsc = replica->container;
    }
    label_rsc = (replica->remote != NULL)? replica->remote : replica->container;

    len = snprintf(desc, LINE_MAX, "%s", rsc_printable_id(label_rsc));
    if (replica->ipaddr) {
        len += snprintf(desc + len, LINE_MAX - len, " (%s)",
                        replica->ipaddr);
    }

    common_print(status_rsc, pre_text, desc,
                 pe__current_node(replica->container), options, print_data);
}
/* Print bundle status in the requested legacy format.
 *
 * XML output is delegated to bundle_print_xml(); otherwise a summary header
 * is printed followed by either each replica's member resources
 * (pe_print_implicit) or one compact line per replica, with HTML list
 * markup added when pe_print_html is set.
 */
void
pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                 void *print_data)
{
    pe__bundle_variant_data_t *bundle_data = NULL;
    char *child_text = NULL;

    CRM_CHECK(rsc != NULL, return);

    if (options & pe_print_xml) {
        bundle_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_bundle_variant_data(bundle_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                 pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                 rsc->id, bundle_data->image,
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    if (options & pe_print_html) {
        status_print("<br />\n<ul>\n");
    }

    for (GList *gIter = bundle_data->replicas; gIter != NULL;
         gIter = gIter->next) {
        pe__bundle_replica_t *replica = gIter->data;

        CRM_ASSERT(replica);
        if (options & pe_print_html) {
            status_print("<li>");
        }

        if (is_set(options, pe_print_implicit)) {
            // Verbose mode: print each member resource individually
            child_text = crm_strdup_printf(" %s", pre_text);
            if(g_list_length(bundle_data->replicas) > 1) {
                status_print(" %sReplica[%d]\n", pre_text, replica->offset);
            }
            if (options & pe_print_html) {
                status_print("<br />\n<ul>\n");
            }
            // NULL members are skipped inside print_rsc_in_list()
            print_rsc_in_list(replica->ip, child_text, options, print_data);
            print_rsc_in_list(replica->container, child_text, options, print_data);
            print_rsc_in_list(replica->remote, child_text, options, print_data);
            print_rsc_in_list(replica->child, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</ul>\n");
            }
        } else {
            // Compact mode: one summary line per replica
            child_text = crm_strdup_printf("%s ", pre_text);
            print_bundle_replica(replica, child_text, options, print_data);
        }
        free(child_text);

        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
}
static void
free_bundle_replica(pe__bundle_replica_t *replica)
{
if (replica == NULL) {
return;
}
if (replica->node) {
free(replica->node);
replica->node = NULL;
}
if (replica->ip) {
free_xml(replica->ip->xml);
replica->ip->xml = NULL;
replica->ip->fns->free(replica->ip);
replica->ip = NULL;
}
if (replica->container) {
free_xml(replica->container->xml);
replica->container->xml = NULL;
replica->container->fns->free(replica->container);
replica->container = NULL;
}
if (replica->remote) {
free_xml(replica->remote->xml);
replica->remote->xml = NULL;
replica->remote->fns->free(replica->remote);
replica->remote = NULL;
}
free(replica->ipaddr);
free(replica);
}
/* Free a bundle resource's variant data and everything it owns.
 *
 * Replica member resources (ip/container/remote) are freed per replica by
 * free_bundle_replica(); the cloned child is freed once, separately, below.
 */
void
pe__free_bundle(pe_resource_t *rsc)
{
    pe__bundle_variant_data_t *bundle_data = NULL;

    CRM_CHECK(rsc != NULL, return);

    get_bundle_variant_data(bundle_data, rsc);
    pe_rsc_trace(rsc, "Freeing %s", rsc->id);

    free(bundle_data->prefix);
    free(bundle_data->image);
    free(bundle_data->control_port);
    free(bundle_data->host_network);
    free(bundle_data->host_netmask);
    free(bundle_data->ip_range_start);
    free(bundle_data->container_network);
    free(bundle_data->launcher_options);
    free(bundle_data->container_command);
    free(bundle_data->container_host_options);

    g_list_free_full(bundle_data->replicas,
                     (GDestroyNotify) free_bundle_replica);
    g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
    g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
    // Free only the list, not the children (the child is freed below)
    g_list_free(rsc->children);

    if(bundle_data->child) {
        free_xml(bundle_data->child->xml);
        bundle_data->child->xml = NULL;
        bundle_data->child->fns->free(bundle_data->child);
    }
    // NOTE(review): bundle_data itself is presumably released via
    // common_free(rsc) freeing variant_opaque — confirm in common_free()
    common_free(rsc);
}
/* Bundles do not track a role of their own, so the answer is always
 * RSC_ROLE_UNKNOWN regardless of the "current" flag.
 */
enum rsc_role_e
pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
{
    return RSC_ROLE_UNKNOWN;
}
/*!
* \brief Get the number of configured replicas in a bundle
*
* \param[in] rsc Bundle resource
*
* \return Number of configured replicas, or 0 on error
*/
int
pe_bundle_replicas(const resource_t *rsc)
{
    pe__bundle_variant_data_t *bundle_data = NULL;

    if ((rsc == NULL) || (rsc->variant != pe_container)) {
        return 0; // Not a bundle
    }

    get_bundle_variant_data(bundle_data, rsc);
    return bundle_data->nreplicas;
}
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index bf385a1d50..416d642994 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1075 +1,1075 @@
/*
* Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
+#include <crm/common/curses_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
-#include <unpack.h>
#include <pe_status_private.h>
#include <crm/msg_xml.h>
#define VARIANT_CLONE 1
#include "./variant.h"
/* Force a clone to be anonymous, warning the user.
 *
 * Used for resource standards that cannot support multiple unique copies
 * per node: caps copies-per-node at 1 and total copies at the node count.
 * Non-clone resources are ignored.
 */
void
pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
               pe_working_set_t *data_set)
{
    clone_variant_data_t *clone_data = NULL;

    if (!pe_rsc_is_clone(rsc)) {
        return;
    }

    get_clone_variant_data(clone_data, rsc);
    pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
            "such as %s can be used only as anonymous clones",
            rsc->id, standard, rid);

    clone_data->clone_node_max = 1;
    clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                   g_list_length(data_set->nodes));
}
/* Look up a clone instance by its instance number suffix.
 *
 * Instance IDs have the form "<child-id>:<sub_id>".
 */
resource_t *
find_clone_instance(resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
{
    resource_t *instance = NULL;
    char *instance_id = NULL;
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    instance_id = crm_concat(ID(clone_data->xml_obj_child), sub_id, ':');
    instance = pe_find_resource(rsc->children, instance_id);
    free(instance_id);
    return instance;
}
/* Create the next clone instance as a child of the clone resource.
 *
 * Copies the clone's child XML, assigns the next instance number, and
 * unpacks it as a child resource. Instances beyond clone-max are flagged
 * as orphans.
 *
 * \param[in,out] rsc       Clone resource to create an instance of
 * \param[in,out] data_set  Cluster working set containing \p rsc
 *
 * \return Newly created instance, or NULL on error
 */
pe_resource_t *
pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    gboolean as_orphan = FALSE;
    char *inc_num = NULL;
    char *inc_max = NULL;
    resource_t *child_rsc = NULL;
    xmlNode *child_copy = NULL;
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    /* Bug fix: this function returns a pointer, so the bail-out must be
     * "return NULL" rather than "return FALSE" (an int implicitly
     * converted to a null pointer).
     */
    CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);

    if (clone_data->total_clones >= clone_data->clone_max) {
        // If we've already used all available instances, this is an orphan
        as_orphan = TRUE;
    }

    // Allocate instance numbers in numerical order (starting at 0)
    inc_num = crm_itoa(clone_data->total_clones);
    inc_max = crm_itoa(clone_data->clone_max);

    child_copy = copy_xml(clone_data->xml_obj_child);

    crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);

    if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
        pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
        child_rsc = NULL;
        goto bail;
    }
    /* child_rsc->globally_unique = rsc->globally_unique; */

    CRM_ASSERT(child_rsc);
    clone_data->total_clones += 1;
    pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
    rsc->children = g_list_append(rsc->children, child_rsc);
    if (as_orphan) {
        set_bit_recursive(child_rsc, pe_rsc_orphan);
    }

    add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
    print_resource(LOG_TRACE, "Added ", child_rsc, FALSE);

  bail:
    free(inc_num);
    free(inc_max);

    return child_rsc;
}
/* Unpack a clone resource definition.
 *
 * Reads clone meta-attributes (instance counts, ordering, promotable
 * limits), locates the single primitive or group child to clone, and
 * creates one child instance per available instance number.
 *
 * Returns TRUE on success, FALSE if the definition was invalid.
 */
gboolean
clone_unpack(resource_t * rsc, pe_working_set_t * data_set)
{
    int lpc = 0;
    xmlNode *a_child = NULL;
    xmlNode *xml_obj = rsc->xml;
    clone_variant_data_t *clone_data = NULL;

    const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
    const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
    const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);

    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    clone_data = calloc(1, sizeof(clone_variant_data_t));
    rsc->variant_opaque = clone_data;

    if (is_set(rsc->flags, pe_rsc_promotable)) {
        const char *promoted_max = NULL;
        const char *promoted_node_max = NULL;

        promoted_max = g_hash_table_lookup(rsc->meta,
                                           XML_RSC_ATTR_PROMOTED_MAX);
        if (promoted_max == NULL) {
            // @COMPAT deprecated since 2.0.0
            promoted_max = g_hash_table_lookup(rsc->meta,
                                               XML_RSC_ATTR_MASTER_MAX);
        }

        promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                XML_RSC_ATTR_PROMOTED_NODEMAX);
        if (promoted_node_max == NULL) {
            // @COMPAT deprecated since 2.0.0
            promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                    XML_RSC_ATTR_MASTER_NODEMAX);
        }

        clone_data->promoted_max = crm_parse_int(promoted_max, "1");
        clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
    }

    // Implied by calloc()
    /* clone_data->xml_obj_child = NULL; */

    clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");

    if (max_clones) {
        clone_data->clone_max = crm_parse_int(max_clones, "1");

    } else if (g_list_length(data_set->nodes) > 0) {
        // Default clone-max: one instance per cluster node
        clone_data->clone_max = g_list_length(data_set->nodes);

    } else {
        clone_data->clone_max = 1; /* Handy during crm_verify */
    }

    clone_data->ordered = crm_is_true(ordered);

    if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
        crm_config_err("Anonymous clones (%s) may only support one copy per node", rsc->id);
        clone_data->clone_node_max = 1;
    }

    pe_rsc_trace(rsc, "Options for %s", rsc->id);
    pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
    pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
    pe_rsc_trace(rsc, "\tClone is unique: %s",
                 is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
    pe_rsc_trace(rsc, "\tClone is promotable: %s",
                 is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false");

    // Clones may contain a single group or primitive
    for (a_child = __xml_first_child(xml_obj); a_child != NULL;
         a_child = __xml_next_element(a_child)) {

        if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
            || crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) {
            clone_data->xml_obj_child = a_child;
            break;
        }
    }

    if (clone_data->xml_obj_child == NULL) {
        crm_config_err("%s has nothing to clone", rsc->id);
        return FALSE;
    }

    /*
     * Make clones ever so slightly sticky by default
     *
     * This helps ensure clone instances are not shuffled around the cluster
     * for no benefit in situations when pre-allocation is not appropriate
     */
    if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
        add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
    }

    /* This ensures that the globally-unique value always exists for children to
     * inherit when being unpacked, as well as in resource agents' environment.
     */
    add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                   is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);

    if (clone_data->clone_max <= 0) {
        /* Create one child instance so that unpack_find_resource() will hook up
         * any orphans up to the parent correctly.
         */
        if (pe__create_clone_child(rsc, data_set) == NULL) {
            return FALSE;
        }

    } else {
        // Create a child instance for each available instance number
        for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
            if (pe__create_clone_child(rsc, data_set) == NULL) {
                return FALSE;
            }
        }
    }

    pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
    return TRUE;
}
/* Report whether a clone is active.
 *
 * With all == FALSE, TRUE if any child is active; with all == TRUE,
 * TRUE only if every child is active.
 */
gboolean
clone_active(resource_t * rsc, gboolean all)
{
    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        resource_t *child = (resource_t *) iter->data;
        gboolean active = child->fns->active(child, all);

        if (active && !all) {
            return TRUE;  // "any" mode: one active child suffices
        }
        if (!active && all) {
            return FALSE; // "all" mode: one inactive child suffices
        }
    }

    // No child was decisive: all active ("all") or none active ("any")
    return all? TRUE : FALSE;
}
/*!
 * \internal
 * \brief Print a one-line node-list summary ("<prefix><type>: [ list ]<suffix>")
 *
 * \param[in] list        Space-separated node names (NULL means print nothing)
 * \param[in] prefix      Indentation/prefix string printed first
 * \param[in] type        Label for the list (e.g. "Masters", "Stopped")
 * \param[in] suffix      Text appended after the list (NULL is treated as "")
 * \param[in] options     Bitmask of pe_print_* options selecting the format
 * \param[in] print_data  Output destination (used implicitly by status_print)
 */
static void
short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
{
    if(suffix == NULL) {
        suffix = "";
    }

    if (list) {
        if (options & pe_print_html) {
            // HTML output wraps the whole line in a list item
            status_print("<li>");
        }
        status_print("%s%s: [%s ]%s", prefix, type, list, suffix);

        if (options & pe_print_html) {
            status_print("</li>\n");

        } else if (options & pe_print_suppres_nl) {
            /* nothing */

        } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
            // Plain printf/curses output gets an explicit trailing newline
            status_print("\n");
        }
    }
}
/*!
 * \internal
 * \brief Write a one-line node-list summary to a text output destination
 *
 * \param[in] out      Output object whose \c dest stream is written to
 * \param[in] list     Space-separated node names (NULL means write nothing)
 * \param[in] prefix   Indentation/prefix string written first
 * \param[in] type     Label for the list (e.g. "Masters", "Stopped")
 * \param[in] suffix   Text appended after the list (NULL is treated as "")
 * \param[in] options  pe_print_* bitmask (only pe_print_suppres_nl is honored)
 */
static void
pe__short_output_text(pcmk__output_t *out, char *list, const char *prefix, const char *type, const char *suffix, long options)
{
    const char *tail = (suffix != NULL)? suffix : "";

    if (list == NULL) {
        return;
    }

    fprintf(out->dest, "%s%s: [%s ]%s", prefix, type, list, tail);
    if ((options & pe_print_suppres_nl) == 0) {
        // Terminate the line unless the caller asked us not to
        fprintf(out->dest, "\n");
    }
}
/*!
 * \internal
 * \brief Output a one-line node-list summary as an HTML list item
 *
 * \param[in] out      Output object to use
 * \param[in] list     Space-separated node names (NULL means output nothing)
 * \param[in] type     Label for the list (e.g. "Masters", "Stopped")
 * \param[in] suffix   Text appended after the list (NULL is treated as "")
 * \param[in] options  Unused here; kept for symmetry with the text variant
 */
static void
pe__short_output_html(pcmk__output_t *out, char *list, const char *type, const char *suffix, long options)
{
    char buffer[LINE_MAX];

    if (list == NULL) {
        return;
    }

    // NOTE(review): very long node lists are silently truncated at LINE_MAX
    snprintf(buffer, LINE_MAX, " %s: [%s ]%s", type, list, suffix ? suffix : "");
    out->list_item(out, NULL, buffer);
}
/*!
 * \internal
 * \brief Get a resource's configured target-role string, if any
 *
 * Falls back to the first child's meta-attributes when the resource itself
 * has no target-role configured.
 *
 * \param[in] rsc  Resource to check
 *
 * \return Configured target-role string, or NULL if none
 */
static const char *
configured_role_str(resource_t * rsc)
{
    const char *role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);

    if ((role == NULL) && (rsc->children != NULL) && (rsc->children->data != NULL)) {
        resource_t *first_child = (resource_t *) rsc->children->data;

        role = g_hash_table_lookup(first_child->meta, XML_RSC_ATTR_TARGET_ROLE);
    }
    return role;
}
/*!
 * \internal
 * \brief Get a resource's configured target-role as an enum value
 *
 * \param[in] rsc  Resource to check
 *
 * \return Configured role, or RSC_ROLE_UNKNOWN when none is configured
 */
static enum rsc_role_e
configured_role(resource_t * rsc)
{
    const char *role_s = configured_role_str(rsc);

    return (role_s == NULL)? RSC_ROLE_UNKNOWN : text2role(role_s);
}
/*!
 * \internal
 * \brief Print clone status in the legacy XML format (pe_print_xml)
 *
 * \param[in] rsc         Clone resource to print
 * \param[in] pre_text    Indentation prefix for this nesting level
 * \param[in] options     Bitmask of pe_print_* options passed on to children
 * \param[in] print_data  Output destination (used implicitly by status_print)
 */
static void
clone_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    // Children are indented one extra level
    char *child_text = crm_concat(pre_text, " ", ' ');
    const char *target_role = configured_role_str(rsc);
    GListPtr gIter = rsc->children;

    // Open the <clone> element with the clone's notable attributes
    status_print("%s<clone ", pre_text);
    status_print("id=\"%s\" ", rsc->id);
    status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false");
    status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print("failure_ignored=\"%s\" ",
                 is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
    if (target_role) {
        // target_role attribute is emitted only when one is configured
        status_print("target_role=\"%s\" ", target_role);
    }
    status_print(">\n");

    // Recurse into each instance
    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        child_rsc->fns->print(child_rsc, child_text, options, print_data);
    }

    status_print("%s</clone>\n", pre_text);
    free(child_text);
}
/*!
 * \brief Check a flag on a resource and all of its descendents
 *
 * \param[in] rsc   Root of the resource tree to check
 * \param[in] flag  Resource flag(s) to test
 * \param[in] any   If TRUE, return TRUE when any resource in the tree has the
 *                  flag set; if FALSE, return TRUE only when all of them do
 *
 * \return TRUE or FALSE according to the \p any semantics above
 */
bool is_set_recursive(resource_t * rsc, long long flag, bool any)
{
    bool require_all = !any;

    // First, the resource itself
    if (is_set(rsc->flags, flag)) {
        if (any) {
            return TRUE;
        }
    } else if (require_all) {
        return FALSE;
    }

    // Then every descendent, recursively
    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_set_recursive((resource_t *) iter->data, flag, any)) {
            if (any) {
                return TRUE;
            }
        } else if (require_all) {
            return FALSE;
        }
    }

    /* Nothing short-circuited: every check passed in "all" mode, or nothing
     * matched in "any" mode.
     */
    return require_all ? TRUE : FALSE;
}
/*!
 * \brief Print clone status using the legacy print interface
 *
 * Fully-active anonymous instances are summarized as Masters/Slaves/Started
 * node lists; anything unusual (unique, pending, orphaned, unmanaged, failed,
 * or partially active) is printed individually.
 *
 * \param[in] rsc         Clone resource to print
 * \param[in] pre_text    Indentation prefix (defaults to four spaces)
 * \param[in] options     Bitmask of pe_print_* options
 * \param[in] print_data  Output destination (used implicitly by status_print)
 */
void
clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    char *list_text = NULL;
    char *child_text = NULL;
    char *stopped_list = NULL;
    GListPtr master_list = NULL;
    GListPtr started_list = NULL;
    GListPtr gIter = rsc->children;

    clone_variant_data_t *clone_data = NULL;
    int active_instances = 0;

    if (pre_text == NULL) {
        pre_text = " ";
    }

    if (options & pe_print_xml) {
        // XML requested: delegate entirely to the XML formatter
        clone_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_clone_variant_data(clone_data, rsc);

    child_text = crm_concat(pre_text, " ", ' ');

    // Header line with the clone's notable attributes
    status_print("%sClone Set: %s [%s]%s%s%s",
                 pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
                 is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");

    if (options & pe_print_html) {
        status_print("\n<ul>\n");

    } else if ((options & pe_print_log) == 0) {
        status_print("\n");
    }

    // Classify each instance: print individually, or bucket by role/state
    for (; gIter != NULL; gIter = gIter->next) {
        gboolean print_full = FALSE;
        resource_t *child_rsc = (resource_t *) gIter->data;
        gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);

        if (options & pe_print_clone_details) {
            print_full = TRUE;
        }

        if (is_set(rsc->flags, pe_rsc_unique)) {
            // Print individual instance when unique (except stopped orphans)
            if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                print_full = TRUE;
            }

            // Everything else in this block is for anonymous clones

        } else if (is_set(options, pe_print_pending)
                   && (child_rsc->pending_task != NULL)
                   && strcmp(child_rsc->pending_task, "probe")) {
            // Print individual instance when non-probe action is pending
            print_full = TRUE;

        } else if (partially_active == FALSE) {
            // List stopped instances when requested (except orphans)
            if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                && is_not_set(options, pe_print_clone_active)) {
                stopped_list = add_list_element(stopped_list, child_rsc->id);
            }

        } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                   || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                   || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
            // Print individual instance when active orphaned/unmanaged/failed
            print_full = TRUE;

        } else if (child_rsc->fns->active(child_rsc, TRUE)) {
            // Instance of fully active anonymous clone
            node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);

            if (location) {
                // Instance is active on a single node
                enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);

                if (location->details->online == FALSE && location->details->unclean) {
                    print_full = TRUE;

                } else if (a_role > RSC_ROLE_SLAVE) {
                    // Promoted instances are summarized separately
                    master_list = g_list_append(master_list, location);

                } else {
                    started_list = g_list_append(started_list, location);
                }

            } else {
                /* uncolocated group - bleh */
                print_full = TRUE;
            }

        } else {
            // Instance of partially active anonymous clone
            print_full = TRUE;
        }

        if (print_full) {
            if (options & pe_print_html) {
                status_print("<li>\n");
            }
            child_rsc->fns->print(child_rsc, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }
    }

    /* Masters */
    master_list = g_list_sort(master_list, sort_node_uname);
    for (gIter = master_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    short_print(list_text, child_text, "Masters", NULL, options, print_data);
    g_list_free(master_list);
    free(list_text);
    list_text = NULL;

    /* Started/Slaves */
    started_list = g_list_sort(started_list, sort_node_uname);
    for (gIter = started_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    if (is_set(rsc->flags, pe_rsc_promotable)) {
        enum rsc_role_e role = configured_role(rsc);

        if(role == RSC_ROLE_SLAVE) {
            // target-role is deliberately capping instances at slave
            short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
        } else {
            short_print(list_text, child_text, "Slaves", NULL, options, print_data);
        }

    } else {
        short_print(list_text, child_text, "Started", NULL, options, print_data);
    }

    g_list_free(started_list);
    free(list_text);
    list_text = NULL;

    if (is_not_set(options, pe_print_clone_active)) {
        const char *state = "Stopped";
        enum rsc_role_e role = configured_role(rsc);

        if (role == RSC_ROLE_STOPPED) {
            state = "Stopped (disabled)";
        }

        if (is_not_set(rsc->flags, pe_rsc_unique)
            && (clone_data->clone_max > active_instances)) {

            GListPtr nIter;
            GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);

            /* Custom stopped list for non-unique clones */
            free(stopped_list); stopped_list = NULL;

            if (g_list_length(list) == 0) {
                /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                 * If we've not probed for them yet, the Stopped list will be empty
                 */
                list = g_hash_table_get_values(rsc->known_on);
            }

            list = g_list_sort(list, sort_node_uname);
            for (nIter = list; nIter != NULL; nIter = nIter->next) {
                node_t *node = (node_t *)nIter->data;

                if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                    // A node where the clone could run but currently is not
                    stopped_list = add_list_element(stopped_list, node->details->uname);
                }
            }
            g_list_free(list);
        }

        short_print(stopped_list, child_text, state, NULL, options, print_data);
        free(stopped_list);
    }

    if (options & pe_print_html) {
        status_print("</ul>\n");
    }

    free(child_text);
}
/*!
 * \internal
 * \brief Output clone status via the formatted-output API (XML formatter)
 *
 * \param[in,out] out   Output object
 * \param[in]     args  Message arguments: long options, resource_t *rsc
 *
 * \return Return code of pe__name_and_nvpairs_xml (asserted to be 0)
 */
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, long);
    resource_t *rsc = va_arg(args, resource_t *);

    GListPtr gIter = rsc->children;

    // Open a <clone> element carrying the clone's notable attributes
    int rc = pe__name_and_nvpairs_xml(out, true, "clone", 7
             , "id", rsc->id
             , "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable))
             , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
             , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
             , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
             , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
             , "target_role", configured_role_str(rsc));
    CRM_ASSERT(rc == 0);

    // Emit each instance as a nested message, keyed by its XML element name
    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        out->message(out, crm_element_name(child_rsc->xml), options, child_rsc);
    }

    // Close the <clone> element
    pcmk__output_xml_pop_parent(out);
    return rc;
}
/*!
 * \internal
 * \brief Output clone status via the formatted-output API (HTML formatter)
 *
 * Mirrors clone_print()'s classification logic: fully-active anonymous
 * instances are summarized as Masters/Slaves/Started node lists, while
 * anything unusual is output individually inside an <li> element.
 *
 * \param[in,out] out   Output object
 * \param[in]     args  Message arguments: long options, resource_t *rsc
 *
 * \return 0
 */
int
pe__clone_html(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, long);
    resource_t *rsc = va_arg(args, resource_t *);

    char *list_text = NULL;
    char *stopped_list = NULL;
    GListPtr master_list = NULL;
    GListPtr started_list = NULL;
    GListPtr gIter = rsc->children;

    clone_variant_data_t *clone_data = NULL;
    int active_instances = 0;
    char buffer[LINE_MAX];

    get_clone_variant_data(clone_data, rsc);

    // Header line with the clone's notable attributes, used as the list title
    snprintf(buffer, LINE_MAX, "Clone Set: %s [%s]%s%s%s",
             rsc->id, ID(clone_data->xml_obj_child),
             is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
             is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
             is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");

    out->begin_list(out, buffer, NULL, NULL);

    // Classify each instance: output individually, or bucket by role/state
    for (; gIter != NULL; gIter = gIter->next) {
        gboolean print_full = FALSE;
        resource_t *child_rsc = (resource_t *) gIter->data;
        gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);

        if (options & pe_print_clone_details) {
            print_full = TRUE;
        }

        if (is_set(rsc->flags, pe_rsc_unique)) {
            // Print individual instance when unique (except stopped orphans)
            if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                print_full = TRUE;
            }

            // Everything else in this block is for anonymous clones

        } else if (is_set(options, pe_print_pending)
                   && (child_rsc->pending_task != NULL)
                   && strcmp(child_rsc->pending_task, "probe")) {
            // Print individual instance when non-probe action is pending
            print_full = TRUE;

        } else if (partially_active == FALSE) {
            // List stopped instances when requested (except orphans)
            if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                && is_not_set(options, pe_print_clone_active)) {
                stopped_list = add_list_element(stopped_list, child_rsc->id);
            }

        } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                   || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                   || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
            // Print individual instance when active orphaned/unmanaged/failed
            print_full = TRUE;

        } else if (child_rsc->fns->active(child_rsc, TRUE)) {
            // Instance of fully active anonymous clone
            node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);

            if (location) {
                // Instance is active on a single node
                enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);

                if (location->details->online == FALSE && location->details->unclean) {
                    print_full = TRUE;

                } else if (a_role > RSC_ROLE_SLAVE) {
                    // Promoted instances are summarized separately
                    master_list = g_list_append(master_list, location);

                } else {
                    started_list = g_list_append(started_list, location);
                }

            } else {
                /* uncolocated group - bleh */
                print_full = TRUE;
            }

        } else {
            // Instance of partially active anonymous clone
            print_full = TRUE;
        }

        if (print_full) {
            // Wrap the individual instance output in its own <li> element
            pcmk__output_xml_create_parent(out, "li");
            out->message(out, crm_element_name(child_rsc->xml), options, child_rsc);
            pcmk__output_xml_pop_parent(out);
        }
    }

    /* Masters */
    master_list = g_list_sort(master_list, sort_node_uname);
    for (gIter = master_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    pe__short_output_html(out, list_text, "Masters", NULL, options);
    g_list_free(master_list);
    free(list_text);
    list_text = NULL;

    /* Started/Slaves */
    started_list = g_list_sort(started_list, sort_node_uname);
    for (gIter = started_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    if (is_set(rsc->flags, pe_rsc_promotable)) {
        enum rsc_role_e role = configured_role(rsc);

        if(role == RSC_ROLE_SLAVE) {
            // target-role is deliberately capping instances at slave
            pe__short_output_html(out, list_text, "Slaves (target-role)", NULL, options);
        } else {
            pe__short_output_html(out, list_text, "Slaves", NULL, options);
        }

    } else {
        pe__short_output_html(out, list_text, "Started", NULL, options);
    }

    g_list_free(started_list);
    free(list_text);
    list_text = NULL;

    if (is_not_set(options, pe_print_clone_active)) {
        const char *state = "Stopped";
        enum rsc_role_e role = configured_role(rsc);

        if (role == RSC_ROLE_STOPPED) {
            state = "Stopped (disabled)";
        }

        if (is_not_set(rsc->flags, pe_rsc_unique)
            && (clone_data->clone_max > active_instances)) {

            GListPtr nIter;
            GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);

            /* Custom stopped list for non-unique clones */
            free(stopped_list);
            stopped_list = NULL;

            if (g_list_length(list) == 0) {
                /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                 * If we've not probed for them yet, the Stopped list will be empty
                 */
                list = g_hash_table_get_values(rsc->known_on);
            }

            list = g_list_sort(list, sort_node_uname);
            for (nIter = list; nIter != NULL; nIter = nIter->next) {
                node_t *node = (node_t *)nIter->data;

                if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                    // A node where the clone could run but currently is not
                    stopped_list = add_list_element(stopped_list, node->details->uname);
                }
            }
            g_list_free(list);
        }

        pe__short_output_html(out, stopped_list, state, NULL, options);
        free(stopped_list);
    }

    out->end_list(out);

    return 0;
}
/*!
 * \internal
 * \brief Output clone status via the formatted-output API (text formatter)
 *
 * Mirrors clone_print()'s classification logic: fully-active anonymous
 * instances are summarized as Masters/Slaves/Started node lists, while
 * anything unusual is output individually.
 *
 * \param[in,out] out   Output object
 * \param[in]     args  Message arguments: long options, resource_t *rsc,
 *                      const char *pre_text (indentation prefix, may be NULL)
 *
 * \return 0
 */
int
pe__clone_text(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, long);
    resource_t *rsc = va_arg(args, resource_t *);
    const char *pre_text = va_arg(args, char *);

    char *list_text = NULL;
    char *child_text = NULL;
    char *stopped_list = NULL;
    GListPtr master_list = NULL;
    GListPtr started_list = NULL;
    GListPtr gIter = rsc->children;

    clone_variant_data_t *clone_data = NULL;
    int active_instances = 0;

    if (pre_text == NULL) {
        pre_text = " ";
    }

    get_clone_variant_data(clone_data, rsc);

    child_text = crm_concat(pre_text, " ", ' ');

    /* Header line with the clone's notable attributes.
     * NOTE(review): unlike clone_print(), no newline is written after this
     * header -- confirm whether that is intentional for this formatter.
     */
    fprintf(out->dest, "%sClone Set: %s [%s]%s%s%s",
            pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
            is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
            is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
            is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");

    // Classify each instance: output individually, or bucket by role/state
    for (; gIter != NULL; gIter = gIter->next) {
        gboolean print_full = FALSE;
        resource_t *child_rsc = (resource_t *) gIter->data;
        gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);

        if (options & pe_print_clone_details) {
            print_full = TRUE;
        }

        if (is_set(rsc->flags, pe_rsc_unique)) {
            // Print individual instance when unique (except stopped orphans)
            if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                print_full = TRUE;
            }

            // Everything else in this block is for anonymous clones

        } else if (is_set(options, pe_print_pending)
                   && (child_rsc->pending_task != NULL)
                   && strcmp(child_rsc->pending_task, "probe")) {
            // Print individual instance when non-probe action is pending
            print_full = TRUE;

        } else if (partially_active == FALSE) {
            // List stopped instances when requested (except orphans)
            if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                && is_not_set(options, pe_print_clone_active)) {
                stopped_list = add_list_element(stopped_list, child_rsc->id);
            }

        } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                   || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                   || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
            // Print individual instance when active orphaned/unmanaged/failed
            print_full = TRUE;

        } else if (child_rsc->fns->active(child_rsc, TRUE)) {
            // Instance of fully active anonymous clone
            node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);

            if (location) {
                // Instance is active on a single node
                enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);

                if (location->details->online == FALSE && location->details->unclean) {
                    print_full = TRUE;

                } else if (a_role > RSC_ROLE_SLAVE) {
                    // Promoted instances are summarized separately
                    master_list = g_list_append(master_list, location);

                } else {
                    started_list = g_list_append(started_list, location);
                }

            } else {
                /* uncolocated group - bleh */
                print_full = TRUE;
            }

        } else {
            // Instance of partially active anonymous clone
            print_full = TRUE;
        }

        if (print_full) {
            out->message(out, crm_element_name(child_rsc->xml), options, child_rsc, child_text);
        }
    }

    /* Masters */
    master_list = g_list_sort(master_list, sort_node_uname);
    for (gIter = master_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    pe__short_output_text(out, list_text, child_text, "Masters", NULL, options);
    g_list_free(master_list);
    free(list_text);
    list_text = NULL;

    /* Started/Slaves */
    started_list = g_list_sort(started_list, sort_node_uname);
    for (gIter = started_list; gIter; gIter = gIter->next) {
        node_t *host = gIter->data;

        list_text = add_list_element(list_text, host->details->uname);
        active_instances++;
    }

    if (is_set(rsc->flags, pe_rsc_promotable)) {
        enum rsc_role_e role = configured_role(rsc);

        if(role == RSC_ROLE_SLAVE) {
            // target-role is deliberately capping instances at slave
            pe__short_output_text(out, list_text, child_text, "Slaves (target-role)", NULL, options);
        } else {
            pe__short_output_text(out, list_text, child_text, "Slaves", NULL, options);
        }

    } else {
        pe__short_output_text(out, list_text, child_text, "Started", NULL, options);
    }

    g_list_free(started_list);
    free(list_text);
    list_text = NULL;

    if (is_not_set(options, pe_print_clone_active)) {
        const char *state = "Stopped";
        enum rsc_role_e role = configured_role(rsc);

        if (role == RSC_ROLE_STOPPED) {
            state = "Stopped (disabled)";
        }

        if (is_not_set(rsc->flags, pe_rsc_unique)
            && (clone_data->clone_max > active_instances)) {

            GListPtr nIter;
            GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);

            /* Custom stopped list for non-unique clones */
            free(stopped_list);
            stopped_list = NULL;

            if (g_list_length(list) == 0) {
                /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                 * If we've not probed for them yet, the Stopped list will be empty
                 */
                list = g_hash_table_get_values(rsc->known_on);
            }

            list = g_list_sort(list, sort_node_uname);
            for (nIter = list; nIter != NULL; nIter = nIter->next) {
                node_t *node = (node_t *)nIter->data;

                if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                    // A node where the clone could run but currently is not
                    stopped_list = add_list_element(stopped_list, node->details->uname);
                }
            }
            g_list_free(list);
        }

        pe__short_output_text(out, stopped_list, child_text, state, NULL, options);
        free(stopped_list);
    }

    free(child_text);

    return 0;
}
/*!
 * \brief Free a clone resource, its children, and their XML
 *
 * \param[in] rsc  Clone resource to free
 */
void
clone_free(resource_t * rsc)
{
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    pe_rsc_trace(rsc, "Freeing %s", rsc->id);

    for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        CRM_ASSERT(child_rsc);
        pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
        // Free the child's XML (and clear the pointers) before the child itself
        free_xml(child_rsc->xml);
        child_rsc->xml = NULL;
        /* There could be a saved unexpanded xml */
        free_xml(child_rsc->orig_xml);
        child_rsc->orig_xml = NULL;
        child_rsc->fns->free(child_rsc);
    }

    g_list_free(rsc->children);

    if (clone_data) {
        // Any notification data should already have been cleaned up
        CRM_ASSERT(clone_data->demote_notify == NULL);
        CRM_ASSERT(clone_data->stop_notify == NULL);
        CRM_ASSERT(clone_data->start_notify == NULL);
        CRM_ASSERT(clone_data->promote_notify == NULL);
    }

    common_free(rsc);
}
/*!
 * \brief Get a clone's role, defined as the highest role of any instance
 *
 * \param[in] rsc      Clone resource to check
 * \param[in] current  Whether to use current rather than pending role
 *
 * \return Highest role found among the clone's instances
 */
enum rsc_role_e
clone_resource_state(const resource_t * rsc, gboolean current)
{
    enum rsc_role_e best_role = RSC_ROLE_UNKNOWN;

    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        resource_t *instance = (resource_t *) iter->data;
        enum rsc_role_e instance_role = instance->fns->state(instance, current);

        if (instance_role > best_role) {
            best_role = instance_role;
        }
    }

    pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(best_role));
    return best_role;
}
/*!
 * \internal
 * \brief Check whether a clone has an instance for every node
 *
 * \param[in] rsc       Clone to check
 * \param[in] data_set  Cluster state
 *
 * \return TRUE if \p rsc is a clone with one configured instance per cluster
 *         node, otherwise FALSE
 */
bool
pe__is_universal_clone(pe_resource_t *rsc,
                       pe_working_set_t *data_set)
{
    clone_variant_data_t *clone_data = NULL;

    if (!pe_rsc_is_clone(rsc)) {
        return FALSE;
    }

    get_clone_variant_data(clone_data, rsc);

    // "Universal" means one configured instance per cluster node
    return (clone_data->clone_max == g_list_length(data_set->nodes))? TRUE : FALSE;
}
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index 146b653cb5..29e4a54370 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,221 +1,221 @@
/*
* Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
+#include <crm/common/curses_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
-#include <unpack.h>
#include <crm/msg_xml.h>
#define VARIANT_GROUP 1
#include "./variant.h"
/*!
 * \brief Unpack a group resource's XML into its variant data and children
 *
 * \param[in]     rsc       Group resource being unpacked
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success (including an empty group), FALSE on unpack failure
 */
gboolean
group_unpack(resource_t * rsc, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = rsc->xml;
    xmlNode *xml_native_rsc = NULL;
    group_variant_data_t *group_data = NULL;
    const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
    const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
    const char *clone_id = NULL;

    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    group_data = calloc(1, sizeof(group_variant_data_t));
    group_data->num_children = 0;
    group_data->first_child = NULL;
    group_data->last_child = NULL;
    rsc->variant_opaque = group_data;

    // Groups are ordered and colocated by default; meta-attributes can override
    group_data->ordered = TRUE;
    group_data->colocated = TRUE;

    if (group_ordered != NULL) {
        crm_str_to_boolean(group_ordered, &(group_data->ordered));
    }
    if (group_colocated != NULL) {
        crm_str_to_boolean(group_colocated, &(group_data->colocated));
    }

    // Propagate any clone instance number to the members
    clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);

    for (xml_native_rsc = __xml_first_child(xml_obj); xml_native_rsc != NULL;
         xml_native_rsc = __xml_next_element(xml_native_rsc)) {

        if (crm_str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, TRUE)) {
            resource_t *new_rsc = NULL;

            crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
            if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
                pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
                if (new_rsc != NULL && new_rsc->fns != NULL) {
                    new_rsc->fns->free(new_rsc);
                }
            }

            /* NOTE(review): when common_unpack() fails, new_rsc may have just
             * been freed (or be NULL) yet it is still counted and appended to
             * rsc->children below -- looks like a possible use-after-free or
             * NULL entry; confirm intended behavior.
             */
            group_data->num_children++;
            rsc->children = g_list_append(rsc->children, new_rsc);

            if (group_data->first_child == NULL) {
                group_data->first_child = new_rsc;
            }
            group_data->last_child = new_rsc;
            print_resource(LOG_TRACE, "Added ", new_rsc, FALSE);
        }
    }

    if (group_data->num_children == 0) {
#if 0
        /* Bug #1287 */
        crm_config_err("Group %s did not have any children", rsc->id);
        return FALSE;
#else
        // An empty group is tolerated with a warning (see Bug #1287)
        crm_config_warn("Group %s did not have any children", rsc->id);
        return TRUE;
#endif
    }

    pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);

    return TRUE;
}
/*!
 * \brief Check whether a group is active
 *
 * \param[in] rsc  Group resource to check
 * \param[in] all  If TRUE, every member must be active; otherwise, at least one
 *
 * \return TRUE if the requested condition holds, otherwise FALSE
 */
gboolean
group_active(resource_t * rsc, gboolean all)
{
    // Track whether every member is active, and whether at least one is
    gboolean all_active = TRUE;
    gboolean any_active = FALSE;

    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        resource_t *member = (resource_t *) iter->data;

        if (member->fns->active(member, all)) {
            any_active = TRUE;
        } else {
            all_active = FALSE;
        }
    }

    if (!any_active) {
        // No member active (also covers an empty group)
        return FALSE;
    }
    if (all && !all_active) {
        return FALSE;
    }
    return TRUE;
}
/*!
 * \internal
 * \brief Print group status in the legacy XML format (pe_print_xml)
 *
 * \param[in] rsc         Group resource to print
 * \param[in] pre_text    Indentation prefix for this nesting level
 * \param[in] options     Bitmask of pe_print_* options passed on to members
 * \param[in] print_data  Output destination (used implicitly by status_print)
 */
static void
group_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    GListPtr gIter = rsc->children;
    // Members are indented one extra level
    char *child_text = crm_concat(pre_text, " ", ' ');

    status_print("%s<group id=\"%s\" ", pre_text, rsc->id);
    status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
    status_print(">\n");

    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        child_rsc->fns->print(child_rsc, child_text, options, print_data);
    }

    status_print("%s</group>\n", pre_text);
    free(child_text);
}
/*!
 * \brief Print group status using the legacy print interface
 *
 * \param[in] rsc         Group resource to print
 * \param[in] pre_text    Indentation prefix (defaults to four spaces)
 * \param[in] options     Bitmask of pe_print_* options
 * \param[in] print_data  Output destination (used implicitly by status_print)
 */
void
group_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    char *child_text = NULL;
    GListPtr gIter = rsc->children;

    if (pre_text == NULL) {
        pre_text = " ";
    }

    if (options & pe_print_xml) {
        // XML requested: delegate entirely to the XML formatter
        group_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    child_text = crm_concat(pre_text, " ", ' ');

    status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);

    if (options & pe_print_html) {
        status_print("\n<ul>\n");

    } else if ((options & pe_print_log) == 0) {
        status_print("\n");
    }

    if (options & pe_print_brief) {
        // Brief mode: condensed per-agent summary instead of one line per member
        print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);

    } else {
        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child_rsc = (resource_t *) gIter->data;

            if (options & pe_print_html) {
                status_print("<li>\n");
            }
            child_rsc->fns->print(child_rsc, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }
    }

    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
    free(child_text);
}
/*!
 * \brief Free a group resource and all its members
 *
 * \param[in] rsc  Group resource to free
 */
void
group_free(resource_t * rsc)
{
    CRM_CHECK(rsc != NULL, return);

    pe_rsc_trace(rsc, "Freeing %s", rsc->id);

    for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        CRM_ASSERT(child_rsc);
        pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
        // Each member frees itself via its own variant free method
        child_rsc->fns->free(child_rsc);
    }

    pe_rsc_trace(rsc, "Freeing child list");
    g_list_free(rsc->children);

    common_free(rsc);
}
/*!
 * \brief Get a group's role, defined as the highest role of any member
 *
 * \param[in] rsc      Group resource to check
 * \param[in] current  Whether to use current rather than pending role
 *
 * \return Highest role found among the group's members
 */
enum rsc_role_e
group_resource_state(const resource_t * rsc, gboolean current)
{
    enum rsc_role_e best_role = RSC_ROLE_UNKNOWN;

    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
        resource_t *member = (resource_t *) iter->data;
        enum rsc_role_e member_role = member->fns->state(member, current);

        if (member_role > best_role) {
            best_role = member_role;
        }
    }

    pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(best_role));
    return best_role;
}
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index d1cf86cf6c..85a91dfa98 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1617 +1,1617 @@
/*
* Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
+#include <crm/common/curses_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/complex.h>
#include <crm/pengine/internal.h>
-#include <unpack.h>
#include <crm/msg_xml.h>
#include <pe_status_private.h>
#define VARIANT_NATIVE 1
#include "./variant.h"
/*!
 * \internal
 * \brief Check whether a resource is active on multiple nodes
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if \p rsc is a primitive active on more than one node,
 *         otherwise false
 */
static bool
is_multiply_active(pe_resource_t *rsc)
{
    unsigned int node_count = 0;

    // Only primitives are counted; collective resources yield 0
    if (rsc->variant == pe_native) {
        pe__find_active_requires(rsc, &node_count);
    }
    return node_count > 1;
}
/*!
 * \brief Record that a resource is active on a node, handling multiply-active
 *        recovery policy and propagating to parents
 *
 * \param[in]     rsc       Resource that is active
 * \param[in]     node      Node the resource is active on
 * \param[in,out] data_set  Cluster working set
 */
void
native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
    GListPtr gIter = rsc->running_on;

    CRM_CHECK(node != NULL, return);

    // Nothing to do if this node is already recorded
    for (; gIter != NULL; gIter = gIter->next) {
        node_t *a_node = (node_t *) gIter->data;

        CRM_CHECK(a_node != NULL, return);
        if (safe_str_eq(a_node->details->id, node->details->id)) {
            return;
        }
    }

    pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname,
                 is_set(rsc->flags, pe_rsc_managed)?"":"(unmanaged)");

    rsc->running_on = g_list_append(rsc->running_on, node);
    if (rsc->variant == pe_native) {
        node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
    }

    // A primitive on a node in maintenance mode becomes unmanaged
    if (rsc->variant == pe_native && node->details->maintenance) {
        clear_bit(rsc->flags, pe_rsc_managed);
    }

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        resource_t *p = rsc->parent;

        pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
        resource_location(rsc, node, INFINITY, "not_managed_default", data_set);

        while(p && node->details->online) {
            /* add without the additional location constraint */
            p->running_on = g_list_append(p->running_on, node);
            p = p->parent;
        }
        return;
    }

    if (is_multiply_active(rsc)) {
        // Apply the configured multiple-active recovery policy
        switch (rsc->recovery_type) {
            case recovery_stop_only:
                {
                    GHashTableIter gIter;
                    node_t *local_node = NULL;

                    /* make sure it doesn't come up again */
                    if (rsc->allowed_nodes != NULL) {
                        g_hash_table_destroy(rsc->allowed_nodes);
                    }
                    rsc->allowed_nodes = node_hash_from_list(data_set->nodes);
                    g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
                    while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
                        // Ban the resource from every node
                        local_node->weight = -INFINITY;
                    }
                }
                break;
            case recovery_stop_start:
                // Default policy: stop everywhere, then start on one node
                break;
            case recovery_block:
                clear_bit(rsc->flags, pe_rsc_managed);
                set_bit(rsc->flags, pe_rsc_block);

                /* If the resource belongs to a group or bundle configured with
                 * multiple-active=block, block the entire entity.
                 */
                if (rsc->parent
                    && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
                    && rsc->parent->recovery_type == recovery_block) {
                    GListPtr gIter = rsc->parent->children;

                    for (; gIter != NULL; gIter = gIter->next) {
                        resource_t *child = (resource_t *) gIter->data;

                        clear_bit(child->flags, pe_rsc_managed);
                        set_bit(child->flags, pe_rsc_block);
                    }
                }
                break;
        }
        crm_debug("%s is active on multiple nodes including %s: %s",
                  rsc->id, node->details->uname,
                  recovery2text(rsc->recovery_type));

    } else {
        pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname);
    }

    // Bubble the active-on-node information up through the ancestry
    if (rsc->parent != NULL) {
        native_add_running(rsc->parent, node, data_set);
    }
}
/*!
 * \internal
 * \brief Clear the globally-unique flag (and meta-attribute) on a resource
 *        and every one of its descendents
 *
 * \param[in] rsc  Root of the resource tree to modify
 */
static void
recursive_clear_unique(pe_resource_t *rsc)
{
    clear_bit(rsc->flags, pe_rsc_unique);
    add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);

    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        recursive_clear_unique((pe_resource_t *) iter->data);
    }
}
/*!
 * \brief Unpack a primitive resource, validating agent capabilities
 *
 * \param[in]     rsc       Primitive resource being unpacked
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, FALSE if the agent standard cannot support the
 *         configured promotable clone
 */
gboolean
native_unpack(resource_t * rsc, pe_working_set_t * data_set)
{
    resource_t *parent = uber_parent(rsc);
    native_variant_data_t *native_data = NULL;
    const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    uint32_t ra_caps = pcmk_get_ra_caps(standard);

    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    native_data = calloc(1, sizeof(native_variant_data_t));
    rsc->variant_opaque = native_data;

    // Only some agent standards support unique and promotable clones
    if (is_not_set(ra_caps, pcmk_ra_cap_unique)
        && is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {

        /* @COMPAT We should probably reject this situation as an error (as we
         * do for promotable below) rather than warn and convert, but that would
         * be a backward-incompatible change that we should probably do with a
         * transform at a schema major version bump.
         */
        pe__force_anon(standard, parent, rsc->id, data_set);

        /* Clear globally-unique on the parent and all its descendents unpacked
         * so far (clearing the parent should make any future children unpacking
         * correct). We have to clear this resource explicitly because it isn't
         * hooked into the parent's children yet.
         */
        recursive_clear_unique(parent);
        recursive_clear_unique(rsc);
    }

    if (is_not_set(ra_caps, pcmk_ra_cap_promotable)
        && is_set(parent->flags, pe_rsc_promotable)) {

        pe_err("Resource %s is of type %s and therefore "
               "cannot be used as a promotable clone resource",
               rsc->id, standard);
        return FALSE;
    }
    return TRUE;
}
/* Check whether a resource is on a given node, interpreting the search
 * flags: pe_find_current matches a known current location, pe_find_inactive
 * matches a resource running nowhere, and otherwise the assigned
 * (allocated) location is compared.
 */
static bool
rsc_is_on_node(resource_t *rsc, const node_t *node, int flags)
{
    pe_rsc_trace(rsc, "Checking whether %s is on %s",
                 rsc->id, node->details->uname);

    if (is_set(flags, pe_find_current) && rsc->running_on) {
        GListPtr iter = rsc->running_on;

        for (; iter != NULL; iter = iter->next) {
            if (((node_t *) iter->data)->details == node->details) {
                return TRUE;
            }
        }
        return FALSE;
    }

    if (is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) {
        return TRUE;
    }

    if (is_not_set(flags, pe_find_current) && rsc->allocated_to
        && (rsc->allocated_to->details == node->details)) {
        return TRUE;
    }
    return FALSE;
}
/*!
 * \internal
 * \brief Find a resource matching an ID within a resource subtree
 *
 * \param[in] rsc      Resource to check first, then its children recursively
 * \param[in] id       ID to match against (exact ID, XML ID, clone name, or
 *                     clone base name, depending on \p flags)
 * \param[in] on_node  If not NULL, additionally require the match to be on
 *                     this node (interpretation depends on \p flags)
 * \param[in] flags    Group of pe_find_* matching flags
 *
 * \return Matching resource, or NULL if none found
 */
resource_t *
native_find_rsc(resource_t * rsc, const char *id, const node_t *on_node,
                int flags)
{
    bool match = FALSE;
    resource_t *result = NULL;

    CRM_CHECK(id && rsc && rsc->id, return NULL);

    if (flags & pe_find_clone) {
        const char *rid = ID(rsc->xml);

        // Clone search: only descendents of a clone may match, by ID or XML ID
        if (!pe_rsc_is_clone(uber_parent(rsc))) {
            match = FALSE;

        } else if (!strcmp(id, rsc->id) || safe_str_eq(id, rid)) {
            match = TRUE;
        }

    } else if (!strcmp(id, rsc->id)) {
        // Exact ID match
        match = TRUE;

    } else if (is_set(flags, pe_find_renamed)
               && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
        // Match against the clone instance name
        match = TRUE;

    } else if (is_set(flags, pe_find_any)
               || (is_set(flags, pe_find_anon)
                   && is_not_set(rsc->flags, pe_rsc_unique))) {
        // Match by clone base name (for anonymous clones, or any if requested)
        match = pe_base_name_eq(rsc, id);
    }

    // An ID match may further be constrained to a particular node
    if (match && on_node) {
        bool match_node = rsc_is_on_node(rsc, on_node, flags);

        if (match_node == FALSE) {
            match = FALSE;
        }
    }

    if (match) {
        return rsc;
    }

    // No match at this level; recurse into children
    for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        resource_t *child = (resource_t *) gIter->data;

        result = rsc->fns->find_rsc(child, id, on_node, flags);
        if (result) {
            return result;
        }
    }
    return NULL;
}
/*!
 * \internal
 * \brief Look up a resource parameter value, falling back to meta-attributes
 *
 * When \p create is TRUE (or the parameter table is empty), the attributes
 * are evaluated fresh for the given node instead of using the cached table.
 *
 * \return Newly allocated copy of the value (caller must free),
 *         or NULL if not found
 */
char *
native_parameter(resource_t * rsc, node_t * node, gboolean create, const char *name,
                 pe_working_set_t * data_set)
{
    char *copy = NULL;
    const char *raw = NULL;
    GHashTable *params = NULL;
    GHashTable *fresh = NULL;

    CRM_CHECK(rsc != NULL, return NULL);
    CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);

    pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);

    if (create || (g_hash_table_size(rsc->parameters) == 0)) {
        if (node == NULL) {
            pe_rsc_trace(rsc, "Creating default hash");
        } else {
            pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname);
        }
        fresh = crm_str_table_new();
        get_rsc_attributes(fresh, rsc, node, data_set);
        params = fresh;
    } else {
        params = rsc->parameters;
    }

    raw = g_hash_table_lookup(params, name);
    if (raw == NULL) {
        // Fall back to meta-attributes
        raw = g_hash_table_lookup(rsc->meta, name);
    }
    if (raw != NULL) {
        copy = strdup(raw);
    }
    if (fresh != NULL) {
        g_hash_table_destroy(fresh);
    }
    return copy;
}
/*!
 * \internal
 * \brief Check whether a primitive resource is active
 *
 * A resource counts as active if it is running on any unclean or online
 * node; nodes that are cleanly offline are skipped.
 *
 * \note The \p all argument is accepted for interface compatibility but
 *       is not used here.
 */
gboolean
native_active(resource_t * rsc, gboolean all)
{
    for (GListPtr iter = rsc->running_on; iter != NULL; iter = iter->next) {
        node_t *host = (node_t *) iter->data;

        if (host->details->unclean) {
            crm_debug("Resource %s: node %s is unclean", rsc->id, host->details->uname);
            return TRUE;
        }
        if (host->details->online) {
            crm_debug("Resource %s active on %s", rsc->id, host->details->uname);
            return TRUE;
        }
        crm_debug("Resource %s: node %s is offline", rsc->id, host->details->uname);
    }
    return FALSE;
}
// Bundles print options and output destination for GHashTable callbacks
struct print_data_s {
    long options;       // Group of pe_print_* flags
    void *print_data;   // Destination consumed by the status_print() macro
};
/* GHashTable callback: print one "Option: name = value" resource parameter.
 * NOTE(review): the locals below appear unused, but status_print() is a
 * macro that presumably references "options" and "print_data" by name —
 * verify before removing them.
 */
static void
native_print_attr(gpointer key, gpointer value, gpointer user_data)
{
    long options = ((struct print_data_s *)user_data)->options;
    void *print_data = ((struct print_data_s *)user_data)->print_data;

    status_print("Option: %s = %s\n", (char *)key, (char *)value);
}
/* GHashTable callback: append one "Option: name = value" line (terminated
 * with an HTML line break) to the output object's current XML parent.
 */
static void
pe__native_output_attr_html(gpointer key, gpointer value, gpointer user_data)
{
    char line[LINE_MAX];
    pcmk__output_t *out = (pcmk__output_t *) user_data;

    snprintf(line, LINE_MAX, "Option: %s = %s<br/>", (char *) key, (char *) value);
    xmlNodeAddContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) line);
}
/* GHashTable callback: write one "Option: name = value" line to the output
 * object's text destination.
 */
static void
pe__native_output_attr_text(gpointer key, gpointer value, gpointer user_data)
{
    FILE *dest = ((pcmk__output_t *) user_data)->dest;

    fprintf(dest, "Option: %s = %s\n", (char *) key, (char *) value);
}
/* Map a resource's pending task to a display label for its in-flight state
 * ("Starting", "Stopping", etc.), or NULL if there is no matching task.
 * Both migration actions display as "Migrating".
 */
static const char *
native_pending_state(resource_t * rsc)
{
    static const struct {
        const char *task;
        const char *state;
    } task2state[] = {
        { CRMD_ACTION_START,    "Starting"  },
        { CRMD_ACTION_STOP,     "Stopping"  },
        { CRMD_ACTION_MIGRATE,  "Migrating" },
        { CRMD_ACTION_MIGRATED, "Migrating" },
        { CRMD_ACTION_PROMOTE,  "Promoting" },
        { CRMD_ACTION_DEMOTE,   "Demoting"  },
    };

    for (size_t lpc = 0; lpc < sizeof(task2state) / sizeof(task2state[0]); lpc++) {
        if (safe_str_eq(rsc->pending_task, task2state[lpc].task)) {
            return task2state[lpc].state;
        }
    }
    return NULL;
}
/* Return a display label for a pending operation, or NULL if there is none
 * to show. Only in-flight monitors get a label; pending probes are
 * deliberately not printed even when pending operations are requested. If
 * that behavior is ever wanted, add a "probe" -> "Checking" mapping here
 * and adjust the corresponding part of unpack.c:unpack_rsc_op().
 */
static const char *
native_pending_task(resource_t * rsc)
{
    if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STATUS)) {
        return "Monitoring";
    }
    return NULL;
}
/* Determine the role to display for a resource: a started instance of a
 * promotable clone is shown in the slave role rather than plain "Started".
 */
static enum rsc_role_e
native_displayable_role(resource_t *rsc)
{
    if ((rsc->role == RSC_ROLE_STARTED)
        && is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
        return RSC_ROLE_SLAVE;
    }
    return rsc->role;
}
/* Determine the state text to display for a resource: the pending-operation
 * state when pe_print_pending is requested and one applies, otherwise the
 * displayable role's text.
 */
static const char *
native_displayable_state(resource_t *rsc, long options)
{
    const char *state = NULL;

    if (options & pe_print_pending) {
        state = native_pending_state(rsc);
    }
    return (state != NULL)? state : role2text(native_displayable_role(rsc));
}
/*!
 * \internal
 * \brief Print a primitive resource as a legacy XML <resource> element
 *
 * Emits the resource's agent, role, and status flags as attributes via the
 * status_print() macro, followed by one <node> child per node the resource
 * is running on (unless pe_print_rsconly suppresses the node list).
 */
static void
native_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    const char *rsc_state = native_displayable_state(rsc, options);
    const char *target_role = NULL;

    /* resource information. */
    status_print("%s<resource ", pre_text);
    status_print("id=\"%s\" ", rsc_printable_id(rsc));
    // Agent spec is class[::provider]:type
    status_print("resource_agent=\"%s%s%s:%s\" ",
                 class,
                 prov ? "::" : "", prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE));
    status_print("role=\"%s\" ", rsc_state);
    if (rsc->meta) {
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    }
    if (target_role) {
        status_print("target_role=\"%s\" ", target_role);
    }
    status_print("active=\"%s\" ", rsc->fns->active(rsc, TRUE) ? "true" : "false");
    status_print("orphaned=\"%s\" ", is_set(rsc->flags, pe_rsc_orphan) ? "true" : "false");
    status_print("blocked=\"%s\" ", is_set(rsc->flags, pe_rsc_block) ? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print("failure_ignored=\"%s\" ",
                 is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
    status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
    if (options & pe_print_pending) {
        const char *pending_task = native_pending_task(rsc);

        if (pending_task) {
            status_print("pending=\"%s\" ", pending_task);
        }
    }
    if (options & pe_print_dev) {
        // Developer-only allocation details
        status_print("provisional=\"%s\" ",
                     is_set(rsc->flags, pe_rsc_provisional) ? "true" : "false");
        status_print("runnable=\"%s\" ", is_set(rsc->flags, pe_rsc_runnable) ? "true" : "false");
        status_print("priority=\"%f\" ", (double)rsc->priority);
        status_print("variant=\"%s\" ", crm_element_name(rsc->xml));
    }
    /* print out the nodes this resource is running on */
    if (options & pe_print_rsconly) {
        status_print("/>\n");
        /* do nothing */
    } else if (rsc->running_on != NULL) {
        GListPtr gIter = rsc->running_on;

        status_print(">\n");
        for (; gIter != NULL; gIter = gIter->next) {
            node_t *node = (node_t *) gIter->data;

            /* NOTE(review): "cached" is the inverse of online here, while
             * pe__resource_xml() emits BOOL2STR(node->details->online) for
             * the same attribute — confirm which polarity is intended.
             */
            status_print("%s    <node name=\"%s\" id=\"%s\" cached=\"%s\"/>\n", pre_text,
                         node->details->uname, node->details->id,
                         node->details->online ? "false" : "true");
        }
        status_print("%s</resource>\n", pre_text);
    } else {
        status_print("/>\n");
    }
}
/* Return ", " once something has already been written to a buffer (nonzero
 * offset), otherwise an empty string. Kept as an inline function rather
 * than a macro to prevent a coverity "unreachable" warning on the first
 * usage.
 */
static inline const char *
comma_if(int i)
{
    return (i == 0)? "" : ", ";
}
/*!
 * \internal
 * \brief Output one primitive resource's status as HTML
 *
 * Builds a "name (class[::provider]:type): state (flags) description" line
 * inside a health-colored <font> element on the output object's current XML
 * parent, followed by a node list (when active on multiple nodes) and any
 * requested parameter/developer detail sections.
 *
 * \param[in,out] out      Output object (XML-building)
 * \param[in]     rsc      Primitive resource to output
 * \param[in]     name     Name to display for the resource
 * \param[in]     node     Node to show the resource on (cleared below when
 *                         rsconly is requested or multiple nodes apply)
 * \param[in]     options  Group of pe_print_* flags
 */
void
pe__common_output_html(pcmk__output_t *out, resource_t * rsc,
                       const char *name, node_t *node, long options)
{
    const char *desc = NULL;
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
    const char *target_role = NULL;
    enum rsc_role_e role = native_displayable_role(rsc);
    int offset = 0;
    int flagOffset = 0;
    char buffer[LINE_MAX];
    char buffer2[LINE_MAX*3];
    char flagBuffer[LINE_MAX];
    const char *color = NULL;

    CRM_ASSERT(rsc->variant == pe_native);
    CRM_ASSERT(kind != NULL);

    if (rsc->meta) {
        // Implicit (internal) resources are skipped unless explicitly requested
        const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);

        if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
            crm_trace("skipping print of internal resource %s", rsc->id);
            return;
        }
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    }

    // Don't show a single node when none is wanted or more than one applies
    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        node = NULL;
    }

    pcmk__output_xml_create_parent(out, "font");

    // Font color reflects resource health
    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        color = "yellow";
    } else if (is_set(rsc->flags, pe_rsc_failed)) {
        color = "red";
    } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
        color = "red";
    } else if (g_list_length(rsc->running_on) > 1) {
        color = "orange";
    } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
        color = "yellow";
    } else {
        color = "green";
    }
    xmlSetProp(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)"color", (pcmkXmlStr)color);

    /* NOTE(review): snprintf() returns the would-be length, so an extremely
     * long name/class could push offset past LINE_MAX, making
     * LINE_MAX - offset negative — TODO confirm inputs are bounded.
     */
    offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", name);
    offset += snprintf(buffer + offset, LINE_MAX - offset, "\t(%s", class);
    if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
        const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
    }
    offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s):\t", kind);
    if(is_set(rsc->flags, pe_rsc_orphan)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " ORPHANED ");
    }

    // Show FAILED (with role for promoted instances) or the displayable state
    if(role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED %s", role2text(role));
    } else if(is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED");
    } else {
        const char *rsc_state = native_displayable_state(rsc, options);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_state);
    }

    if(node) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " %s", node->details->uname);

        if (node->details->online == FALSE && node->details->unclean) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sUNCLEAN", comma_if(flagOffset));
        }
    }

    if (options & pe_print_pending) {
        const char *pending_task = native_pending_task(rsc);

        if (pending_task) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%s%s", comma_if(flagOffset), pending_task);
        }
    }

    if (target_role) {
        enum rsc_role_e target_role_e = text2role(target_role);

        /* Ignore target role Started, as it is the default anyways
         * (and would also allow a Master to be Master).
         * Show if target role limits our abilities. */
        if (target_role_e == RSC_ROLE_STOPPED) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sdisabled", comma_if(flagOffset));
            rsc->cluster->disabled_resources++;

        } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
                   && target_role_e == RSC_ROLE_SLAVE) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%starget-role:%s", comma_if(flagOffset), target_role);
            rsc->cluster->disabled_resources++;
        }
    }

    if (is_set(rsc->flags, pe_rsc_block)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sblocked", comma_if(flagOffset));
        rsc->cluster->blocked_resources++;

    } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sunmanaged", comma_if(flagOffset));
    }

    if(is_set(rsc->flags, pe_rsc_failure_ignored)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sfailure ignored", comma_if(flagOffset));
    }

    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
    }

    CRM_LOG_ASSERT(offset > 0);

    // Combine the base line with any flags and optional description
    if(flagOffset > 0) {
        snprintf(buffer2, LINE_MAX*3, "%s (%s)%s%s", buffer, flagBuffer, desc?" ":"", desc?desc:"");
    } else {
        snprintf(buffer2, LINE_MAX*3, "%s%s%s", buffer, desc?" ":"", desc?desc:"");
    }
    xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)buffer2);
    pcmk__output_xml_pop_parent(out); // </font>

    // When running on multiple nodes, list them all
    if ((options & pe_print_rsconly)) {
        /* nothing */
    } else if (g_list_length(rsc->running_on) > 1) {
        GListPtr gIter = rsc->running_on;

        out->begin_list(out, NULL, NULL, NULL);
        for (; gIter != NULL; gIter = gIter->next) {
            node_t *n = (node_t *) gIter->data;

            out->list_item(out, NULL, n->details->uname);
        }
        out->end_list(out);
    }

    pcmk__output_create_xml_node(out, "br");

    if (options & pe_print_details) {
        // One "Option: name = value" line per resource parameter
        g_hash_table_foreach(rsc->parameters, pe__native_output_attr_html, out);
    }

    if (options & pe_print_dev) {
        // Developer detail: allocation internals and allowed node weights
        GHashTableIter iter;
        node_t *n = NULL;

        snprintf(buffer, LINE_MAX, " \t(%s%svariant=%s, priority=%f)",
                 is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "",
                 is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ",
                 crm_element_name(rsc->xml), (double)rsc->priority);
        xmlNodeAddContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)buffer);
        xmlNodeAddContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)" \tAllowed Nodes");
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            snprintf(buffer, LINE_MAX, " \t * %s %d", n->details->uname, n->weight);
            xmlNodeAddContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)buffer);
        }
    }

    if (options & pe_print_max_details) {
        // Full detail: dump every allowed node
        GHashTableIter iter;
        node_t *n = NULL;

        xmlNodeAddContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr)" \t=== Allowed Nodes\n");
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            pe__output_node(n, FALSE, out);
        }
    }
}
/*!
 * \internal
 * \brief Output one primitive resource's status as plain text
 *
 * Builds a "name (class[::provider]:type): state (flags) description" line
 * and writes it to the output object's text destination, followed by a node
 * list (when active on multiple nodes) and any requested parameter or
 * developer detail sections.
 *
 * \param[in,out] out       Output object (text destination)
 * \param[in]     rsc       Primitive resource to output
 * \param[in]     pre_text  Prefix for each line (defaults to a space if NULL)
 * \param[in]     name      Name to display for the resource
 * \param[in]     node      Node to show the resource on (cleared below when
 *                          rsconly is requested or multiple nodes apply)
 * \param[in]     options   Group of pe_print_* flags
 */
void
pe__common_output_text(pcmk__output_t *out, resource_t * rsc, const char *pre_text,
                       const char *name, node_t *node, long options)
{
    const char *desc = NULL;
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
    const char *target_role = NULL;
    enum rsc_role_e role = native_displayable_role(rsc);
    int offset = 0;
    int flagOffset = 0;
    char buffer[LINE_MAX];
    char flagBuffer[LINE_MAX];

    CRM_ASSERT(rsc->variant == pe_native);
    CRM_ASSERT(kind != NULL);

    if (rsc->meta) {
        // Implicit (internal) resources are skipped unless explicitly requested
        const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);

        if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
            crm_trace("skipping print of internal resource %s", rsc->id);
            return;
        }
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    }

    if (pre_text == NULL) {
        pre_text = " ";
    }

    // Don't show a single node when none is wanted or more than one applies
    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        node = NULL;
    }

    // Always true after the defaulting above; kept for symmetry with callers
    if(pre_text) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", pre_text);
    }
    offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", name);
    offset += snprintf(buffer + offset, LINE_MAX - offset, "\t(%s", class);
    if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
        const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
    }
    offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s):\t", kind);
    if(is_set(rsc->flags, pe_rsc_orphan)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " ORPHANED ");
    }

    // Show FAILED (with role for promoted instances) or the displayable state
    if(role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED %s", role2text(role));
    } else if(is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED");
    } else {
        const char *rsc_state = native_displayable_state(rsc, options);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_state);
    }

    if(node) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " %s", node->details->uname);

        if (node->details->online == FALSE && node->details->unclean) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sUNCLEAN", comma_if(flagOffset));
        }
    }

    if (options & pe_print_pending) {
        const char *pending_task = native_pending_task(rsc);

        if (pending_task) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%s%s", comma_if(flagOffset), pending_task);
        }
    }

    if (target_role) {
        enum rsc_role_e target_role_e = text2role(target_role);

        /* Ignore target role Started, as it is the default anyways
         * (and would also allow a Master to be Master).
         * Show if target role limits our abilities. */
        if (target_role_e == RSC_ROLE_STOPPED) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sdisabled", comma_if(flagOffset));
            rsc->cluster->disabled_resources++;

        } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
                   && target_role_e == RSC_ROLE_SLAVE) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%starget-role:%s", comma_if(flagOffset), target_role);
            rsc->cluster->disabled_resources++;
        }
    }

    if (is_set(rsc->flags, pe_rsc_block)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sblocked", comma_if(flagOffset));
        rsc->cluster->blocked_resources++;

    } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sunmanaged", comma_if(flagOffset));
    }

    if(is_set(rsc->flags, pe_rsc_failure_ignored)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sfailure ignored", comma_if(flagOffset));
    }

    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
    }

    CRM_LOG_ASSERT(offset > 0);

    // Write the base line with any flags and optional description
    if(flagOffset > 0) {
        fprintf(out->dest, "%s (%s)%s%s", buffer, flagBuffer, desc?" ":"", desc?desc:"");
    } else {
        fprintf(out->dest, "%s%s%s", buffer, desc?" ":"", desc?desc:"");
    }

    // When running on multiple nodes, list them all in brackets
    if ((options & pe_print_rsconly)) {
        /* nothing */
    } else if (g_list_length(rsc->running_on) > 1) {
        GListPtr gIter = rsc->running_on;

        fprintf(out->dest, "[");
        for (; gIter != NULL; gIter = gIter->next) {
            node_t *n = (node_t *) gIter->data;

            fprintf(out->dest, " %s", n->details->uname);
        }
        fprintf(out->dest, " ]");

        if (options & pe_print_suppres_nl) {
            /* nothing */
        } else {
            fprintf(out->dest, "\n");
        }
    }

    if (options & pe_print_details) {
        // One "Option: name = value" line per resource parameter
        g_hash_table_foreach(rsc->parameters, pe__native_output_attr_text, out);
    }

    if (options & pe_print_dev) {
        // Developer detail: allocation internals and allowed node weights
        GHashTableIter iter;
        node_t *n = NULL;

        fprintf(out->dest, "%s\t(%s%svariant=%s, priority=%f)", pre_text,
                is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "",
                is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ",
                crm_element_name(rsc->xml), (double)rsc->priority);
        fprintf(out->dest, "%s\tAllowed Nodes", pre_text);
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            fprintf(out->dest, "%s\t * %s %d", pre_text, n->details->uname, n->weight);
        }
    }

    if (options & pe_print_max_details) {
        // Full detail: dump every allowed node
        GHashTableIter iter;
        node_t *n = NULL;

        fprintf(out->dest, "%s\t=== Allowed Nodes\n", pre_text);
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            pe__output_node(n, FALSE, out);
        }
    }
}
/*!
 * \internal
 * \brief Print one primitive resource's status via the legacy status_print()
 *        mechanism
 *
 * Handles the XML, HTML, printf, ncurses and log output modes selected by
 * \p options. Builds a "name (class[::provider]:type): state (flags)
 * description" line, then prints node lists and any requested parameter or
 * developer detail sections.
 *
 * \param[in] rsc         Primitive resource to print
 * \param[in] pre_text    Prefix for each line (defaulted for printf mode)
 * \param[in] name        Name to display for the resource
 * \param[in] node        Node to show the resource on (cleared below when
 *                        rsconly is requested or multiple nodes apply)
 * \param[in] options     Group of pe_print_* flags
 * \param[in] print_data  Destination consumed by the status_print() macro
 */
void
common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data)
{
    const char *desc = NULL;
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
    const char *target_role = NULL;
    enum rsc_role_e role = native_displayable_role(rsc);
    int offset = 0;
    int flagOffset = 0;
    char buffer[LINE_MAX];
    char flagBuffer[LINE_MAX];

    CRM_ASSERT(rsc->variant == pe_native);
    CRM_ASSERT(kind != NULL);

    if (rsc->meta) {
        // Implicit (internal) resources are skipped unless explicitly requested
        const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);

        if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
            crm_trace("skipping print of internal resource %s", rsc->id);
            return;
        }
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    }

    if (pre_text == NULL && (options & pe_print_printf)) {
        pre_text = " ";
    }

    // XML mode is handled entirely by the dedicated helper
    if (options & pe_print_xml) {
        native_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    // Don't show a single node when none is wanted or more than one applies
    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        node = NULL;
    }

    if (options & pe_print_html) {
        // Font color reflects resource health
        if (is_not_set(rsc->flags, pe_rsc_managed)) {
            status_print("<font color=\"yellow\">");

        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            status_print("<font color=\"red\">");

        } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
            status_print("<font color=\"red\">");

        } else if (g_list_length(rsc->running_on) > 1) {
            status_print("<font color=\"orange\">");

        } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
            status_print("<font color=\"yellow\">");

        } else {
            status_print("<font color=\"green\">");
        }
    }

    if(pre_text) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", pre_text);
    }
    offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", name);
    offset += snprintf(buffer + offset, LINE_MAX - offset, "\t(%s", class);
    if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
        const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
    }
    offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s):\t", kind);
    if(is_set(rsc->flags, pe_rsc_orphan)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " ORPHANED ");
    }

    // Show FAILED (with role for promoted instances) or the displayable state
    if(role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED %s", role2text(role));
    } else if(is_set(rsc->flags, pe_rsc_failed)) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED");
    } else {
        const char *rsc_state = native_displayable_state(rsc, options);

        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_state);
    }

    if(node) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " %s", node->details->uname);

        if (node->details->online == FALSE && node->details->unclean) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sUNCLEAN", comma_if(flagOffset));
        }
    }

    if (options & pe_print_pending) {
        const char *pending_task = native_pending_task(rsc);

        if (pending_task) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%s%s", comma_if(flagOffset), pending_task);
        }
    }

    if (target_role) {
        enum rsc_role_e target_role_e = text2role(target_role);

        /* Ignore target role Started, as it is the default anyways
         * (and would also allow a Master to be Master).
         * Show if target role limits our abilities. */
        if (target_role_e == RSC_ROLE_STOPPED) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%sdisabled", comma_if(flagOffset));
            rsc->cluster->disabled_resources++;

        } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
                   && target_role_e == RSC_ROLE_SLAVE) {
            flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                                   "%starget-role:%s", comma_if(flagOffset), target_role);
            rsc->cluster->disabled_resources++;
        }
    }

    if (is_set(rsc->flags, pe_rsc_block)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sblocked", comma_if(flagOffset));
        rsc->cluster->blocked_resources++;

    } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sunmanaged", comma_if(flagOffset));
    }

    if(is_set(rsc->flags, pe_rsc_failure_ignored)) {
        flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset,
                               "%sfailure ignored", comma_if(flagOffset));
    }

    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
    }

    CRM_LOG_ASSERT(offset > 0);

    // Print the base line with any flags and optional description
    if(flagOffset > 0) {
        status_print("%s (%s)%s%s", buffer, flagBuffer, desc?" ":"", desc?desc:"");
    } else {
        status_print("%s%s%s", buffer, desc?" ":"", desc?desc:"");
    }

#if CURSES_ENABLED
    if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) {
        /* Done */
    } else if (options & pe_print_ncurses) {
        /* coverity[negative_returns] False positive */
        move(-1, 0);
    }
#endif

    if (options & pe_print_html) {
        status_print(" </font> ");
    }

    // When running on multiple nodes, list them all per the output mode
    if ((options & pe_print_rsconly)) {

    } else if (g_list_length(rsc->running_on) > 1) {
        GListPtr gIter = rsc->running_on;
        int counter = 0;

        if (options & pe_print_html) {
            status_print("<ul>\n");
        } else if ((options & pe_print_printf)
                   || (options & pe_print_ncurses)) {
            status_print("[");
        }

        for (; gIter != NULL; gIter = gIter->next) {
            node_t *n = (node_t *) gIter->data;

            counter++;

            if (options & pe_print_html) {
                status_print("<li>\n%s", n->details->uname);

            } else if ((options & pe_print_printf)
                       || (options & pe_print_ncurses)) {
                status_print(" %s", n->details->uname);

            } else if ((options & pe_print_log)) {
                status_print("\t%d : %s", counter, n->details->uname);

            } else {
                status_print("%s", n->details->uname);
            }
            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }

        if (options & pe_print_html) {
            status_print("</ul>\n");
        } else if ((options & pe_print_printf)
                   || (options & pe_print_ncurses)) {
            status_print(" ]");
        }
    }

    if (options & pe_print_html) {
        status_print("<br/>\n");
    } else if (options & pe_print_suppres_nl) {
        /* nothing */
    } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
        status_print("\n");
    }

    if (options & pe_print_details) {
        // One "Option: name = value" line per resource parameter
        struct print_data_s pdata;

        pdata.options = options;
        pdata.print_data = print_data;
        g_hash_table_foreach(rsc->parameters, native_print_attr, &pdata);
    }

    if (options & pe_print_dev) {
        // Developer detail: allocation internals and allowed node weights
        GHashTableIter iter;
        node_t *n = NULL;

        status_print("%s\t(%s%svariant=%s, priority=%f)", pre_text,
                     is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "",
                     is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ",
                     crm_element_name(rsc->xml), (double)rsc->priority);
        status_print("%s\tAllowed Nodes", pre_text);
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            status_print("%s\t * %s %d", pre_text, n->details->uname, n->weight);
        }
    }

    if (options & pe_print_max_details) {
        // Full detail: dump every allowed node
        GHashTableIter iter;
        node_t *n = NULL;

        status_print("%s\t=== Allowed Nodes\n", pre_text);
        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
            print_node("\t", n, FALSE);
        }
    }
}
/*!
 * \internal
 * \brief Print a primitive resource, dispatching XML mode to its helper
 *
 * For non-XML modes, the displayed node is the current node, or the
 * pending node when there is no current one.
 */
void
native_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    node_t *where = NULL;

    CRM_ASSERT(rsc->variant == pe_native);

    if (options & pe_print_xml) {
        native_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    where = pe__current_node(rsc);
    if (where == NULL) {
        // This is set only if a non-probe action is pending on this node
        where = rsc->pending_node;
    }
    common_print(rsc, pre_text, rsc_printable_id(rsc), where, options, print_data);
}
/*!
 * \internal
 * \brief Output a primitive resource as XML via the formatted-output API
 *
 * va_list contents: long options (pe_print_* flags), resource_t *rsc
 *
 * \return 0 (any failure from pe__name_and_nvpairs_xml() is asserted)
 */
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);

    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    const char *rsc_state = native_displayable_state(rsc, options);

    long is_print_pending = options & pe_print_pending;
    long is_print_dev = options & pe_print_dev;

    char ra_name[LINE_MAX];
    char *nodes_running_on = NULL;
    char *priority = NULL;
    int rc = 0;

    CRM_ASSERT(rsc->variant == pe_native);

    /* Agent specification: class[::provider]:type. Use snprintf() rather
     * than sprintf() so a long specification cannot overflow ra_name.
     */
    snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class, prov ? "::" : "",
             prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE));

    nodes_running_on = crm_itoa(g_list_length(rsc->running_on));
    priority = crm_ftoa(rsc->priority);

    // 16 name/value pairs; NULL values are skipped by the helper
    rc = pe__name_and_nvpairs_xml(out, true, "resource", 16
             , "id", rsc_printable_id(rsc)
             , "resource_agent", ra_name
             , "role", rsc_state
             , "target_role", (rsc->meta ? g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE) : NULL)
             /* Attribute name was previously "active " (trailing space),
              * which produced a malformed attribute name in the XML output
              */
             , "active", BOOL2STR(rsc->fns->active(rsc, TRUE))
             , "orphaned", BOOL2STR(is_set(rsc->flags, pe_rsc_orphan))
             , "blocked", BOOL2STR(is_set(rsc->flags, pe_rsc_block))
             , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
             , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
             , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
             , "nodes_running_on", nodes_running_on
             , "pending", (is_print_pending ? native_pending_task(rsc) : NULL)
             , "provisional", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_provisional)) : NULL)
             , "runnable", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_runnable)) : NULL)
             , "priority", (is_print_dev ? priority : NULL)
             , "variant", (is_print_dev ? crm_element_name(rsc->xml) : NULL));
    free(priority);
    free(nodes_running_on);

    CRM_ASSERT(rc == 0);

    if (rsc->running_on != NULL) {
        GListPtr gIter = rsc->running_on;

        for (; gIter != NULL; gIter = gIter->next) {
            node_t *node = (node_t *) gIter->data;

            /* NOTE(review): "cached" here is BOOL2STR(online), while the
             * legacy native_print_xml() emits the inverse — confirm which
             * polarity is intended.
             */
            rc = pe__name_and_nvpairs_xml(out, false, "node", 3
                     , "name", node->details->uname
                     , "id", node->details->id
                     , "cached", BOOL2STR(node->details->online));
            CRM_ASSERT(rc == 0);
        }
    }
    pcmk__output_xml_pop_parent(out);
    return rc;
}
/*!
 * \internal
 * \brief Output a primitive resource as HTML via the formatted-output API
 *
 * va_list contents: long options (pe_print_* flags), resource_t *rsc
 *
 * \return 0
 */
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);
    node_t *where = pe__current_node(rsc);

    CRM_ASSERT(rsc->variant == pe_native);

    if (where == NULL) {
        // This is set only if a non-probe action is pending on this node
        where = rsc->pending_node;
    }
    pe__common_output_html(out, rsc, rsc_printable_id(rsc), where, options);
    return 0;
}
/*!
 * \internal
 * \brief Output a primitive resource as text via the formatted-output API
 *
 * va_list contents: long options (pe_print_* flags), resource_t *rsc,
 * const char *pre_text (line prefix)
 *
 * \return 0
 */
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
    long options = va_arg(args, int);
    resource_t *rsc = va_arg(args, resource_t *);
    const char *prefix = va_arg(args, char *);
    node_t *where = pe__current_node(rsc);

    CRM_ASSERT(rsc->variant == pe_native);

    if (where == NULL) {
        // This is set only if a non-probe action is pending on this node
        where = rsc->pending_node;
    }
    pe__common_output_text(out, rsc, prefix, rsc_printable_id(rsc), where, options);
    return 0;
}
/*!
 * \internal
 * \brief Free a primitive resource
 *
 * Delegates entirely to common_free(); per the trace message, the action
 * list is freed but not the data it points to.
 */
void
native_free(resource_t * rsc)
{
    pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
    common_free(rsc);
}
/*!
 * \internal
 * \brief Get a primitive resource's current or next role
 *
 * \param[in] rsc      Resource to check
 * \param[in] current  TRUE for the current role, FALSE for the next role
 *
 * \return The selected role
 */
enum rsc_role_e
native_resource_state(const resource_t * rsc, gboolean current)
{
    enum rsc_role_e role = current ? rsc->role : rsc->next_role;

    pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
    return role;
}
/*!
 * \internal
 * \brief List nodes where a resource (or any of its children) is
 *
 * \param[in]  rsc      Resource to check
 * \param[out] list     List to add result to
 * \param[in]  current  0 = where known, 1 = running, 2 = running or pending
 *
 * \return If list contains only one node, that node
 */
pe_node_t *
native_location(const pe_resource_t *rsc, GList **list, int current)
{
    pe_node_t *one = NULL;
    GListPtr found = NULL;

    if (rsc->children) {
        // Collective resource: gather locations from all children
        for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
            resource_t *child = (resource_t *) iter->data;

            child->fns->location(child, &found, current);
        }

    } else if (current) {
        if (rsc->running_on) {
            found = g_list_copy(rsc->running_on);
        }
        // current == 2 also includes a pending node not already listed
        if ((current == 2) && rsc->pending_node
            && !pe_find_node_id(found, rsc->pending_node->details->id)) {
            found = g_list_append(found, rsc->pending_node);
        }

    } else if (rsc->allocated_to) {
        // current is 0 here: report the assigned location
        found = g_list_append(NULL, rsc->allocated_to);
    }

    if (found && (found->next == NULL)) {
        one = found->data;
    }

    if (list) {
        // Merge results into the caller's list, skipping duplicates
        for (GListPtr iter = found; iter != NULL; iter = iter->next) {
            node_t *node = (node_t *) iter->data;

            if ((*list == NULL)
                || (pe_find_node_id(*list, node->details->id) == NULL)) {
                *list = g_list_append(*list, node);
            }
        }
    }
    g_list_free(found);
    return one;
}
/*
 * Tally primitive resources by agent specification ("class[::provider]:type").
 *
 * For each primitive in rsc_list, increment its agent-spec total in
 * rsc_table, and, for each node it is running on, the per-node count in
 * that node's table inside active_table.  Non-primitive resources are
 * skipped; counters are heap-allocated ints owned by the tables.
 *
 * \param[in]     rsc_list      Resources to examine
 * \param[in,out] rsc_table     Maps agent spec -> int* total count (may be NULL)
 * \param[in,out] active_table  Maps node name -> (agent spec -> int* active
 *                              count) (may be NULL)
 */
static void
get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
    GListPtr gIter = rsc_list;

    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;
        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
        const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);

        int offset = 0;
        char buffer[LINE_MAX];

        int *rsc_counter = NULL;
        int *active_counter = NULL;

        if (rsc->variant != pe_native) {
            continue;  // only primitives are tallied
        }

        /* Build the "class[::provider]:type" hash key for this resource */
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
        if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
            const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);

            offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
        }
        offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
        CRM_LOG_ASSERT(offset > 0);

        if (rsc_table) {
            rsc_counter = g_hash_table_lookup(rsc_table, buffer);
            if (rsc_counter == NULL) {
                // first occurrence of this agent spec
                rsc_counter = calloc(1, sizeof(int));
                *rsc_counter = 0;
                g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
            }
            (*rsc_counter)++;
        }

        if (active_table) {
            GListPtr gIter2 = rsc->running_on;

            for (; gIter2 != NULL; gIter2 = gIter2->next) {
                node_t *node = (node_t *) gIter2->data;
                GHashTable *node_table = NULL;

                /* Skip nodes that are offline and not unclean */
                if (node->details->unclean == FALSE && node->details->online == FALSE) {
                    continue;
                }

                node_table = g_hash_table_lookup(active_table, node->details->uname);
                if (node_table == NULL) {
                    // first active resource seen on this node
                    node_table = crm_str_table_new();
                    g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                }

                active_counter = g_hash_table_lookup(node_table, buffer);
                if (active_counter == NULL) {
                    active_counter = calloc(1, sizeof(int));
                    *active_counter = 0;
                    g_hash_table_insert(node_table, strdup(buffer), active_counter);
                }
                (*active_counter)++;
            }
        }
    }
}
/* GDestroyNotify callback: free one per-node counter table (NULL-safe) */
static void
destroy_node_table(gpointer data)
{
    if (data != NULL) {
        g_hash_table_destroy((GHashTable *) data);
    }
}
/*
 * Print a brief summary of resources: one line per agent specification,
 * per node it is active on, via the status_print() dispatch macro.
 *
 * \param[in] rsc_list    Resources to summarize
 * \param[in] pre_text    Prefix for each printed line (may be NULL)
 * \param[in] options     Bitmask of pe_print_* flags
 * \param[in] print_data  Output destination interpreted by status_print
 * \param[in] print_all   If TRUE, print "active/total" counts and include
 *                        specs with no active instances
 */
void
print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options,
                 void *print_data, gboolean print_all)
{
    GHashTable *rsc_table = crm_str_table_new();
    GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                     free, destroy_node_table);
    GHashTableIter hash_iter;
    char *type = NULL;
    int *rsc_counter = NULL;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    /* Outer loop: each known agent spec and its total count */
    g_hash_table_iter_init(&hash_iter, rsc_table);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;

        /* Inner loop: each node, looking up this spec's active count there */
        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;  // spec not active on this node
            } else {
                active_counter_all += *active_counter;
            }

            if (options & pe_print_rsconly) {
                node_name = NULL;  // resource-only mode: omit node names
            }

            if (options & pe_print_html) {
                status_print("<li>\n");
            }

            if (print_all) {
                status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0,
                             rsc_counter ? *rsc_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            } else {
                status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            }

            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }

        /* In full mode, specs with no active instances still get one line */
        if (print_all && active_counter_all == 0) {
            if (options & pe_print_html) {
                status_print("<li>\n");
            }

            status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                         active_counter_all,
                         rsc_counter ? *rsc_counter : 0, type);

            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }

    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }
}
/*
 * Text-formatter variant of print_rscs_brief(): same per-spec/per-node
 * summary, written with fprintf() to the output object's destination.
 *
 * \param[in,out] out        Output object (out->dest is the target stream)
 * \param[in]     rsc_list   Resources to summarize
 * \param[in]     pre_text   Prefix for each printed line (may be NULL)
 * \param[in]     options    Bitmask of pe_print_* flags
 * \param[in]     print_all  If TRUE, print "active/total" counts and include
 *                           specs with no active instances
 */
void
pe__rscs_brief_output_text(pcmk__output_t *out, GListPtr rsc_list, const char *pre_text,
                           long options, gboolean print_all)
{
    GHashTable *rsc_table = crm_str_table_new();
    GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                     free, destroy_node_table);
    GHashTableIter hash_iter;
    char *type = NULL;
    int *rsc_counter = NULL;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    /* Outer loop: each known agent spec and its total count */
    g_hash_table_iter_init(&hash_iter, rsc_table);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;

        /* Inner loop: each node, looking up this spec's active count there */
        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;  // spec not active on this node
            } else {
                active_counter_all += *active_counter;
            }

            if (options & pe_print_rsconly) {
                node_name = NULL;  // resource-only mode: omit node names
            }

            if (print_all) {
                fprintf(out->dest, "%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                        active_counter ? *active_counter : 0,
                        rsc_counter ? *rsc_counter : 0, type,
                        active_counter && (*active_counter > 0) && node_name ? node_name : "");
            } else {
                fprintf(out->dest, "%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                        active_counter ? *active_counter : 0, type,
                        active_counter && (*active_counter > 0) && node_name ? node_name : "");
            }
        }

        /* In full mode, specs with no active instances still get one line */
        if (print_all && active_counter_all == 0) {
            fprintf(out->dest, "%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                    active_counter_all,
                    rsc_counter ? *rsc_counter : 0, type);
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }

    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }
}
/*
 * HTML-formatter variant of print_rscs_brief(): same per-spec/per-node
 * summary, with each line emitted as a list item via out->list_item().
 *
 * \param[in,out] out        Output object providing list_item()
 * \param[in]     rsc_list   Resources to summarize
 * \param[in]     options    Bitmask of pe_print_* flags
 * \param[in]     print_all  If TRUE, print "active/total" counts and include
 *                           specs with no active instances
 */
void
pe__rscs_brief_output_html(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all)
{
    GHashTable *rsc_table = crm_str_table_new();
    GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                     free, destroy_node_table);
    GHashTableIter hash_iter;
    char *type = NULL;
    int *rsc_counter = NULL;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    /* Outer loop: each known agent spec and its total count */
    g_hash_table_iter_init(&hash_iter, rsc_table);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;
        char buffer[LINE_MAX];  // formatted list-item text

        /* Inner loop: each node, looking up this spec's active count there */
        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;  // spec not active on this node
            } else {
                active_counter_all += *active_counter;
            }

            if (options & pe_print_rsconly) {
                node_name = NULL;  // resource-only mode: omit node names
            }

            if (print_all) {
                snprintf(buffer, LINE_MAX, " %d/%d\t(%s):\tActive %s\n",
                         active_counter ? *active_counter : 0,
                         rsc_counter ? *rsc_counter : 0, type,
                         active_counter && (*active_counter > 0) && node_name ? node_name : "");
            } else {
                snprintf(buffer, LINE_MAX, " %d\t(%s):\tActive %s\n",
                         active_counter ? *active_counter : 0, type,
                         active_counter && (*active_counter > 0) && node_name ? node_name : "");
            }

            out->list_item(out, NULL, buffer);
        }

        /* In full mode, specs with no active instances still get one item */
        if (print_all && active_counter_all == 0) {
            snprintf(buffer, LINE_MAX, " %d/%d\t(%s):\tActive\n",
                     active_counter_all,
                     rsc_counter ? *rsc_counter : 0, type);
            out->list_item(out, NULL, buffer);
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }

    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }
}
diff --git a/lib/pengine/unpack.h b/lib/pengine/unpack.h
index 3b7adea77a..6ee9f1f46d 100644
--- a/lib/pengine/unpack.h
+++ b/lib/pengine/unpack.h
@@ -1,108 +1,58 @@
/*
* Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PENGINE_UNPACK__H
# define PENGINE_UNPACK__H
extern gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set);
extern gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set);
extern gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set);
extern gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set);
extern gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set);
extern gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
extern gboolean unpack_lrm_resources(node_t * node, xmlNode * lrm_state,
pe_working_set_t * data_set);
extern gboolean add_node_attrs(xmlNode * attrs, node_t * node, gboolean overwrite,
pe_working_set_t * data_set);
extern gboolean determine_online_status(xmlNode * node_state, node_t * this_node,
pe_working_set_t * data_set);
-/*
- * The man pages for both curses and ncurses suggest inclusion of "curses.h".
- * We believe the following to be acceptable and portable.
- */
-
-# if defined(HAVE_LIBNCURSES) || defined(HAVE_LIBCURSES)
-# if defined(HAVE_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
-# include <ncurses.h>
-# define CURSES_ENABLED 1
-# elif defined(HAVE_NCURSES_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
-# include <ncurses/ncurses.h>
-# define CURSES_ENABLED 1
-# elif defined(HAVE_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
-# include <curses.h>
-# define CURSES_ENABLED 1
-# elif defined(HAVE_CURSES_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
-# include <curses/curses.h>
-# define CURSES_ENABLED 1
-# else
-# define CURSES_ENABLED 0
-# endif
-# else
-# define CURSES_ENABLED 0
-# endif
-
-# if CURSES_ENABLED
-# define status_printw(fmt, args...) printw(fmt, ##args)
-# else
-# define status_printw(fmt, args...) \
- crm_err("printw support requires ncurses to be available during configure"); \
- do_crm_log(LOG_WARNING, fmt, ##args);
-# endif
-
-# define status_print(fmt, args...) \
- if(options & pe_print_html) { \
- FILE *stream = print_data; \
- fprintf(stream, fmt, ##args); \
- } else if(options & pe_print_ncurses) { \
- status_printw(fmt, ##args); \
- } else if(options & pe_print_printf) { \
- FILE *stream = print_data; \
- fprintf(stream, fmt, ##args); \
- } else if(options & pe_print_xml) { \
- FILE *stream = print_data; \
- fprintf(stream, fmt, ##args); \
- } else if(options & pe_print_log) { \
- int log_level = *(int*)print_data; \
- do_crm_log(log_level, fmt, ##args); \
- }
-
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = 0x0001,
pe_wo_restart_type = 0x0002,
pe_wo_role_after = 0x0004,
pe_wo_poweroff = 0x0008,
pe_wo_require_all = 0x0010,
pe_wo_order_score = 0x0020,
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (is_not_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
set_bit(pe_wo, pe_wo_bit); \
} \
} while (0);
#endif
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 4b7b101d67..eeec852e3b 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,4425 +1,4425 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <sys/utsname.h>
#include <crm/msg_xml.h>
#include <crm/services.h>
#include <crm/lrmd.h>
+#include <crm/common/curses_internal.h>
#include <crm/common/internal.h> /* crm_ends_with_ext */
#include <crm/common/ipc.h>
#include <crm/common/iso8601_internal.h>
#include <crm/common/mainloop.h>
#include <crm/common/util.h>
#include <crm/common/xml.h>
#include <crm/cib/internal.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
-#include <../lib/pengine/unpack.h>
#include <pacemaker-internal.h>
#include <crm/stonith-ng.h>
static void clean_up_connections(void);
static crm_exit_t clean_up(crm_exit_t exit_code);
static void crm_diff_update(const char *event, xmlNode * msg);
static gboolean mon_refresh_display(gpointer user_data);
static int cib_connect(gboolean full);
static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
static void kick_refresh(gboolean data_updated);
static char *get_node_display_name(node_t *node);
/*
* Definitions indicating which items to print
*/
#define mon_show_times (0x0001U)
#define mon_show_stack (0x0002U)
#define mon_show_dc (0x0004U)
#define mon_show_count (0x0008U)
#define mon_show_nodes (0x0010U)
#define mon_show_resources (0x0020U)
#define mon_show_attributes (0x0040U)
#define mon_show_failcounts (0x0080U)
#define mon_show_operations (0x0100U)
#define mon_show_tickets (0x0200U)
#define mon_show_bans (0x0400U)
#define mon_show_fence_history (0x0800U)
#define mon_show_headers (mon_show_times | mon_show_stack | mon_show_dc \
| mon_show_count)
#define mon_show_default (mon_show_headers | mon_show_nodes \
| mon_show_resources)
#define mon_show_all (mon_show_default | mon_show_attributes \
| mon_show_failcounts | mon_show_operations \
| mon_show_tickets | mon_show_bans \
| mon_show_fence_history)
static unsigned int show = mon_show_default;
/*
* Definitions indicating how to output
*/
enum mon_output_format_e {
mon_output_none,
mon_output_monitor,
mon_output_plain,
mon_output_console,
mon_output_xml,
mon_output_html,
mon_output_cgi
} output_format = mon_output_console;
static char *output_filename = NULL; /* if sending output to a file, its name */
/* other globals */
static char *pid_file = NULL;
static gboolean group_by_node = FALSE;
static gboolean inactive_resources = FALSE;
static int reconnect_msec = 5000;
static gboolean daemonize = FALSE;
static GMainLoop *mainloop = NULL;
static guint timer_id = 0;
static mainloop_timer_t *refresh_timer = NULL;
static pe_working_set_t *mon_data_set = NULL;
static GList *attr_list = NULL;
static const char *external_agent = NULL;
static const char *external_recipient = NULL;
static cib_t *cib = NULL;
static stonith_t *st = NULL;
static xmlNode *current_cib = NULL;
static gboolean one_shot = FALSE;
static gboolean has_warnings = FALSE;
static gboolean print_timing = FALSE;
static gboolean watch_fencing = FALSE;
static gboolean fence_history = FALSE;
static gboolean fence_full_history = FALSE;
static gboolean fence_connect = FALSE;
static int fence_history_level = 1;
static gboolean print_brief = FALSE;
static gboolean print_pending = TRUE;
static gboolean print_clone_detail = FALSE;
#if CURSES_ENABLED
static gboolean curses_console_initialized = FALSE;
#endif
/* FIXME allow, detect, and correctly interpret glob pattern or regex? */
const char *print_neg_location_prefix = "";
/* Never display node attributes whose name starts with one of these prefixes */
#define FILTER_STR { CRM_FAIL_COUNT_PREFIX, CRM_LAST_FAILURE_PREFIX, \
"shutdown", "terminate", "standby", "probe_complete", \
"#", NULL }
long last_refresh = 0;
crm_trigger_t *refresh_trigger = NULL;
/* Define exit codes for monitoring-compatible output
* For nagios plugins, the possibilities are
* OK=0, WARN=1, CRIT=2, and UNKNOWN=3
*/
#define MON_STATUS_WARN CRM_EX_ERROR
#define MON_STATUS_CRIT CRM_EX_INVALID_PARAM
#define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE
/* Convenience macro for prettifying output (e.g. "node" vs "nodes") */
#define s_if_plural(i) (((i) == 1)? "" : "s")
#if CURSES_ENABLED
# define print_dot() if (output_format == mon_output_console) { \
printw("."); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, "."); \
}
#else
# define print_dot() fprintf(stdout, ".");
#endif
#if CURSES_ENABLED
# define print_as(fmt, args...) if (output_format == mon_output_console) { \
printw(fmt, ##args); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, fmt, ##args); \
}
#else
# define print_as(fmt, args...) fprintf(stdout, fmt, ##args);
#endif
/* Clear every line of the ncurses screen and park the cursor at the top
 * left (no-op when built without curses support)
 */
static void
blank_screen(void)
{
#if CURSES_ENABLED
    int lpc = 0;

    for (lpc = 0; lpc < LINES; lpc++) {
        move(lpc, 0);
        clrtoeol();
    }
    move(0, 0);
    refresh();
#endif
}
/*
 * Timeout callback: attempt to reconnect to the cluster after a lost
 * connection, rescheduling itself on failure.
 *
 * \return FALSE so GLib removes this (one-shot) timer source
 */
static gboolean
mon_timer_popped(gpointer data)
{
    int rc = pcmk_ok;

#if CURSES_ENABLED
    if (output_format == mon_output_console) {
        clear();
        refresh();
    }
#endif

    if (timer_id > 0) {
        g_source_remove(timer_id);
        timer_id = 0;
    }

    print_as("Reconnecting...\n");
    rc = cib_connect(TRUE);

    if (rc != pcmk_ok) {
        /* Reconnect failed; try again after the reconnect interval */
        timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
    }
    return FALSE;
}
/*
 * Connection-loss callback: stop pending refreshes, tear down fencer
 * notifications and the CIB signon, then schedule a reconnect attempt.
 */
static void
mon_cib_connection_destroy(gpointer user_data)
{
    print_as("Connection to the cluster-daemons terminated\n");
    if (refresh_timer != NULL) {
        /* we'll trigger a refresh after reconnect */
        mainloop_timer_stop(refresh_timer);
    }
    if (timer_id) {
        /* we'll trigger a new reconnect-timeout at the end */
        g_source_remove(timer_id);
        timer_id = 0;
    }
    if (st) {
        /* the client API won't properly reconnect notifications
         * if they are still in the table - so remove them
         */
        st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
        st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
        st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
        if (st->state != stonith_disconnected) {
            st->cmds->disconnect(st);
        }
    }
    if (cib) {
        cib->cmds->signoff(cib);
        /* Schedule the reconnect attempt */
        timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
    }
    return;
}
/*
 * Mainloop signal handler for SIGTERM/SIGINT: exit cleanly.
 */
static void
mon_shutdown(int nsig)
{
    clean_up(CRM_EX_OK);
}
#if CURSES_ENABLED
static sighandler_t ncurses_winch_handler;
/*
 * SIGWINCH handler: defer to the original ncurses handler to pick up the
 * new terminal size, resize our window to match, and trigger a display
 * refresh.  The static not_done counter guards against re-entrancy.
 */
static void
mon_winresize(int nsig)
{
    static int not_done;
    int lines = 0, cols = 0;

    if (!not_done++) {
        if (ncurses_winch_handler)
            /* the original ncurses WINCH signal handler does the
             * magic of retrieving the new window size;
             * otherwise, we'd have to use ioctl or tgetent */
            (*ncurses_winch_handler) (SIGWINCH);
        getmaxyx(stdscr, lines, cols);
        resizeterm(lines, cols);
        mainloop_set_trigger(refresh_trigger);
    }
    not_done--;
}
#endif
/*
 * Ensure connections to the CIB (and, if requested, the fencer) are up.
 *
 * \param[in] full  If TRUE, also register CIB notification callbacks so the
 *                  display refreshes when the CIB changes
 *
 * \return pcmk_ok on success, negative errno-style code otherwise
 */
static int
cib_connect(gboolean full)
{
    int rc = pcmk_ok;
    static gboolean need_pass = TRUE;

    CRM_CHECK(cib != NULL, return -EINVAL);

    if (getenv("CIB_passwd") != NULL) {
        need_pass = FALSE;
    }

    if (fence_connect && (st == NULL)) {
        st = stonith_api_new();
    }

    /* Guard against stonith_api_new() having failed: previously st->state
     * was read unconditionally here, dereferencing NULL on allocation failure
     */
    if (fence_connect && (st != NULL) && (st->state == stonith_disconnected)) {
        rc = st->cmds->connect(st, crm_system_name, NULL);
        if (rc == pcmk_ok) {
            crm_trace("Setting up stonith callbacks");
            if (watch_fencing) {
                st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
                                                mon_st_callback_event);
                st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event);
            } else {
                st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
                                                mon_st_callback_display);
                st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display);
            }
        }
    }

    if (cib->state != cib_connected_query && cib->state != cib_connected_command) {
        crm_trace("Connecting to the CIB");

        /* Remote CIB connections may prompt for a password on the console */
        if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) {
            need_pass = FALSE;
            print_as("Password:");
        }

        rc = cib->cmds->signon(cib, crm_system_name, cib_query);
        if (rc != pcmk_ok) {
            return rc;
        }

        rc = cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
        if (rc == pcmk_ok) {
            mon_refresh_display(NULL);
        }

        /* The redundant nested (rc == pcmk_ok) check has been flattened */
        if (rc == pcmk_ok && full) {
            rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy);
            if (rc == -EPROTONOSUPPORT) {
                print_as
                    ("Notification setup not supported, won't be able to reconnect after failure");
                if (output_format == mon_output_console) {
                    sleep(2);
                }
                rc = pcmk_ok;  // tolerable: we just lose auto-reconnect
            }

            if (rc == pcmk_ok) {
                cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
                rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
            }

            if (rc != pcmk_ok) {
                print_as("Notification setup failed, could not monitor CIB actions");
                if (output_format == mon_output_console) {
                    sleep(2);
                }
                clean_up_connections();
            }
        }
    }
    return rc;
}
/* *INDENT-OFF* */
/* Command-line options; "-spacer-" entries are help-text layout only */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help", 0, 0, '?', "\tThis text"},
    {"version", 0, 0, '$', "\tVersion information" },
    {"verbose", 0, 0, 'V', "\tIncrease debug output"},
    {"quiet", 0, 0, 'Q', "\tDisplay only essential output" },
    {"-spacer-", 1, 0, '-', "\nModes (mutually exclusive):"},
    {"as-html", 1, 0, 'h', "\tWrite cluster status to the named html file"},
    {"as-xml", 0, 0, 'X', "\t\tWrite cluster status as xml to stdout. This will enable one-shot mode."},
    {"web-cgi", 0, 0, 'w', "\t\tWeb mode with output suitable for CGI (preselected when run as *.cgi)"},
    {"simple-status", 0, 0, 's', "\tDisplay the cluster status once as a simple one line output (suitable for nagios)"},
    {"-spacer-", 1, 0, '-', "\nDisplay Options:"},
    {"group-by-node", 0, 0, 'n', "\tGroup resources by node" },
    {"inactive", 0, 0, 'r', "\t\tDisplay inactive resources" },
    {"failcounts", 0, 0, 'f', "\tDisplay resource fail counts"},
    {"operations", 0, 0, 'o', "\tDisplay resource operation history" },
    {"timing-details", 0, 0, 't', "\tDisplay resource operation history with timing details" },
    {"tickets", 0, 0, 'c', "\t\tDisplay cluster tickets"},
    {"watch-fencing", 0, 0, 'W', "\tListen for fencing events. For use with --external-agent"},
    {"fence-history", 2, 0, 'm', "Show fence history\n"
     "\t\t\t\t\t0=off, 1=failures and pending (default without option),\n"
     "\t\t\t\t\t2=add successes (default without value for option),\n"
     "\t\t\t\t\t3=show full history without reduction to most recent of each flavor"},
    {"neg-locations", 2, 0, 'L', "Display negative location constraints [optionally filtered by id prefix]"},
    {"show-node-attributes", 0, 0, 'A', "Display node attributes" },
    {"hide-headers", 0, 0, 'D', "\tHide all headers" },
    {"show-detail", 0, 0, 'R', "\tShow more details (node IDs, individual clone instances)" },
    {"brief", 0, 0, 'b', "\t\tBrief output" },
    {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
    {"-spacer-", 1, 0, '-', "\nAdditional Options:"},
    {"interval", 1, 0, 'i', "\tUpdate frequency in seconds" },
    {"one-shot", 0, 0, '1', "\t\tDisplay the cluster status once on the console and exit"},
    {"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED},
    {"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"},
    {"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"},
    {"external-agent", 1, 0, 'E', "A program to run when resource operations take place."},
    {"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."},
    {"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden},
    {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', "Display the cluster status on the console with updates as they occur:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display the cluster status on the console just once then exit:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Display your cluster status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example},
    {"-spacer-", 1, 0, '-', "Start crm_mon and export the current cluster status as xml to stdout, then exit.:", pcmk_option_paragraph},
    {"-spacer-", 1, 0, '-', " crm_mon --as-xml", pcmk_option_example},
    {NULL, 0, 0, 0}
};
/* *INDENT-ON* */
#if CURSES_ENABLED
/*
 * Look up the help text for a single-letter option in long_options,
 * with leading whitespace stripped and truncated at the first newline.
 *
 * \param[in] c  Option letter to look up
 *
 * \return Pointer into static storage, valid until the next call, or NULL
 *         if the option letter is not found
 */
static const char *
get_option_desc(char c)
{
    int lpc;

    for (lpc = 0; long_options[lpc].name != NULL; lpc++) {

        if (long_options[lpc].name[0] == '-')
            continue;  // skip "-spacer-" layout entries

        if (long_options[lpc].val == c) {
            static char *buf = NULL;
            const char *rv;
            char *nl;

            /* chop off tabs and cut at newline */
            free(buf); /* free string from last usage */
            buf = strdup(long_options[lpc].desc);
            rv = buf; /* make a copy to keep buf pointer unaltered
                         for freeing when we come by next time.
                         Like this the result stays valid until
                         the next call.
                       */
            while(isspace(rv[0])) {
                rv++;
            }
            nl = strchr(rv, '\n');
            if (nl) {
                *nl = '\0';
            }
            return rv;
        }
    }
    return NULL;
}
#define print_option_help(option, condition) \
print_as("%c %c: \t%s\n", ((condition)? '*': ' '), option, get_option_desc(option));
/*
 * stdin watch callback (console mode): toggle display options from single
 * keypresses; '?' enters an interactive option-change screen.  Any other
 * key triggers a display refresh.
 *
 * \return TRUE to keep the GIOChannel watch installed
 */
static gboolean
detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer unused)
{
    int c;
    gboolean config_mode = FALSE;

    while (1) {

        /* Get user input */
        c = getchar();

        switch (c) {
            case 'm':
                /* Enabling fence history may require a fencer connection */
                if (!fence_history_level) {
                    fence_history = TRUE;
                    fence_connect = TRUE;
                    if (st == NULL) {
                        mon_cib_connection_destroy(NULL);
                    }
                }
                show ^= mon_show_fence_history;
                break;
            case 'c':
                show ^= mon_show_tickets;
                break;
            case 'f':
                show ^= mon_show_failcounts;
                break;
            case 'n':
                group_by_node = ! group_by_node;
                break;
            case 'o':
                show ^= mon_show_operations;
                if ((show & mon_show_operations) == 0) {
                    print_timing = 0;  // timing makes no sense without operations
                }
                break;
            case 'r':
                inactive_resources = ! inactive_resources;
                break;
            case 'R':
                print_clone_detail = ! print_clone_detail;
                break;
            case 't':
                print_timing = ! print_timing;
                if (print_timing) {
                    show |= mon_show_operations;  // timing requires operations
                }
                break;
            case 'A':
                show ^= mon_show_attributes;
                break;
            case 'L':
                show ^= mon_show_bans;
                break;
            case 'D':
                /* If any header is shown, clear them all, otherwise set them all */
                if (show & mon_show_headers) {
                    show &= ~mon_show_headers;
                } else {
                    show |= mon_show_headers;
                }
                break;
            case 'b':
                print_brief = ! print_brief;
                break;
            case 'j':
                print_pending = ! print_pending;
                break;
            case '?':
                config_mode = TRUE;
                break;
            default:
                goto refresh;
        }

        if (!config_mode)
            goto refresh;

        /* Interactive option-change screen: show all toggles and loop for
         * further keypresses until a non-toggle key is pressed */
        blank_screen();

        print_as("Display option change mode\n");
        print_as("\n");
        print_option_help('c', show & mon_show_tickets);
        print_option_help('f', show & mon_show_failcounts);
        print_option_help('n', group_by_node);
        print_option_help('o', show & mon_show_operations);
        print_option_help('r', inactive_resources);
        print_option_help('t', print_timing);
        print_option_help('A', show & mon_show_attributes);
        print_option_help('L', show & mon_show_bans);
        print_option_help('D', (show & mon_show_headers) == 0);
        print_option_help('R', print_clone_detail);
        print_option_help('b', print_brief);
        print_option_help('j', print_pending);
        print_option_help('m', (show & mon_show_fence_history));
        print_as("\n");
        print_as("Toggle fields via field letter, type any other key to return");
    }

  refresh:
    mon_refresh_display(NULL);
    return TRUE;
}
#endif
// Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag
static void
avoid_zombies()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
if (sigemptyset(&sa.sa_mask) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
return;
}
sa.sa_handler = SIG_IGN;
sa.sa_flags = SA_RESTART|SA_NOCLDWAIT;
if (sigaction(SIGCHLD, &sa, NULL) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
}
}
/*
 * crm_mon entry point: parse command-line options, connect to the CIB
 * (and optionally the fencer), then either print cluster status once
 * (one-shot modes) or enter the mainloop and refresh on updates.
 */
int
main(int argc, char **argv)
{
    int flag;
    int argerr = 0;
    int option_index = 0;
    int rc = pcmk_ok;

    pid_file = strdup("/tmp/ClusterMon.pid");
    crm_log_cli_init("crm_mon");
    crm_set_options(NULL, "mode [options]", long_options,
                    "Provides a summary of cluster's current state."
                    "\n\nOutputs varying levels of detail in a number of different formats.\n");

    // Avoid needing to wait for subprocesses forked for -E/--external-agent
    avoid_zombies();

    /* Being invoked as *.cgi selects CGI output and one-shot mode */
    if (crm_ends_with_ext(argv[0], ".cgi") == TRUE) {
        output_format = mon_output_cgi;
        one_shot = TRUE;
    }

    /* to enable stonith-connection when called via some application like pcs
     * set environment-variable FENCE_HISTORY to desired level
     * so you don't have to modify this application
     */
    /* fence_history_level = crm_atoi(getenv("FENCE_HISTORY"), "0"); */

    /* Option parsing */
    while (1) {
        flag = crm_get_option(argc, argv, &option_index);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                crm_bump_log_level(argc, argv);
                break;
            case 'Q':
                show &= ~mon_show_times;
                break;
            case 'i':
                reconnect_msec = crm_get_msec(optarg);
                break;
            case 'n':
                group_by_node = TRUE;
                break;
            case 'r':
                inactive_resources = TRUE;
                break;
            case 'W':
                watch_fencing = TRUE;
                fence_connect = TRUE;
                break;
            case 'm':
                fence_history_level = crm_atoi(optarg, "2");
                break;
            case 'd':
                daemonize = TRUE;
                break;
            case 't':
                print_timing = TRUE;
                show |= mon_show_operations;
                break;
            case 'o':
                show |= mon_show_operations;
                break;
            case 'f':
                show |= mon_show_failcounts;
                break;
            case 'A':
                show |= mon_show_attributes;
                break;
            case 'L':
                show |= mon_show_bans;
                print_neg_location_prefix = optarg? optarg : "";
                break;
            case 'D':
                show &= ~mon_show_headers;
                break;
            case 'b':
                print_brief = TRUE;
                break;
            case 'j':
                print_pending = TRUE;
                break;
            case 'R':
                print_clone_detail = TRUE;
                break;
            case 'c':
                show |= mon_show_tickets;
                break;
            case 'p':
                free(pid_file);
                if(optarg == NULL) {
                    crm_help(flag, CRM_EX_USAGE);
                }
                pid_file = strdup(optarg);
                break;
            case 'x':
                if(optarg == NULL) {
                    crm_help(flag, CRM_EX_USAGE);
                }
                setenv("CIB_file", optarg, 1);
                one_shot = TRUE;
                break;
            case 'h':
                if(optarg == NULL) {
                    crm_help(flag, CRM_EX_USAGE);
                }
                argerr += (output_format != mon_output_console);
                output_format = mon_output_html;
                output_filename = strdup(optarg);
                umask(S_IWGRP | S_IWOTH);
                break;
            case 'X':
                argerr += (output_format != mon_output_console);
                output_format = mon_output_xml;
                one_shot = TRUE;
                break;
            case 'w':
                /* do not allow argv[0] and argv[1...] redundancy */
                argerr += (output_format != mon_output_console);
                output_format = mon_output_cgi;
                one_shot = TRUE;
                break;
            case 's':
                argerr += (output_format != mon_output_console);
                output_format = mon_output_monitor;
                one_shot = TRUE;
                break;
            case 'E':
                external_agent = optarg;
                break;
            case 'e':
                external_recipient = optarg;
                break;
            case '1':
                one_shot = TRUE;
                break;
            case 'N':
                if (output_format == mon_output_console) {
                    output_format = mon_output_plain;
                }
                break;
            case '$':
            case '?':
                crm_help(flag, CRM_EX_OK);
                break;
            default:
                printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
                ++argerr;
                break;
        }
    }

    if (watch_fencing) {
        /* don't moan as fence_history_level == 1 is default */
        fence_history_level = 0;
    }

    /* create the cib-object early to be able to do further
     * decisions based on the cib-source
     */
    cib = cib_new();

    if (cib == NULL) {
        rc = -EINVAL;
    } else {
        switch (cib->variant) {

            case cib_native:
                /* cib & fencing - everything available */
                break;

            case cib_file:
                /* Don't try to connect to fencing as we
                 * either don't have a running cluster or
                 * the fencing-information would possibly
                 * not match the cib data from a file.
                 * As we don't expect cib-updates coming
                 * in enforce one-shot. */
                fence_history_level = 0;
                one_shot = TRUE;
                break;

            case cib_remote:
                /* updates coming in but no fencing */
                fence_history_level = 0;
                break;

            case cib_undefined:
            case cib_database:
            default:
                /* something is odd */
                rc = -EINVAL;
                crm_err("Invalid cib-source");
                break;
        }
    }

    /* Translate the requested fence-history level into flags;
     * each case deliberately falls through to the lower levels */
    switch (fence_history_level) {
        case 3:
            fence_full_history = TRUE;
            /* fall through to next lower level */
        case 2:
            show |= mon_show_fence_history;
            /* fall through to next lower level */
        case 1:
            fence_history = TRUE;
            fence_connect = TRUE;
            break;
        default:
            break;
    }

    /* Extra sanity checks when in CGI mode */
    if (output_format == mon_output_cgi) {
        argerr += (optind < argc);
        argerr += (output_filename != NULL);
        argerr += ((cib) && (cib->variant == cib_file));
        argerr += (external_agent != NULL);
        argerr += (daemonize == TRUE);  /* paranoia */

    } else if (optind < argc) {
        printf("non-option ARGV-elements: ");
        while (optind < argc)
            printf("%s ", argv[optind++]);
        printf("\n");
    }

    if (argerr) {
        return clean_up(CRM_EX_USAGE);
    }

    /* XML output always prints everything */
    if (output_format == mon_output_xml) {
        show = mon_show_all;
        print_timing = TRUE;
    }

    if (one_shot) {
        if (output_format == mon_output_console) {
            output_format = mon_output_plain;
        }

    } else if (daemonize) {
        if ((output_format == mon_output_console) || (output_format == mon_output_plain)) {
            output_format = mon_output_none;
        }
        crm_enable_stderr(FALSE);

        /* A daemon must have somewhere to deliver its output */
        if ((output_format != mon_output_html)
            && !external_agent) {
            printf ("Looks like you forgot to specify one or more of: "
                    "--as-html, --external-agent\n");
            return clean_up(CRM_EX_USAGE);
        }

        if (cib) {
            /* to be on the safe side don't have cib-object around
             * when we are forking
             */
            cib_delete(cib);
            cib = NULL;
            crm_make_daemon(crm_system_name, TRUE, pid_file);
            cib = cib_new();
            if (cib == NULL) {
                rc = -EINVAL;
            }
            /* otherwise assume we've got the same cib-object we've just destroyed
             * in our parent
             */
        }

    } else if (output_format == mon_output_console) {
#if CURSES_ENABLED
        /* Interactive console mode: bring up ncurses */
        initscr();
        cbreak();
        noecho();
        crm_enable_stderr(FALSE);
        curses_console_initialized = TRUE;
#else
        one_shot = TRUE;
        output_format = mon_output_plain;
        printf("Defaulting to one-shot mode\n");
        printf("You need to have curses available at compile time to enable console mode\n");
#endif
    }

    crm_info("Starting %s", crm_system_name);

    /* Connection loop: keep retrying while the cluster is unreachable
     * (unless in one-shot mode) */
    if (cib) {

        do {
            if (!one_shot) {
                print_as("Waiting until cluster is available on this node ...\n");
            }
            rc = cib_connect(!one_shot);

            if (one_shot) {
                break;

            } else if (rc != pcmk_ok) {
                sleep(reconnect_msec / 1000);
#if CURSES_ENABLED
                if (output_format == mon_output_console) {
                    clear();
                    refresh();
                }
#endif
            } else {
                if (output_format == mon_output_html) {
                    print_as("Writing html to %s ...\n", output_filename);
                }
            }

        } while (rc == -ENOTCONN);
    }

    /* Report any connection failure and exit */
    if (rc != pcmk_ok) {
        if (output_format == mon_output_monitor) {
            printf("CLUSTER CRIT: Connection to cluster failed: %s\n",
                   pcmk_strerror(rc));
            return clean_up(MON_STATUS_CRIT);
        } else {
            if (rc == -ENOTCONN) {
                print_as("\nError: cluster is not available on this node\n");
            } else {
                print_as("\nConnection to cluster failed: %s\n",
                         pcmk_strerror(rc));
            }
        }
        if (output_format == mon_output_console) {
            sleep(2);
        }
        return clean_up(crm_errno2exit(rc));
    }

    if (one_shot) {
        return clean_up(CRM_EX_OK);
    }

    /* Continuous mode: run the mainloop with signal handlers installed */
    mainloop = g_main_loop_new(NULL, FALSE);

    mainloop_add_signal(SIGTERM, mon_shutdown);
    mainloop_add_signal(SIGINT, mon_shutdown);
#if CURSES_ENABLED
    if (output_format == mon_output_console) {
        ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize);
        if (ncurses_winch_handler == SIG_DFL ||
            ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
            ncurses_winch_handler = NULL;

        /* Watch stdin for interactive display-option keypresses */
        g_io_add_watch(g_io_channel_unix_new(STDIN_FILENO), G_IO_IN, detect_user_input, NULL);
    }
#endif
    refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);

    g_main_loop_run(mainloop);
    g_main_loop_unref(mainloop);

    crm_info("Exiting %s", crm_system_name);

    return clean_up(CRM_EX_OK);
}
/* Emit a monitoring-plugin-style warning.  The first use on a run prints the
 * "CLUSTER WARN:" prefix; later uses append to the same line separated by
 * commas.  Raises the file-scope has_warnings flag so print_simple_status()
 * knows not to report "CLUSTER OK".
 */
#define mon_warn(fmt...) do { \
    if (!has_warnings) { \
        print_as("CLUSTER WARN:"); \
    } else { \
        print_as(","); \
    } \
    print_as(fmt); \
    has_warnings = TRUE; \
} while(0)
/* Recursively count configured resources, excluding orphans.
 * Passing rsc == NULL counts everything in the working set.
 */
static int
count_resources(pe_working_set_t * data_set, resource_t * rsc)
{
    GListPtr child = NULL;
    int total = 0;

    if (rsc == NULL) {
        child = data_set->resources;
    } else if (rsc->children) {
        child = rsc->children;
    } else {
        /* Leaf resource: counts as one unless it's an orphan */
        return is_not_set(rsc->flags, pe_rsc_orphan);
    }

    while (child != NULL) {
        total += count_resources(data_set, child->data);
        child = child->next;
    }
    return total;
}
/*!
 * \internal
 * \brief Print one-line status suitable for use with monitoring software
 *
 * \param[in] data_set  Working set of CIB state
 * \param[in] history   List of stonith actions (currently unused here)
 *
 * \note This function's output (and the return code when the program exits)
 *       should conform to https://www.monitoring-plugins.org/doc/guidelines.html
 */
static void
print_simple_status(pe_working_set_t * data_set,
                    stonith_history_t *history)
{
    GListPtr iter = NULL;
    int nodes_online = 0;
    int nodes_standby = 0;
    int nodes_maintenance = 0;

    if (data_set->dc_node == NULL) {
        mon_warn(" No DC");
    }

    /* Classify each node; warn about any that are offline */
    for (iter = data_set->nodes; iter != NULL; iter = iter->next) {
        node_t *node = (node_t *) iter->data;

        if (!node->details->online) {
            mon_warn(" offline node: %s", node->details->uname);
        } else if (node->details->standby) {
            nodes_standby++;
        } else if (node->details->maintenance) {
            nodes_maintenance++;
        } else {
            nodes_online++;
        }
    }

    /* Only report OK when no warnings were emitted above */
    if (!has_warnings) {
        int nresources = count_resources(data_set, NULL);

        print_as("CLUSTER OK: %d node%s online", nodes_online, s_if_plural(nodes_online));
        if (nodes_standby > 0) {
            print_as(", %d standby node%s", nodes_standby, s_if_plural(nodes_standby));
        }
        if (nodes_maintenance > 0) {
            print_as(", %d maintenance node%s", nodes_maintenance, s_if_plural(nodes_maintenance));
        }
        print_as(", %d resource%s configured", nresources, s_if_plural(nresources));
    }
    print_as("\n");
}
/*!
 * \internal
 * \brief Print a [name]=[value][units] pair, optionally using time string
 *
 * \param[in] stream      File stream to display output to
 * \param[in] name        Name to display
 * \param[in] value       Value to display (or NULL to convert time instead)
 * \param[in] units       Units to display (or NULL for no units)
 * \param[in] epoch_time  Epoch time to convert if value is NULL
 */
static void
print_nvpair(FILE *stream, const char *name, const char *value,
             const char *units, time_t epoch_time)
{
    /* print name= */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(" %s=", name);
            break;
        case mon_output_html:
        case mon_output_cgi:
        case mon_output_xml:
            fprintf(stream, " %s=", name);
            break;
        default:
            break;
    }

    /* If we have a value (and optionally units), print it */
    if (value) {
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as("%s%s", value, (units? units : ""));
                break;
            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, "%s%s", value, (units? units : ""));
                break;
            case mon_output_xml:
                fprintf(stream, "\"%s%s\"", value, (units? units : ""));
                break;
            default:
                break;
        }

    /* Otherwise print user-friendly time string */
    } else {
        static char empty_str[] = "";
        struct tm *local = localtime(&epoch_time);
        char *c;
        char *date_str = (local == NULL)? NULL : asctime(local);

        /* localtime()/asctime() can fail; never pass NULL to a %s
         * conversion below (undefined behavior) -- fall back to ""
         */
        if (date_str == NULL) {
            date_str = empty_str;
        }

        /* Strip the trailing newline asctime() appends */
        for (c = date_str; *c != '\0'; ++c) {
            if (*c == '\n') {
                *c = '\0';
                break;
            }
        }
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as("'%s'", date_str);
                break;
            case mon_output_html:
            case mon_output_cgi:
            case mon_output_xml:
                fprintf(stream, "\"%s\"", date_str);
                break;
            default:
                break;
        }
    }
}
/*!
 * \internal
 * \brief Print whatever is needed to start a node section
 *
 * \param[in] stream  File stream to display output to
 * \param[in] node    Node to print
 */
static void
print_node_start(FILE *stream, node_t *node)
{
    char *display = NULL;

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            display = get_node_display_name(node);
            print_as("* Node %s:\n", display);
            free(display);
            break;
        case mon_output_html:
        case mon_output_cgi:
            display = get_node_display_name(node);
            fprintf(stream, " <h3>Node: %s</h3>\n <ul>\n", display);
            free(display);
            break;
        case mon_output_xml:
            /* XML uses the raw uname rather than the display name */
            fprintf(stream, " <node name=\"%s\">\n", node->details->uname);
            break;
        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print whatever is needed to end a node section
 *
 * \param[in] stream  File stream to display output to
 */
static void
print_node_end(FILE *stream)
{
    /* Plain/console output needs no closing text */
    if ((output_format == mon_output_html) || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");
    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </node>\n");
    }
}
/*!
 * \internal
 * \brief Print resources section heading appropriate to options
 *
 * \param[in] stream  File stream to display output to
 */
static void
print_resources_heading(FILE *stream)
{
    const char *heading = NULL;

    /* Choose wording based on which resources will actually be listed */
    if (group_by_node) {
        /* Active resources have already been printed by node */
        heading = (inactive_resources? "Inactive resources" : NULL);
    } else if (inactive_resources) {
        heading = "Full list of resources";
    } else {
        heading = "Active resources";
    }

    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\n%s:\n\n", heading);

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>%s</h2>\n", heading);

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <resources>\n");
    }
}
/*!
 * \internal
 * \brief Print whatever resource section closing is appropriate
 *
 * \param[in] stream           File stream to display output to
 * \param[in] printed_heading  Whether a heading (and resources) were printed
 */
static void
print_resources_closing(FILE *stream, gboolean printed_heading)
{
    const char *heading = NULL;

    /* What type of resources we did or did not display */
    if (group_by_node) {
        heading = "inactive ";
    } else if (inactive_resources) {
        heading = "";
    } else {
        heading = "active ";
    }

    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        if (!printed_heading) {
            print_as("\nNo %sresources\n\n", heading);
        }

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        if (!printed_heading) {
            fprintf(stream, " <hr />\n <h2>No %sresources</h2>\n", heading);
        }

    } else if (output_format == mon_output_xml) {
        /* XML always closes the section, empty or not */
        fprintf(stream, " %s\n",
                (printed_heading? "</resources>" : "<resources/>"));
    }
}
/*!
 * \internal
 * \brief Print whatever resource section(s) are appropriate
 *
 * \param[in] stream      File stream to display output to
 * \param[in] data_set    Cluster state to display
 * \param[in] print_opts  Bitmask of pe_print_options
 *
 * \note Also reads the file-scope display options group_by_node,
 *       inactive_resources, print_brief, and output_format.
 */
static void
print_resources(FILE *stream, pe_working_set_t *data_set, int print_opts)
{
    GListPtr rsc_iter;
    const char *prefix = NULL;
    gboolean printed_heading = FALSE;
    gboolean brief_output = print_brief;

    /* If we already showed active resources by node, and
     * we're not showing inactive resources, we have nothing to do
     */
    if (group_by_node && !inactive_resources) {
        return;
    }

    /* XML uses an indent, and ignores brief option for resources */
    if (output_format == mon_output_xml) {
        prefix = " ";
        brief_output = FALSE;
    }

    /* If we haven't already printed resources grouped by node,
     * and brief output was requested, print resource summary */
    if (brief_output && !group_by_node) {
        print_resources_heading(stream);
        printed_heading = TRUE;
        print_rscs_brief(data_set->resources, NULL, print_opts, stream,
                         inactive_resources);
    }

    /* For each resource, display it if appropriate.
     * NOTE: the skip conditions below are order-sensitive; each "else if"
     * assumes the earlier tests failed.
     */
    for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
        resource_t *rsc = (resource_t *) rsc_iter->data;

        /* Complex resources may have some sub-resources active and some inactive */
        gboolean is_active = rsc->fns->active(rsc, TRUE);
        gboolean partially_active = rsc->fns->active(rsc, FALSE);

        /* Skip inactive orphans (deleted but still in CIB) */
        if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
            continue;

        /* Skip active resources if we already displayed them by node */
        } else if (group_by_node) {
            if (is_active) {
                continue;
            }

        /* Skip primitives already counted in a brief summary */
        } else if (brief_output && (rsc->variant == pe_native)) {
            continue;

        /* Skip resources that aren't at least partially active,
         * unless we're displaying inactive resources
         */
        } else if (!partially_active && !inactive_resources) {
            continue;
        }

        /* Print this resource (lazily emitting the heading first) */
        if (printed_heading == FALSE) {
            print_resources_heading(stream);
            printed_heading = TRUE;
        }
        rsc->fns->print(rsc, prefix, print_opts, stream);
    }

    print_resources_closing(stream, printed_heading);
}
/*!
 * \internal
 * \brief Print heading for resource history
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Current state of CIB
 * \param[in] node      Node that ran this resource
 * \param[in] rsc       Resource to print (NULL for an orphan)
 * \param[in] rsc_id    ID of resource to print
 * \param[in] all       Whether to print every resource or just failed ones
 */
static void
print_rsc_history_start(FILE *stream, pe_working_set_t *data_set, node_t *node,
                        resource_t *rsc, const char *rsc_id, gboolean all)
{
    time_t last_failure = 0;

    /* Orphans (rsc == NULL) have no fail count to query */
    int failcount = rsc?
                    pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
                                     NULL, data_set)
                    : 0;

    /* When not printing everything, skip resources that never failed */
    if (!all && !failcount && (last_failure <= 0)) {
        return;
    }

    /* Print resource ID */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(" %s:", rsc_id);
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " <li>%s:", rsc_id);
            break;

        case mon_output_xml:
            fprintf(stream, " <resource_history id=\"%s\"", rsc_id);
            break;

        default:
            break;
    }

    /* If resource is an orphan, that's all we can say about it */
    if (rsc == NULL) {
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as(" orphan");
                break;

            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, " orphan");
                break;

            case mon_output_xml:
                fprintf(stream, " orphan=\"true\"");
                break;

            default:
                break;
        }

    /* If resource is not an orphan, print some details */
    } else if (all || failcount || (last_failure > 0)) {

        /* Print migration threshold */
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as(" migration-threshold=%d", rsc->migration_threshold);
                break;

            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, " migration-threshold=%d", rsc->migration_threshold);
                break;

            case mon_output_xml:
                fprintf(stream, " orphan=\"false\" migration-threshold=\"%d\"",
                        rsc->migration_threshold);
                break;

            default:
                break;
        }

        /* Print fail count if any */
        if (failcount > 0) {
            switch (output_format) {
                case mon_output_plain:
                case mon_output_console:
                    print_as(" " CRM_FAIL_COUNT_PREFIX "=%d", failcount);
                    break;

                case mon_output_html:
                case mon_output_cgi:
                    fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=%d", failcount);
                    break;

                case mon_output_xml:
                    fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=\"%d\"",
                            failcount);
                    break;

                default:
                    break;
            }
        }

        /* Print last failure time if any */
        if (last_failure > 0) {
            print_nvpair(stream, CRM_LAST_FAILURE_PREFIX, NULL, NULL,
                         last_failure);
        }
    }

    /* End the heading (HTML opens the operations list; XML closes the tag) */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, "\n <ul>\n");
            break;

        case mon_output_xml:
            fprintf(stream, ">\n");
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print closing for resource history
 *
 * \param[in] stream  File stream to display output to
 */
static void
print_rsc_history_end(FILE *stream)
{
    /* Plain/console output needs no closing text */
    if ((output_format == mon_output_html) || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n </li>\n");
    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </resource_history>\n");
    }
}
/*!
 * \internal
 * \brief Print operation history
 *
 * \param[in] stream         File stream to display output to
 * \param[in] data_set       Current state of CIB
 * \param[in] node           Node this operation is for
 * \param[in] xml_op         Root of XML tree describing this operation
 * \param[in] task           Task parsed from this operation's XML
 * \param[in] interval_ms_s  Interval parsed from this operation's XML
 * \param[in] rc             Return code parsed from this operation's XML
 */
static void
print_op_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
                 xmlNode *xml_op, const char *task, const char *interval_ms_s,
                 int rc)
{
    const char *value = NULL;

    /* Call ID comes from the operation's XML attributes */
    const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);

    /* Begin the operation description */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(" + (%s) %s:", call, task);
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " <li>(%s) %s:", call, task);
            break;

        case mon_output_xml:
            fprintf(stream, " <operation_history call=\"%s\" task=\"%s\"",
                    call, task);
            break;

        default:
            break;
    }

    /* Add name=value pairs as appropriate; 0-interval (probe) operations
     * don't get an interval printed
     */
    if (interval_ms_s && safe_str_neq(interval_ms_s, "0")) {
        print_nvpair(stream, "interval", interval_ms_s, "ms", 0);
    }
    if (print_timing) {
        int int_value;
        const char *attr;

        /* Timestamps are printed as human-readable dates (value NULL ->
         * print_nvpair() converts the epoch time); durations in ms as-is
         */
        attr = XML_RSC_OP_LAST_CHANGE;
        value = crm_element_value(xml_op, attr);
        if (value) {
            int_value = crm_parse_int(value, NULL);
            if (int_value > 0) {
                print_nvpair(stream, attr, NULL, NULL, int_value);
            }
        }

        attr = XML_RSC_OP_LAST_RUN;
        value = crm_element_value(xml_op, attr);
        if (value) {
            int_value = crm_parse_int(value, NULL);
            if (int_value > 0) {
                print_nvpair(stream, attr, NULL, NULL, int_value);
            }
        }

        attr = XML_RSC_OP_T_EXEC;
        value = crm_element_value(xml_op, attr);
        if (value) {
            print_nvpair(stream, attr, value, "ms", 0);
        }

        attr = XML_RSC_OP_T_QUEUE;
        value = crm_element_value(xml_op, attr);
        if (value) {
            print_nvpair(stream, attr, value, "ms", 0);
        }
    }

    /* End the operation description with its return code */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(" rc=%d (%s)\n", rc, services_ocf_exitcode_str(rc));
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " rc=%d (%s)</li>\n", rc, services_ocf_exitcode_str(rc));
            break;

        case mon_output_xml:
            fprintf(stream, " rc=\"%d\" rc_text=\"%s\" />\n", rc, services_ocf_exitcode_str(rc));
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print resource operation/failure history
 *
 * \param[in] stream      File stream to display output to
 * \param[in] data_set    Current state of CIB
 * \param[in] node        Node that ran this resource
 * \param[in] rsc_entry   Root of XML tree describing resource status
 * \param[in] operations  Whether to print operations or just failcounts
 */
static void
print_rsc_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
                  xmlNode *rsc_entry, gboolean operations)
{
    GListPtr gIter = NULL;
    GListPtr op_list = NULL;
    gboolean printed = FALSE;
    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);

    /* May be NULL if the resource was deleted from the configuration
     * (an orphan); print_rsc_history_start() handles that case
     */
    resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
    xmlNode *rsc_op = NULL;

    /* If we're not showing operations, just print the resource failure summary */
    if (operations == FALSE) {
        print_rsc_history_start(stream, data_set, node, rsc, rsc_id, FALSE);
        print_rsc_history_end(stream);
        return;
    }

    /* Create a list of this resource's operations */
    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) {
        if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
            op_list = g_list_append(op_list, rsc_op);
        }
    }
    op_list = g_list_sort(op_list, sort_op_by_callid);

    /* Print each operation */
    for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
        xmlNode *xml_op = (xmlNode *) gIter->data;
        const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
        const char *interval_ms_s = crm_element_value(xml_op,
                                                      XML_LRM_ATTR_INTERVAL_MS);
        const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
        int rc = crm_parse_int(op_rc, "0");

        /* Display 0-interval monitors as "probe" */
        if (safe_str_eq(task, CRMD_ACTION_STATUS)
            && ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) {
            task = "probe";
        }

        /* Ignore notifies and probes that returned 7 (i.e. found the
         * resource not running, the expected probe result)
         */
        if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || (safe_str_eq(task, "probe") && (rc == 7))) {
            continue;
        }

        /* If this is the first printed operation, print heading for resource */
        if (printed == FALSE) {
            printed = TRUE;
            print_rsc_history_start(stream, data_set, node, rsc, rsc_id, TRUE);
        }

        /* Print the operation */
        print_op_history(stream, data_set, node, xml_op, task, interval_ms_s,
                         rc);
    }

    /* Free the list we created (no need to free the individual items) */
    g_list_free(op_list);

    /* If we printed anything, close the resource */
    if (printed) {
        print_rsc_history_end(stream);
    }
}
/*!
 * \internal
 * \brief Print node operation/failure history
 *
 * \param[in] stream      File stream to display output to
 * \param[in] data_set    Current state of CIB
 * \param[in] node_state  Root of XML tree describing node status
 * \param[in] operations  Whether to print operations or just failcounts
 */
static void
print_node_history(FILE *stream, pe_working_set_t *data_set,
                   xmlNode *node_state, gboolean operations)
{
    node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
    xmlNode *lrm_rsc = NULL;
    xmlNode *rsc_entry = NULL;

    /* Only online nodes get their history printed */
    if ((node == NULL) || (node->details == NULL) || !node->details->online) {
        return;
    }

    print_node_start(stream, node);

    lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
    lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);

    /* Print history of each of the node's resources */
    for (rsc_entry = __xml_first_child(lrm_rsc); rsc_entry != NULL;
         rsc_entry = __xml_next(rsc_entry)) {
        if (crm_str_eq((const char *) rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
            print_rsc_history(stream, data_set, node, rsc_entry, operations);
        }
    }

    print_node_end(stream);
}
/*!
 * \internal
 * \brief Print extended information about an attribute if appropriate
 *
 * \param[in] stream     File stream to display output to
 * \param[in] node       Node whose attribute is being displayed
 * \param[in] rsc_list   Resources to search for one matching the attribute
 * \param[in] attrname   Name of the node attribute being displayed
 * \param[in] attrvalue  Value of the node attribute being displayed
 *
 * \return TRUE if extended information was printed, FALSE otherwise
 * \note Currently, extended information is only supported for ping/pingd
 *       resources, for which a message will be printed if connectivity is lost
 *       or degraded.
 */
static gboolean
print_attr_msg(FILE *stream, node_t * node, GListPtr rsc_list, const char *attrname, const char *attrvalue)
{
    GListPtr gIter = NULL;

    for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;
        const char *type = g_hash_table_lookup(rsc->meta, "type");

        /* Recurse into collective resources */
        if (rsc->children != NULL) {
            if (print_attr_msg(stream, node, rsc->children, attrname, attrvalue)) {
                return TRUE;
            }
        }

        if (safe_str_eq(type, "ping") || safe_str_eq(type, "pingd")) {
            const char *name = g_hash_table_lookup(rsc->parameters, "name");

            if (name == NULL) {
                name = "pingd";
            }

            /* To identify the resource with the attribute name. */
            if (safe_str_eq(name, attrname)) {
                int host_list_num = 0;
                int expected_score = 0;
                int value = crm_parse_int(attrvalue, "0");
                const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
                const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");

                if(hosts) {
                    /* host_list is a space-separated list of ping targets */
                    char **host_list = g_strsplit(hosts, " ", 0);
                    host_list_num = g_strv_length(host_list);
                    g_strfreev(host_list);
                }

                /* pingd multiplier is the same as the default value. */
                expected_score = host_list_num * crm_parse_int(multiplier, "1");

                switch (output_format) {
                    case mon_output_plain:
                    case mon_output_console:
                        if (value <= 0) {
                            print_as("\t: Connectivity is lost");
                        } else if (value < expected_score) {
                            print_as("\t: Connectivity is degraded (Expected=%d)", expected_score);
                        }
                        break;

                    case mon_output_html:
                    case mon_output_cgi:
                        if (value <= 0) {
                            fprintf(stream, " <b>(connectivity is lost)</b>");
                        } else if (value < expected_score) {
                            fprintf(stream, " <b>(connectivity is degraded -- expected %d)</b>",
                                    expected_score);
                        }
                        break;

                    case mon_output_xml:
                        fprintf(stream, " expected=\"%d\"", expected_score);
                        break;

                    default:
                        break;
                }
                return TRUE;
            }
        }
    }
    return FALSE;
}
/* GCompareFunc: order attribute names alphabetically */
static int
compare_attribute(gconstpointer a, gconstpointer b)
{
    return strcmp((const char *) a, (const char *) b);
}
/* GHFunc: add one node attribute name to the sorted attr_list, skipping
 * attributes whose names match any prefix in FILTER_STR (attributes the
 * cluster maintains automatically).  The value argument is unused.
 */
static void
create_attr_list(gpointer name, gpointer value, gpointer data)
{
    int i;
    const char *filt_str[] = FILTER_STR;

    CRM_CHECK(name != NULL, return);

    /* filtering automatic attributes */
    for (i = 0; filt_str[i] != NULL; i++) {
        if (g_str_has_prefix(name, filt_str[i])) {
            return;
        }
    }

    /* attr_list stores the name pointer itself (not a copy) */
    attr_list = g_list_insert_sorted(attr_list, name, compare_attribute);
}
/* structure for passing multiple user data to g_list_foreach() */
struct mon_attr_data {
    FILE *stream;   /* where to write the attribute output */
    node_t *node;   /* node whose attributes are being printed */
};
/* GFunc: print one node attribute (name from attr_list, context from
 * a struct mon_attr_data passed as user_data)
 */
static void
print_node_attribute(gpointer name, gpointer user_data)
{
    struct mon_attr_data *data = (struct mon_attr_data *) user_data;
    const char *value = pe_node_attribute_raw(data->node, name);

    /* Open the attribute entry with its name and value */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as(" + %-32s\t: %-10s", (char *)name, value);

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(data->stream, " <li>%s: %s",
                (char *)name, value);

    } else if (output_format == mon_output_xml) {
        fprintf(data->stream,
                " <attribute name=\"%s\" value=\"%s\"",
                (char *)name, value);
    }

    /* Print extended information if appropriate (e.g. ping connectivity) */
    print_attr_msg(data->stream, data->node, data->node->details->running_rsc,
                   name, value);

    /* Close out the attribute */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(data->stream, "</li>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(data->stream, " />\n");
    }
}
/* Print the operation history (or migration summary, when operations is
 * FALSE) for every node recorded in the CIB status section.
 */
static void
print_node_summary(FILE *stream, pe_working_set_t * data_set, gboolean operations)
{
    xmlNode *state = NULL;
    xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);

    /* Print heading */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as(operations? "\nOperations:\n" : "\nMigration Summary:\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, operations
                    ? " <hr />\n <h2>Operations</h2>\n"
                    : " <hr />\n <h2>Migration Summary</h2>\n");
            break;

        case mon_output_xml:
            fprintf(stream, " <node_history>\n");
            break;

        default:
            break;
    }

    /* Print each node in the CIB status */
    for (state = __xml_first_child(cib_status); state != NULL;
         state = __xml_next(state)) {
        if (crm_str_eq((const char *) state->name, XML_CIB_TAG_STATE, TRUE)) {
            print_node_history(stream, data_set, state, operations);
        }
    }

    /* Close section (only XML needs a footer) */
    if (output_format == mon_output_xml) {
        fprintf(stream, " </node_history>\n");
    }
}
/*!
 * \internal
 * \brief Print one ticket's status (GHFunc for g_hash_table_foreach())
 *
 * \param[in] name   Hash table key (unused; the ticket carries its own ID)
 * \param[in] value  ticket_t to print
 * \param[in] data   FILE stream to display output to
 */
static void
print_ticket(gpointer name, gpointer value, gpointer data)
{
    ticket_t *ticket = (ticket_t *) value;
    FILE *stream = (FILE *) data;

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("* %s:\t%s%s", ticket->id,
                     (ticket->granted? "granted" : "revoked"),
                     (ticket->standby? " [standby]" : ""));
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " <li>%s: %s%s", ticket->id,
                    (ticket->granted? "granted" : "revoked"),
                    (ticket->standby? " [standby]" : ""));
            break;

        case mon_output_xml:
            fprintf(stream, " <ticket id=\"%s\" status=\"%s\" standby=\"%s\"",
                    ticket->id, (ticket->granted? "granted" : "revoked"),
                    (ticket->standby? "true" : "false"));
            break;

        default:
            break;
    }

    if (ticket->last_granted > -1) {
        /* Write to the caller's stream, not stdout, so HTML/XML/CGI output
         * redirected to a file isn't corrupted by a stray stdout line
         */
        print_nvpair(stream, "last-granted", NULL, NULL, ticket->last_granted);
    }

    /* Close out the entry */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, "</li>\n");
            break;

        case mon_output_xml:
            fprintf(stream, " />\n");
            break;

        default:
            break;
    }
}
/* Print the cluster's tickets section in the selected output format */
static void
print_cluster_tickets(FILE *stream, pe_working_set_t * data_set)
{
    /* Print section heading */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nTickets:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Tickets</h2>\n <ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <tickets>\n");
    }

    /* Print each ticket */
    g_hash_table_foreach(data_set->tickets, print_ticket, stream);

    /* Close section */
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </tickets>\n");
    }
}
/*!
 * \internal
 * \brief Return human-friendly string representing node name
 *
 * The returned string will be in the format
 *    uname[@hostUname] [(nodeID)]
 * "@hostUname" will be printed if the node is a guest node.
 * "(nodeID)" will be printed if the node ID is different from the node uname,
 * and detailed output has been requested.
 *
 * \param[in] node  Node to represent
 * \return Newly allocated string with representation of node name
 * \note It is the caller's responsibility to free the result with free().
 */
static char *
get_node_display_name(node_t *node)
{
    char *buf = NULL;
    const char *host = NULL;
    const char *id = NULL;
    int buf_len;

    CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));

    /* Host is displayed only if this is a guest node */
    if (pe__is_guest_node(node)) {
        node_t *host_node = pe__current_node(node->details->remote_rsc);

        if (host_node && host_node->details) {
            host = host_node->details->uname;
        }
        if (host == NULL) {
            /* so we at least get "uname@" to indicate guest */
            host = "";
        }
    }

    /* Node ID is displayed if different from uname and detail is requested */
    if (print_clone_detail && safe_str_neq(node->details->uname, node->details->id)) {
        id = node->details->id;
    }

    /* Size the buffer: uname + NUL, plus optional "@host" and " (id)" */
    buf_len = strlen(node->details->uname) + 1;
    if (host) {
        buf_len += strlen(host) + 1;
    }
    if (id) {
        buf_len += strlen(id) + 3;
    }

    buf = malloc(buf_len);
    CRM_ASSERT(buf != NULL);

    strcpy(buf, node->details->uname);
    if (host) {
        strcat(buf, "@");
        strcat(buf, host);
    }
    if (id) {
        strcat(buf, " (");
        strcat(buf, id);
        strcat(buf, ")");
    }
    return buf;
}
/*!
 * \internal
 * \brief Print a negative location constraint
 *
 * \param[in] stream    File stream to display output to
 * \param[in] node      Node affected by constraint
 * \param[in] location  Constraint to print
 */
static void
print_ban(FILE *stream, pe_node_t *node, pe__location_t *location)
{
    char *display = NULL;

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            display = get_node_display_name(node);
            print_as(" %s\tprevents %s from running %son %s\n",
                     location->id, location->rsc_lh->id,
                     ((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
                     display);
            break;

        case mon_output_html:
        case mon_output_cgi:
            display = get_node_display_name(node);
            fprintf(stream, " <li>%s prevents %s from running %son %s</li>\n",
                    location->id, location->rsc_lh->id,
                    ((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
                    display);
            break;

        case mon_output_xml:
            /* XML uses raw uname/weight rather than the display name */
            fprintf(stream,
                    " <ban id=\"%s\" resource=\"%s\" node=\"%s\" weight=\"%d\" master_only=\"%s\" />\n",
                    location->id, location->rsc_lh->id, node->details->uname, node->weight,
                    ((location->role_filter == RSC_ROLE_MASTER)? "true" : "false"));
            break;

        default:
            break;
    }
    free(display); /* free(NULL) is a no-op */
}
/*!
 * \internal
 * \brief Print section for negative location constraints
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set corresponding to CIB status to display
 */
static void
print_neg_locations(FILE *stream, pe_working_set_t *data_set)
{
    GListPtr constraint_iter = NULL;
    GListPtr node_iter = NULL;

    /* Print section heading */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nNegative Location Constraints:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Negative Location Constraints</h2>\n <ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <bans>\n");
    }

    /* Print a ban for every negatively-weighted node of every constraint
     * whose ID matches the requested prefix
     */
    for (constraint_iter = data_set->placement_constraints;
         constraint_iter != NULL; constraint_iter = constraint_iter->next) {
        pe__location_t *location = constraint_iter->data;

        if (!g_str_has_prefix(location->id, print_neg_location_prefix)) {
            continue;
        }
        for (node_iter = location->node_list_rh; node_iter != NULL;
             node_iter = node_iter->next) {
            node_t *node = (node_t *) node_iter->data;

            if (node->weight < 0) {
                print_ban(stream, node, location);
            }
        }
    }

    /* Close section */
    if ((output_format == mon_output_cgi)
        || (output_format == mon_output_html)) {
        fprintf(stream, " </ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </bans>\n");
    }
}
/* Unpack a resource's parameters (and recursively its children's) into
 * rsc->parameters so they can be inspected, e.g. by print_attr_msg()
 */
static void
crm_mon_get_parameters(resource_t *rsc, pe_working_set_t * data_set)
{
    GListPtr child = NULL;

    get_rsc_attributes(rsc->parameters, rsc, NULL, data_set);
    crm_trace("Beekhof: unpacked params for %s (%d)", rsc->id, g_hash_table_size(rsc->parameters));

    for (child = rsc->children; child != NULL; child = child->next) {
        crm_mon_get_parameters(child->data, data_set);
    }
}
/*!
 * \internal
 * \brief Print node attributes section
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 */
static void
print_node_attributes(FILE *stream, pe_working_set_t *data_set)
{
    GListPtr iter = NULL;

    /* Print section heading */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nNode Attributes:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Node Attributes</h2>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <node_attributes>\n");
    }

    /* Unpack all resource parameters (it would be more efficient to do this
     * only when needed for the first time in print_attr_msg())
     */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        crm_mon_get_parameters(iter->data, data_set);
    }

    /* Display each online node's attributes, sorted and filtered via the
     * file-scope attr_list built by create_attr_list()
     */
    for (iter = data_set->nodes; iter != NULL; iter = iter->next) {
        struct mon_attr_data data;

        data.stream = stream;
        data.node = (node_t *) iter->data;

        if (data.node && data.node->details && data.node->details->online) {
            print_node_start(stream, data.node);
            g_hash_table_foreach(data.node->details->attrs, create_attr_list, NULL);
            g_list_foreach(attr_list, print_node_attribute, &data);
            g_list_free(attr_list);
            attr_list = NULL;
            print_node_end(stream);
        }
    }

    /* Print section footer (only XML needs one) */
    if (output_format == mon_output_xml) {
        fprintf(stream, " </node_attributes>\n");
    }
}
/*!
 * \internal
 * \brief Return resource display options corresponding to command-line choices
 *
 * \return Bitmask of pe_print_options suitable for resource print functions
 */
static int
get_resource_display_options(void)
{
    int opts;

    /* Base format flag follows the selected output format */
    if (output_format == mon_output_console) {
        opts = pe_print_ncurses;

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        opts = pe_print_html;

    } else if (output_format == mon_output_xml) {
        opts = pe_print_xml;

    } else {
        opts = pe_print_printf;
    }

    /* Fold in any optional display elements the user asked for */
    if (print_pending) {
        opts |= pe_print_pending;
    }
    if (print_clone_detail) {
        opts |= pe_print_clone_details|pe_print_implicit;
    }
    if (!inactive_resources) {
        opts |= pe_print_clone_active;
    }
    if (print_brief) {
        opts |= pe_print_brief;
    }
    return opts;
}
/*!
 * \internal
 * \brief Print header for cluster summary if needed
 *
 * \param[in] stream  File stream to display output to
 *
 * \note Plain/console output has no summary header; nothing is printed then.
 */
static void
print_cluster_summary_header(FILE *stream)
{
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " <h2>Cluster Summary</h2>\n <p>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <summary>\n");
    }
}
/*!
 * \internal
 * \brief Print footer for cluster summary if needed
 *
 * \param[in] stream  File stream to display output to
 *
 * \note Plain/console output has no summary footer; nothing is printed then.
 */
static void
print_cluster_summary_footer(FILE *stream)
{
    if ((output_format == mon_output_cgi)
        || (output_format == mon_output_html)) {
        fprintf(stream, " </p>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </summary>\n");
    }
}
/*!
 * \internal
 * \brief Print times the display was last updated and CIB last changed
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 *
 * \note The "last change" metadata (who/via/where) is optional in the CIB;
 *       missing values are omitted (text formats) or emitted as "" (XML).
 */
static void
print_cluster_times(FILE *stream, pe_working_set_t *data_set)
{
    /* CIB-recorded change metadata; any of these may be NULL */
    const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
    const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
    const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
    const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console: {
            const char *now_str = crm_now_string(NULL);

            print_as("Last updated: %s", now_str ? now_str : "Could not determine current time");
            /* If any change metadata exists, put "Last change" on its own
             * line; otherwise keep both on one line separated by tabs
             */
            print_as((user || client || origin)? "\n" : "\t\t");
            print_as("Last change: %s", last_written ? last_written : "");
            if (user) {
                print_as(" by %s", user);
            }
            if (client) {
                print_as(" via %s", client);
            }
            if (origin) {
                print_as(" on %s", origin);
            }
            print_as("\n");
            break;
        }

        case mon_output_html:
        case mon_output_cgi: {
            const char *now_str = crm_now_string(NULL);

            fprintf(stream, " <b>Last updated:</b> %s<br/>\n",
                    now_str ? now_str : "Could not determine current time");
            fprintf(stream, " <b>Last change:</b> %s", last_written ? last_written : "");
            if (user) {
                fprintf(stream, " by %s", user);
            }
            if (client) {
                fprintf(stream, " via %s", client);
            }
            if (origin) {
                fprintf(stream, " on %s", origin);
            }
            fprintf(stream, "<br/>\n");
            break;
        }

        case mon_output_xml: {
            const char *now_str = crm_now_string(NULL);

            fprintf(stream, " <last_update time=\"%s\" />\n",
                    now_str ? now_str : "Could not determine current time");
            /* XML always emits all four attributes, empty when unknown */
            fprintf(stream, " <last_change time=\"%s\" user=\"%s\" client=\"%s\" origin=\"%s\" />\n",
                    last_written ? last_written : "", user ? user : "",
                    client ? client : "", origin ? origin : "");
            break;
        }

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print cluster stack
 *
 * \param[in] stream   File stream to display output to
 * \param[in] stack_s  Stack name
 */
static void
print_cluster_stack(FILE *stream, const char *stack_s)
{
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("Stack: %s\n", stack_s);

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <b>Stack:</b> %s<br/>\n", stack_s);

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <stack type=\"%s\" />\n", stack_s);
    }
}
/*!
 * \internal
 * \brief Print current DC and its version
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 *
 * \note A missing DC is always reported explicitly ("NONE" / present="false").
 */
static void
print_cluster_dc(FILE *stream, pe_working_set_t *data_set)
{
    node_t *dc = data_set->dc_node;
    /* dc-version lives in the CIB as an nvpair; may be absent */
    xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                           data_set->input, LOG_DEBUG);
    const char *dc_version_s = dc_version?
                               crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                               : NULL;
    const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
    /* get_node_display_name() allocates; freed at the end of this function */
    char *dc_name = dc? get_node_display_name(dc) : NULL;

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("Current DC: ");
            if (dc) {
                print_as("%s (version %s) - partition %s quorum\n",
                         dc_name, (dc_version_s? dc_version_s : "unknown"),
                         (crm_is_true(quorum) ? "with" : "WITHOUT"));
            } else {
                print_as("NONE\n");
            }
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " <b>Current DC:</b> ");
            if (dc) {
                /* Highlight quorum loss in red for HTML viewers */
                fprintf(stream, "%s (version %s) - partition %s quorum",
                        dc_name, (dc_version_s? dc_version_s : "unknown"),
                        (crm_is_true(quorum)? "with" : "<font color=\"red\"><b>WITHOUT</b></font>"));
            } else {
                fprintf(stream, "<font color=\"red\"><b>NONE</b></font>");
            }
            fprintf(stream, "<br/>\n");
            break;

        case mon_output_xml:
            fprintf(stream, " <current_dc ");
            if (dc) {
                /* XML uses the raw uname/id rather than the display name */
                fprintf(stream,
                        "present=\"true\" version=\"%s\" name=\"%s\" id=\"%s\" with_quorum=\"%s\"",
                        (dc_version_s? dc_version_s : ""), dc->details->uname, dc->details->id,
                        (crm_is_true(quorum) ? "true" : "false"));
            } else {
                fprintf(stream, "present=\"false\"");
            }
            fprintf(stream, " />\n");
            break;

        default:
            break;
    }
    free(dc_name);
}
/*!
 * \internal
 * \brief Print counts of configured nodes and resources
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 * \param[in] stack_s   Stack name
 *
 * \note stack_s is currently unused; it is kept for interface stability.
 */
static void
print_cluster_counts(FILE *stream, pe_working_set_t *data_set, const char *stack_s)
{
    int nnodes = g_list_length(data_set->nodes);
    int nresources = count_resources(data_set, NULL);

    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n%d node%s configured\n", nnodes, s_if_plural(nnodes));
            print_as("%d resource%s configured",
                     nresources, s_if_plural(nresources));
            /* Append disabled/blocked detail only when there is any */
            if(data_set->disabled_resources || data_set->blocked_resources) {
                print_as(" (");
                if (data_set->disabled_resources) {
                    print_as("%d DISABLED", data_set->disabled_resources);
                }
                if (data_set->disabled_resources && data_set->blocked_resources) {
                    print_as(", ");
                }
                if (data_set->blocked_resources) {
                    print_as("%d BLOCKED from starting due to failure",
                             data_set->blocked_resources);
                }
                print_as(")");
            }
            print_as("\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " %d node%s configured<br/>\n",
                    nnodes, s_if_plural(nnodes));
            fprintf(stream, " %d resource%s configured",
                    nresources, s_if_plural(nresources));
            if (data_set->disabled_resources || data_set->blocked_resources) {
                fprintf(stream, " (");
                if (data_set->disabled_resources) {
                    fprintf(stream, "%d <strong>DISABLED</strong>",
                            data_set->disabled_resources);
                }
                if (data_set->disabled_resources && data_set->blocked_resources) {
                    fprintf(stream, ", ");
                }
                if (data_set->blocked_resources) {
                    fprintf(stream,
                            "%d <strong>BLOCKED</strong> from starting due to failure",
                            data_set->blocked_resources);
                }
                fprintf(stream, ")");
            }
            fprintf(stream, "<br/>\n");
            break;

        case mon_output_xml:
            /* Reuse the counts computed above instead of walking the node
             * list and resource tree a second time
             */
            fprintf(stream,
                    " <nodes_configured number=\"%d\" />\n", nnodes);
            fprintf(stream,
                    " <resources_configured number=\"%d\" disabled=\"%d\" blocked=\"%d\" />\n",
                    nresources,
                    data_set->disabled_resources, data_set->blocked_resources);
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print cluster-wide options
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 *
 * \note Currently this is only implemented for HTML and XML output, and
 *       prints only a few options. If there is demand, more could be added.
 * \note Plain/console output shows only a maintenance-mode warning banner.
 */
static void
print_cluster_options(FILE *stream, pe_working_set_t *data_set)
{
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
                print_as("\n *** Resource management is DISABLED ***");
                print_as("\n The cluster will not attempt to start, stop or recover services");
                print_as("\n");
            }
            break;

        case mon_output_html:
            /* Close the summary paragraph and render options as a table */
            fprintf(stream, " </p>\n <h3>Config Options</h3>\n");
            fprintf(stream, " <table>\n");
            fprintf(stream, " <tr><th>STONITH of failed nodes</th><td>%s</td></tr>\n",
                    is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
            fprintf(stream, " <tr><th>Cluster is</th><td>%ssymmetric</td></tr>\n",
                    is_set(data_set->flags, pe_flag_symmetric_cluster)? "" : "a");
            fprintf(stream, " <tr><th>No Quorum Policy</th><td>");
            /* No default case: all no_quorum_policy values are handled */
            switch (data_set->no_quorum_policy) {
                case no_quorum_freeze:
                    fprintf(stream, "Freeze resources");
                    break;
                case no_quorum_stop:
                    fprintf(stream, "Stop ALL resources");
                    break;
                case no_quorum_ignore:
                    fprintf(stream, "Ignore");
                    break;
                case no_quorum_suicide:
                    fprintf(stream, "Suicide");
                    break;
            }
            fprintf(stream, "</td></tr>\n");
            fprintf(stream, " <tr><th>Resource management</th><td>");
            if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
                fprintf(stream, "<strong>DISABLED</strong> (the cluster will "
                                "not attempt to start, stop or recover services)");
            } else {
                fprintf(stream, "enabled");
            }
            fprintf(stream, "</td></tr>\n");
            fprintf(stream, "</table>\n <p>\n");
            break;

        case mon_output_xml:
            /* Emit a single <cluster_options/> element, one attribute at a
             * time
             */
            fprintf(stream, " <cluster_options");
            fprintf(stream, " stonith-enabled=\"%s\"",
                    is_set(data_set->flags, pe_flag_stonith_enabled)?
                        "true" : "false");
            fprintf(stream, " symmetric-cluster=\"%s\"",
                    is_set(data_set->flags, pe_flag_symmetric_cluster)?
                        "true" : "false");
            fprintf(stream, " no-quorum-policy=\"");
            switch (data_set->no_quorum_policy) {
                case no_quorum_freeze:
                    fprintf(stream, "freeze");
                    break;
                case no_quorum_stop:
                    fprintf(stream, "stop");
                    break;
                case no_quorum_ignore:
                    fprintf(stream, "ignore");
                    break;
                case no_quorum_suicide:
                    fprintf(stream, "suicide");
                    break;
            }
            fprintf(stream, "\"");
            fprintf(stream, " maintenance-mode=\"%s\"",
                    is_set(data_set->flags, pe_flag_maintenance_mode)?
                        "true" : "false");
            fprintf(stream, " />\n");
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Get the name of the stack in use (or "unknown" if not available)
 *
 * \param[in] data_set  Working set of CIB state
 *
 * \return String representing stack name
 */
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
    const char *stack_name = "unknown";
    xmlNode *stack_xml = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
                                          data_set->input, LOG_DEBUG);

    if (stack_xml != NULL) {
        stack_name = crm_element_value(stack_xml, XML_NVPAIR_ATTR_VALUE);
    }
    return stack_name;
}
/*!
 * \internal
 * \brief Print a summary of cluster-wide information
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 */
static void
print_cluster_summary(FILE *stream, pe_working_set_t *data_set)
{
    const char *stack_s = get_cluster_stack(data_set);

    /* Decide up front which sections will be shown */
    gboolean show_stack_section = ((show & mon_show_stack) != 0);

    /* Always print DC if none, even if not requested */
    gboolean show_dc_section = ((data_set->dc_node == NULL)
                                || (show & mon_show_dc));

    gboolean show_times_section = ((show & mon_show_times) != 0);

    gboolean show_counts_section = (is_set(data_set->flags, pe_flag_maintenance_mode)
                                    || data_set->disabled_resources
                                    || data_set->blocked_resources
                                    || is_set(show, mon_show_count));

    gboolean any_section = (show_stack_section || show_dc_section
                            || show_times_section || show_counts_section);

    /* Header goes out once, before the first section */
    if (any_section) {
        print_cluster_summary_header(stream);
    }
    if (show_stack_section) {
        print_cluster_stack(stream, stack_s);
    }
    if (show_dc_section) {
        print_cluster_dc(stream, data_set);
    }
    if (show_times_section) {
        print_cluster_times(stream, data_set);
    }
    if (show_counts_section) {
        print_cluster_counts(stream, data_set, stack_s);
    }

    /* There is not a separate option for showing cluster options, so show with
     * stack for now; a separate option could be added if there is demand
     */
    if (show & mon_show_stack) {
        print_cluster_options(stream, data_set);
    }

    if (any_section) {
        print_cluster_summary_footer(stream);
    }
}
/*!
 * \internal
 * \brief Print a failed action
 *
 * \param[in] stream  File stream to display output to
 * \param[in] xml_op  Root of XML tree describing failed action
 *
 * \note Falls back to the operation's XML id when no op_key attribute exists,
 *       and to "none" when no exit reason was recorded.
 */
static void
print_failed_action(FILE *stream, xmlNode *xml_op)
{
    const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
    /* XML attribute name to use for the key ("op_key", or "id" on fallback) */
    const char *op_key_attr = "op_key";
    const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
    const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
    const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
    const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
    int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
    int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
    char *exit_reason_cleaned;

    /* If no op_key was given, use id instead */
    if (op_key == NULL) {
        op_key = ID(xml_op);
        op_key_attr = "id";
    }

    /* If no exit reason was given, use "none" */
    if (exit_reason == NULL) {
        exit_reason = "none";
    }

    /* Print common action information */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("* %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
                     op_key, node, services_ocf_exitcode_str(rc), rc,
                     call, services_lrm_status_str(status), exit_reason);
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, " <li>%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
                    op_key, node, services_ocf_exitcode_str(rc), rc,
                    call, services_lrm_status_str(status), exit_reason);
            break;

        case mon_output_xml:
            /* Exit reason may contain characters needing XML escaping */
            exit_reason_cleaned = crm_xml_escape(exit_reason);
            fprintf(stream, " <failure %s=\"%s\" node=\"%s\"",
                    op_key_attr, op_key, node);
            fprintf(stream, " exitstatus=\"%s\" exitreason=\"%s\" exitcode=\"%d\"",
                    services_ocf_exitcode_str(rc), exit_reason_cleaned, rc);
            fprintf(stream, " call=\"%s\" status=\"%s\"",
                    call, services_lrm_status_str(status));
            free(exit_reason_cleaned);
            break;

        default:
            break;
    }

    /* If last change was given, print timing information as well */
    if (last) {
        time_t run_at = crm_parse_int(last, "0");
        char *run_at_s = ctime(&run_at);

        if (run_at_s) {
            /* ctime() yields a fixed-width "Www Mmm dd hh:mm:ss yyyy\n";
             * index 24 is the trailing newline
             */
            run_at_s[24] = 0; /* Overwrite the newline */
        }

        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as(",\n last-rc-change='%s', queued=%sms, exec=%sms",
                         run_at_s? run_at_s : "",
                         crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                         crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
                break;

            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, " last-rc-change='%s', queued=%sms, exec=%sms",
                        run_at_s? run_at_s : "",
                        crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                        crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
                break;

            case mon_output_xml:
                /* XML also records the operation interval and task name */
                fprintf(stream,
                        " last-rc-change=\"%s\" queued=\"%s\" exec=\"%s\" interval=\"%u\" task=\"%s\"",
                        run_at_s? run_at_s : "",
                        crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                        crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                        crm_parse_ms(crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS)),
                        crm_element_value(xml_op, XML_LRM_ATTR_TASK));
                break;

            default:
                break;
        }
    }

    /* End the action listing */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n");
            break;

        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, "</li>\n");
            break;

        case mon_output_xml:
            fprintf(stream, " />\n");
            break;

        default:
            break;
    }
}
/*!
 * \internal
 * \brief Print a section for failed actions
 *
 * \param[in] stream    File stream to display output to
 * \param[in] data_set  Working set of CIB state
 */
static void
print_failed_actions(FILE *stream, pe_working_set_t *data_set)
{
    xmlNode *xml_op = NULL;

    /* Open the section for the active output format */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nFailed Resource Actions:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream,
                " <hr />\n <h2>Failed Resource Actions</h2>\n <ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <failures>\n");
    }

    /* One entry per failed operation recorded in the working set */
    for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
         xml_op = __xml_next(xml_op)) {
        print_failed_action(stream, xml_op);
    }

    /* Close the section */
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </failures>\n");
    }
}
/*!
 * \internal
 * \brief Reduce the stonith-history
 *        for successful actions we keep the last of every action-type & target
 *        for failed actions we record as well who had failed
 *        for actions in progress we keep full track
 *
 * \param[in] history  List of stonith actions
 *
 * \return Reduced list (nodes are reused from \p history; purged duplicates
 *         are freed here, so the caller must not reuse the input list)
 */
static stonith_history_t *
reduce_stonith_history(stonith_history_t *history)
{
    stonith_history_t *new = NULL, *hp, *np, *tmp;

    for (hp = history; hp; ) {
        /* Scan the already-kept entries for a duplicate of hp */
        for (np = new; np; np = np->next) {
            if ((hp->state == st_done) || (hp->state == st_failed)) {
                /* action not in progress */
                if (safe_str_eq(hp->target, np->target) &&
                    safe_str_eq(hp->action, np->action) &&
                    (hp->state == np->state)) {
                    /* For failures, also require the same delegate so each
                     * failing executor stays visible
                     */
                    if ((hp->state == st_done) ||
                        safe_str_eq(hp->delegate, np->delegate)) {
                        /* replace or purge */
                        if (hp->completed < np->completed) {
                            /* purge older hp */
                            tmp = hp->next;
                            hp->next = NULL; /* detach before freeing the node */
                            stonith_history_free(hp);
                            hp = tmp;
                            break;
                        }
                        /* damn single linked list */
                        /* hp is newer: move its strings into np, then free
                         * hp's node shell (target/action already exist in np)
                         */
                        free(hp->target);
                        free(hp->action);
                        free(np->origin);
                        np->origin = hp->origin;
                        free(np->delegate);
                        np->delegate = hp->delegate;
                        free(np->client);
                        np->client = hp->client;
                        np->completed = hp->completed;
                        tmp = hp;
                        hp = hp->next;
                        free(tmp);
                        break;
                    }
                }
                /* Not a match; keep scanning unless np is the last entry */
                if (np->next) {
                    continue;
                }
            }
            /* Either hp is in progress (kept verbatim) or no duplicate was
             * found: signal the outer loop to move hp into new
             */
            np = 0; /* let outer loop progress hp */
            break;
        }
        /* simply move hp from history to new */
        if (np == NULL) {
            tmp = hp->next;
            hp->next = new;
            new = hp;
            hp = tmp;
        }
    }

    return new;
}
/*!
 * \internal
 * \brief Sort the stonith-history
 *        sort by competed most current on the top
 *        pending actions lacking a completed-stamp are gathered at the top
 *
 * \param[in] history  List of stonith actions
 *
 * \return Sorted list (reuses the input nodes; the input list is consumed)
 */
static stonith_history_t *
sort_stonith_history(stonith_history_t *history)
{
    stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp;

    for (hp = history; hp; ) {
        tmp = hp->next;
        if ((hp->state == st_done) || (hp->state == st_failed)) {
            /* sort into new: insertion sort, descending by completed time */
            if ((!new) || (hp->completed > new->completed)) {
                /* Newest so far: becomes the new head */
                hp->next = new;
                new = hp;
            } else {
                /* Walk until hp belongs before np->next (or at the tail) */
                np = new;
                do {
                    if ((!np->next) || (hp->completed > np->next->completed)) {
                        hp->next = np->next;
                        np->next = hp;
                        break;
                    }
                    np = np->next;
                } while (1);
            }
        } else {
            /* put into pending (this reverses their relative order) */
            hp->next = pending;
            pending = hp;
        }
        hp = tmp;
    }

    /* pending actions don't have a completed-stamp so make them go front */
    if (pending) {
        stonith_history_t *last_pending = pending;

        while (last_pending->next) {
            last_pending = last_pending->next;
        }

        last_pending->next = new;
        new = pending;
    }
    return new;
}
/*!
 * \internal
 * \brief Print a stonith action
 *
 * \param[in] stream  File stream to display output to
 * \param[in] event   stonith event
 *
 * \note Only XML, plain/console, and HTML/CGI formats are supported; other
 *       formats print nothing.
 */
static void
print_stonith_action(FILE *stream, stonith_history_t *event)
{
    const char *action_s = stonith_action_str(event->action);
    char *run_at_s = ctime(&event->completed);

    /* Strip ctime()'s trailing newline (guarded against an empty string) */
    if ((run_at_s) && (run_at_s[0] != 0)) {
        run_at_s[strlen(run_at_s)-1] = 0; /* Overwrite the newline */
    }

    switch(output_format) {
        case mon_output_xml:
            fprintf(stream, " <fence_event target=\"%s\" action=\"%s\"",
                    event->target, event->action);
            /* Map internal state to the XML state attribute */
            switch(event->state) {
                case st_done:
                    fprintf(stream, " state=\"success\"");
                    break;
                case st_failed:
                    fprintf(stream, " state=\"failed\"");
                    break;
                default:
                    fprintf(stream, " state=\"pending\"");
            }
            fprintf(stream, " origin=\"%s\" client=\"%s\"",
                    event->origin, event->client);
            if (event->delegate) {
                fprintf(stream, " delegate=\"%s\"", event->delegate);
            }
            /* Only completed (done/failed) events carry a timestamp */
            switch(event->state) {
                case st_done:
                case st_failed:
                    fprintf(stream, " completed=\"%s\"", run_at_s?run_at_s:"");
                    break;
                default:
                    break;
            }
            fprintf(stream, " />\n");
            break;

        case mon_output_plain:
        case mon_output_console:
            switch(event->state) {
                case st_done:
                    /* The timestamp label depends on whether the full history
                     * was requested (fence_full_history) or only the latest
                     */
                    print_as("* %s of %s successful: delegate=%s, client=%s, origin=%s,\n"
                             " %s='%s'\n",
                             action_s, event->target,
                             event->delegate ? event->delegate : "",
                             event->client, event->origin,
                             fence_full_history?"completed":"last-successful",
                             run_at_s?run_at_s:"");
                    break;
                case st_failed:
                    print_as("* %s of %s failed: delegate=%s, client=%s, origin=%s,\n"
                             " %s='%s'\n",
                             action_s, event->target,
                             event->delegate ? event->delegate : "",
                             event->client, event->origin,
                             fence_full_history?"completed":"last-failed",
                             run_at_s?run_at_s:"");
                    break;
                default:
                    print_as("* %s of %s pending: client=%s, origin=%s\n",
                             action_s, event->target,
                             event->client, event->origin);
            }
            break;

        case mon_output_html:
        case mon_output_cgi:
            switch(event->state) {
                case st_done:
                    fprintf(stream, " <li>%s of %s successful: delegate=%s, "
                                    "client=%s, origin=%s, %s='%s'</li>\n",
                            action_s, event->target,
                            event->delegate ? event->delegate : "",
                            event->client, event->origin,
                            fence_full_history?"completed":"last-successful",
                            run_at_s?run_at_s:"");
                    break;
                case st_failed:
                    fprintf(stream, " <li>%s of %s failed: delegate=%s, "
                                    "client=%s, origin=%s, %s='%s'</li>\n",
                            action_s, event->target,
                            event->delegate ? event->delegate : "",
                            event->client, event->origin,
                            fence_full_history?"completed":"last-failed",
                            run_at_s?run_at_s:"");
                    break;
                default:
                    fprintf(stream, " <li>%s of %s pending: client=%s, "
                                    "origin=%s</li>\n",
                            action_s, event->target,
                            event->client, event->origin);
            }
            break;

        default:
            /* no support for fence history for other formats so far */
            break;
    }
}
/*!
 * \internal
 * \brief Print a section for failed stonith actions
 *
 * \param[in] stream   File stream to display output to
 * \param[in] history  List of stonith actions
 *
 * \note Prints nothing if no action in the history has failed. XML output is
 *       skipped here because XML always receives the full history.
 */
static void
print_failed_stonith_actions(FILE *stream, stonith_history_t *history)
{
    stonith_history_t *hp = NULL;
    gboolean any_failed = FALSE;

    /* Bail out early unless at least one action failed */
    for (hp = history; hp != NULL; hp = hp->next) {
        if (hp->state == st_failed) {
            any_failed = TRUE;
            break;
        }
    }
    if (!any_failed) {
        return;
    }

    /* Section heading (no XML case: XML gets the full history elsewhere) */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nFailed Fencing Actions:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Failed Fencing Actions</h2>\n <ul>\n");
    }

    /* List every failed action */
    for (hp = history; hp != NULL; hp = hp->next) {
        if (hp->state == st_failed) {
            print_stonith_action(stream, hp);
        }
    }

    /* Section footer */
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");
    }
}
/*!
 * \internal
 * \brief Print pending stonith actions
 *
 * \param[in] stream   File stream to display output to
 * \param[in] history  List of stonith actions
 *
 * \note Assumes the history has been sorted so that pending actions (if any)
 *       are grouped at the front of the list.
 */
static void
print_stonith_pending(FILE *stream, stonith_history_t *history)
{
    stonith_history_t *hp = NULL;

    /* xml-output always shows the full history
     * so we'll never have to show pending-actions
     * separately
     */
    if ((history == NULL)
        || (history->state == st_failed)
        || (history->state == st_done)) {
        /* No pending actions at the head of the list: nothing to print */
        return;
    }

    /* Section heading */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nPending Fencing Actions:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Pending Fencing Actions</h2>\n <ul>\n");
    }

    /* Print entries until the first completed (done/failed) one */
    for (hp = history; hp != NULL; hp = hp->next) {
        if ((hp->state == st_failed) || (hp->state == st_done)) {
            break;
        }
        print_stonith_action(stream, hp);
    }

    /* Section footer */
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");
    }
}
/*!
 * \internal
 * \brief Print a section for stonith-history
 *
 * \param[in] stream   File stream to display output to
 * \param[in] history  List of stonith actions
 *
 * \note Failed actions are omitted here for non-XML formats (they get their
 *       own section); XML output includes everything.
 */
static void
print_stonith_history(FILE *stream, stonith_history_t *history)
{
    stonith_history_t *hp = NULL;

    /* Section heading */
    if ((output_format == mon_output_plain)
        || (output_format == mon_output_console)) {
        print_as("\nFencing History:\n");

    } else if ((output_format == mon_output_html)
               || (output_format == mon_output_cgi)) {
        fprintf(stream, " <hr />\n <h2>Fencing History</h2>\n <ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " <fence_history>\n");
    }

    /* Print entries (XML gets all; other formats skip failures) */
    for (hp = history; hp != NULL; hp = hp->next) {
        if ((hp->state != st_failed) || (output_format == mon_output_xml)) {
            print_stonith_action(stream, hp);
        }
    }

    /* Section footer */
    if ((output_format == mon_output_html)
        || (output_format == mon_output_cgi)) {
        fprintf(stream, " </ul>\n");

    } else if (output_format == mon_output_xml) {
        fprintf(stream, " </fence_history>\n");
    }
}
/*!
 * \internal
 * \brief Print cluster status to screen
 *
 * This uses the global display preferences set by command-line options
 * to display cluster status in a human-friendly way.
 *
 * \param[in] data_set        Working set of CIB state
 * \param[in] stonith_history List of stonith actions
 */
static void
print_status(pe_working_set_t * data_set,
             stonith_history_t *stonith_history)
{
    GListPtr gIter = NULL;
    int print_opts = get_resource_display_options();

    /* space-separated lists of node names */
    char *online_nodes = NULL;
    char *online_remote_nodes = NULL;
    char *online_guest_nodes = NULL;
    char *offline_nodes = NULL;
    char *offline_remote_nodes = NULL;

    /* In console (curses) mode, start from a clean screen each refresh */
    if (output_format == mon_output_console) {
        blank_screen();
    }
    print_cluster_summary(stdout, data_set);
    print_as("\n");

    /* Gather node information (and print if in bad state or grouping by node) */
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *node_mode = NULL;
        char *node_name = get_node_display_name(node);

        /* Get node mode (order matters: unclean takes precedence, then
         * pending, standby, maintenance, and finally plain online/offline)
         */
        if (node->details->unclean) {
            if (node->details->online) {
                node_mode = "UNCLEAN (online)";

            } else if (node->details->pending) {
                node_mode = "UNCLEAN (pending)";

            } else {
                node_mode = "UNCLEAN (offline)";
            }

        } else if (node->details->pending) {
            node_mode = "pending";

        } else if (node->details->standby_onfail && node->details->online) {
            node_mode = "standby (on-fail)";

        } else if (node->details->standby) {
            if (node->details->online) {
                if (node->details->running_rsc) {
                    node_mode = "standby (with active resources)";
                } else {
                    node_mode = "standby";
                }
            } else {
                node_mode = "OFFLINE (standby)";
            }

        } else if (node->details->maintenance) {
            if (node->details->online) {
                node_mode = "maintenance";
            } else {
                node_mode = "OFFLINE (maintenance)";
            }

        } else if (node->details->online) {
            node_mode = "online";
            /* Healthy online nodes are summarized later unless grouping by
             * node; collect the name and move on
             */
            if (group_by_node == FALSE) {
                if (pe__is_guest_node(node)) {
                    online_guest_nodes = add_list_element(online_guest_nodes, node_name);
                } else if (pe__is_remote_node(node)) {
                    online_remote_nodes = add_list_element(online_remote_nodes, node_name);
                } else {
                    online_nodes = add_list_element(online_nodes, node_name);
                }
                free(node_name);
                continue;
            }

        } else {
            node_mode = "OFFLINE";
            if (group_by_node == FALSE) {
                if (pe__is_remote_node(node)) {
                    offline_remote_nodes = add_list_element(offline_remote_nodes, node_name);
                } else if (pe__is_guest_node(node)) {
                    /* ignore offline guest nodes */
                } else {
                    offline_nodes = add_list_element(offline_nodes, node_name);
                }
                free(node_name);
                continue;
            }
        }

        /* If we get here, node is in bad state, or we're grouping by node */

        /* Print the node name and status */
        if (pe__is_guest_node(node)) {
            print_as("Guest");
        } else if (pe__is_remote_node(node)) {
            print_as("Remote");
        }
        print_as("Node %s: %s\n", node_name, node_mode);

        /* If we're grouping by node, print its resources */
        if (group_by_node) {
            if (print_brief) {
                print_rscs_brief(node->details->running_rsc, "\t", print_opts | pe_print_rsconly,
                                 stdout, FALSE);
            } else {
                GListPtr gIter2 = NULL;

                for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
                    resource_t *rsc = (resource_t *) gIter2->data;

                    rsc->fns->print(rsc, "\t", print_opts | pe_print_rsconly, stdout);
                }
            }
        }
        free(node_name);
    }

    /* If we're not grouping by node, summarize nodes by status */
    if (online_nodes) {
        print_as("Online: [%s ]\n", online_nodes);
        free(online_nodes);
    }
    if (offline_nodes) {
        print_as("OFFLINE: [%s ]\n", offline_nodes);
        free(offline_nodes);
    }
    if (online_remote_nodes) {
        print_as("RemoteOnline: [%s ]\n", online_remote_nodes);
        free(online_remote_nodes);
    }
    if (offline_remote_nodes) {
        print_as("RemoteOFFLINE: [%s ]\n", offline_remote_nodes);
        free(offline_remote_nodes);
    }
    if (online_guest_nodes) {
        print_as("GuestOnline: [%s ]\n", online_guest_nodes);
        free(online_guest_nodes);
    }

    /* Print resources section, if needed */
    print_resources(stdout, data_set, print_opts);

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stdout, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stdout, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stdout, data_set);
    }

    /* Print failed stonith actions */
    if (fence_history) {
        print_failed_stonith_actions(stdout, stonith_history);
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stdout, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stdout, data_set);
    }

    /* Print stonith history: full history if requested, otherwise just the
     * still-pending actions
     */
    if (fence_history) {
        if (show & mon_show_fence_history) {
            print_stonith_history(stdout, stonith_history);
        } else {
            print_stonith_pending(stdout, stonith_history);
        }
    }

#if CURSES_ENABLED
    /* Flush the curses buffer to the terminal */
    if (output_format == mon_output_console) {
        refresh();
    }
#endif
}
/*!
 * \internal
 * \brief Print cluster status in XML format
 *
 * \param[in] data_set        Working set of CIB state
 * \param[in] stonith_history Fencing history to print (used when
 *                            fence_history is enabled)
 */
static void
print_xml_status(pe_working_set_t * data_set,
                 stonith_history_t *stonith_history)
{
    FILE *stream = stdout;
    GListPtr gIter = NULL;
    int print_opts = get_resource_display_options();

    fprintf(stream, "<?xml version=\"1.0\"?>\n");
    fprintf(stream, "<crm_mon version=\"%s\">\n", VERSION);

    print_cluster_summary(stream, data_set);

    /*** NODES ***/
    fprintf(stream, " <nodes>\n");
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *node_type = "unknown";

        switch (node->details->type) {
            case node_member:
                node_type = "member";
                break;
            case node_remote:
                node_type = "remote";
                break;
            case node_ping:
                node_type = "ping";
                break;
        }

        fprintf(stream, " <node name=\"%s\" ", node->details->uname);
        fprintf(stream, "id=\"%s\" ", node->details->id);
        fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false");
        fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false");
        fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false");
        fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false");
        fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false");
        fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false");
        fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false");
        fprintf(stream, "expected_up=\"%s\" ", node->details->expected_up ? "true" : "false");
        fprintf(stream, "is_dc=\"%s\" ", node->details->is_dc ? "true" : "false");
        fprintf(stream, "resources_running=\"%d\" ", g_list_length(node->details->running_rsc));
        fprintf(stream, "type=\"%s\" ", node_type);

        /* Guest nodes are also implemented by a container resource */
        if (pe__is_guest_node(node)) {
            fprintf(stream, "id_as_resource=\"%s\" ", node->details->remote_rsc->container->id);
        }

        if (group_by_node) {
            GListPtr lpc2 = NULL;

            fprintf(stream, ">\n");
            for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                resource_t *rsc = (resource_t *) lpc2->data;

                rsc->fns->print(rsc, " ", print_opts | pe_print_rsconly, stream);
            }
            fprintf(stream, " </node>\n");
        } else {
            fprintf(stream, "/>\n");
        }
    }
    fprintf(stream, " </nodes>\n");

    /* Print resources section, if needed */
    print_resources(stream, data_set, print_opts);

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stream, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stream, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stream, data_set);
    }

    /* Print stonith history */
    if (fence_history) {
        /* Write to stream (not stdout directly) for consistency with the
         * other section writers above
         */
        print_stonith_history(stream, stonith_history);
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stream, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stream, data_set);
    }

    fprintf(stream, "</crm_mon>\n");
    fflush(stream);

    /* NOTE: stream is stdout, so this closes stdout; crm_mon exits shortly
     * after a one-shot XML dump, so nothing else writes to it afterward
     */
    fclose(stream);
}
/*!
 * \internal
 * \brief Print cluster status in HTML format (with HTTP headers if CGI)
 *
 * \param[in] data_set        Working set of CIB state
 * \param[in] filename        Name of file to write HTML to (ignored if CGI)
 * \param[in] stonith_history Fencing history to print (used when
 *                            fence_history is enabled)
 *
 * \return 0 on success, -1 on error
 */
static int
print_html_status(pe_working_set_t * data_set,
                  const char *filename,
                  stonith_history_t *stonith_history)
{
    FILE *stream;
    GListPtr gIter = NULL;
    char *filename_tmp = NULL;
    int print_opts = get_resource_display_options();

    if (output_format == mon_output_cgi) {
        stream = stdout;
        fprintf(stream, "Content-Type: text/html\n\n");

    } else {
        /* Write to a temporary file first, then atomically rename into
         * place, so readers never see a half-written status page
         */
        filename_tmp = crm_concat(filename, "tmp", '.');
        stream = fopen(filename_tmp, "w");
        if (stream == NULL) {
            crm_perror(LOG_ERR, "Cannot open %s for writing", filename_tmp);
            free(filename_tmp);
            return -1;
        }
    }

    fprintf(stream, "<html>\n");
    fprintf(stream, " <head>\n");
    fprintf(stream, " <title>Cluster status</title>\n");
    fprintf(stream, " <meta http-equiv=\"refresh\" content=\"%d\">\n", reconnect_msec / 1000);
    fprintf(stream, " </head>\n");
    fprintf(stream, "<body>\n");

    print_cluster_summary(stream, data_set);

    /*** NODE LIST ***/
    fprintf(stream, " <hr />\n <h2>Node List</h2>\n");
    fprintf(stream, "<ul>\n");
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        char *node_name = get_node_display_name(node);

        fprintf(stream, "<li>Node: %s: ", node_name);
        if (node->details->standby_onfail && node->details->online) {
            fprintf(stream, "<font color=\"orange\">standby (on-fail)</font>\n");
        } else if (node->details->standby && node->details->online) {
            fprintf(stream, "<font color=\"orange\">standby%s</font>\n",
                    node->details->running_rsc?" (with active resources)":"");
        } else if (node->details->standby) {
            fprintf(stream, "<font color=\"red\">OFFLINE (standby)</font>\n");
        } else if (node->details->maintenance && node->details->online) {
            fprintf(stream, "<font color=\"blue\">maintenance</font>\n");
        } else if (node->details->maintenance) {
            fprintf(stream, "<font color=\"red\">OFFLINE (maintenance)</font>\n");
        } else if (node->details->online) {
            fprintf(stream, "<font color=\"green\">online</font>\n");
        } else {
            fprintf(stream, "<font color=\"red\">OFFLINE</font>\n");
        }

        if (print_brief && group_by_node) {
            fprintf(stream, "<ul>\n");
            print_rscs_brief(node->details->running_rsc, NULL, print_opts | pe_print_rsconly,
                             stream, FALSE);
            fprintf(stream, "</ul>\n");

        } else if (group_by_node) {
            GListPtr lpc2 = NULL;

            fprintf(stream, "<ul>\n");
            for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                resource_t *rsc = (resource_t *) lpc2->data;

                fprintf(stream, "<li>");
                rsc->fns->print(rsc, NULL, print_opts | pe_print_rsconly, stream);
                fprintf(stream, "</li>\n");
            }
            fprintf(stream, "</ul>\n");
        }
        fprintf(stream, "</li>\n");
        free(node_name);
    }
    fprintf(stream, "</ul>\n");

    /* Print resources section, if needed */
    print_resources(stream, data_set, print_opts);

    /* print Node Attributes section if requested */
    if (show & mon_show_attributes) {
        print_node_attributes(stream, data_set);
    }

    /* If requested, print resource operations (which includes failcounts)
     * or just failcounts
     */
    if (show & (mon_show_operations | mon_show_failcounts)) {
        print_node_summary(stream, data_set,
                           ((show & mon_show_operations)? TRUE : FALSE));
    }

    /* If there were any failed actions, print them */
    if (xml_has_children(data_set->failed)) {
        print_failed_actions(stream, data_set);
    }

    /* Print failed stonith actions */
    if (fence_history) {
        print_failed_stonith_actions(stream, stonith_history);
    }

    /* Print stonith history */
    if (fence_history) {
        if (show & mon_show_fence_history) {
            print_stonith_history(stream, stonith_history);
        } else {
            /* Bug fix: this previously wrote to stdout, sending pending
             * fencing actions to the terminal instead of the HTML file
             */
            print_stonith_pending(stream, stonith_history);
        }
    }

    /* Print tickets if requested */
    if (show & mon_show_tickets) {
        print_cluster_tickets(stream, data_set);
    }

    /* Print negative location constraints if requested */
    if (show & mon_show_bans) {
        print_neg_locations(stream, data_set);
    }

    fprintf(stream, "</body>\n");
    fprintf(stream, "</html>\n");
    fflush(stream);
    fclose(stream);

    if (output_format != mon_output_cgi) {
        if (rename(filename_tmp, filename) != 0) {
            crm_perror(LOG_ERR, "Unable to rename %s->%s", filename_tmp, filename);
        }
        free(filename_tmp);
    }

    return 0;
}
/*!
 * \internal
 * \brief Run the configured external notification agent for an event
 *
 * Exports the event details to the environment as CRM_notify_* variables,
 * then forks and execs external_agent. The parent does not wait for the
 * child to complete.
 *
 * \param[in] node       Name of node the event occurred on
 * \param[in] rsc        ID of resource involved (NULL for fencing events)
 * \param[in] task       Name of action that was performed
 * \param[in] target_rc  Expected return code of the action
 * \param[in] rc         Actual return code of the action
 * \param[in] status     Execution status of the action
 * \param[in] desc       Human-readable description of the result
 *
 * \return 0 (always)
 */
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
                 int status, const char *desc)
{
    pid_t pid;

    /* setenv() needs strings, these are ints */
    char *rc_s = crm_itoa(rc);
    char *status_s = crm_itoa(status);
    char *target_rc_s = crm_itoa(target_rc);

    crm_debug("Sending external notification to '%s' via '%s'", external_recipient, external_agent);

    if(rsc) {
        setenv("CRM_notify_rsc", rsc, 1);
    }
    if (external_recipient) {
        setenv("CRM_notify_recipient", external_recipient, 1);
    }
    setenv("CRM_notify_node", node, 1);
    setenv("CRM_notify_task", task, 1);
    setenv("CRM_notify_desc", desc, 1);
    setenv("CRM_notify_rc", rc_s, 1);
    setenv("CRM_notify_target_rc", target_rc_s, 1);
    setenv("CRM_notify_status", status_s, 1);

    pid = fork();
    if (pid == -1) {
        crm_perror(LOG_ERR, "notification fork() failed.");
    }
    if (pid == 0) {
        /* Child: run the agent (it inherits the CRM_notify_* environment).
         * The execl() sentinel must be a null pointer, not the bare NULL
         * macro, which may expand to a plain int 0 (POSIX exec requirement
         * for variadic argument lists).
         */
        execl(external_agent, external_agent, (char *) NULL);
        exit(CRM_EX_ERROR);     /* reached only if execl() failed */
    }

    /* NOTE: at this point the child has merely been launched; we do not
     * wait for it, so "finished" here means "finished spawning"
     */
    crm_trace("Finished running custom notification program '%s'.", external_agent);

    free(target_rc_s);
    free(status_s);
    free(rc_s);
    return 0;
}
/*!
 * \internal
 * \brief Process a resource operation result from a CIB update
 *
 * If \p xml is not itself an lrm_rsc_op element, recurse into its children
 * looking for one. Otherwise decode the operation's transition magic,
 * determine which node it ran on, log the result, and (if an external
 * notification agent is configured) notify it via send_custom_trap().
 *
 * \param[in] xml      lrm_rsc_op XML, or an ancestor element containing some
 * \param[in] node_id  Node name/ID to fall back on if none is found in \p xml
 */
static void
handle_rsc_op(xmlNode * xml, const char *node_id)
{
    int rc = -1;
    int status = -1;
    int target_rc = -1;
    gboolean notify = TRUE;

    char *rsc = NULL;       /* resource name parsed from the operation key */
    char *task = NULL;      /* action name parsed from the operation key */
    const char *desc = NULL;
    const char *magic = NULL;
    const char *id = NULL;
    const char *node = NULL;

    xmlNode *n = xml;       /* cursor for walking up to the node_state ancestor */
    xmlNode * rsc_op = xml;

    /* Not an operation element itself: recurse into children to find any */
    if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) {
        xmlNode *cIter;

        for(cIter = xml->children; cIter; cIter = cIter->next) {
            handle_rsc_op(cIter, node_id);
        }

        return;
    }

    id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY);
    if (id == NULL) {
        /* Compatibility with <= 1.1.5 */
        id = ID(rsc_op);
    }

    magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
    if (magic == NULL) {
        /* non-change */
        return;
    }

    /* Extract execution status, actual rc, and expected rc from the magic */
    if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc,
                                 &target_rc)) {
        crm_err("Invalid event %s detected for %s", magic, id);
        return;
    }

    if (parse_op_key(id, &rsc, &task, NULL) == FALSE) {
        crm_err("Invalid event detected for %s", id);
        goto bail;
    }

    node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);

    /* Walk up to the enclosing node_state element (if any), as fallback
     * sources for the node name
     */
    while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) {
        n = n->parent;
    }

    if(node == NULL && n) {
        node = crm_element_value(n, XML_ATTR_UNAME);
    }

    if (node == NULL && n) {
        node = ID(n);
    }

    if (node == NULL) {
        /* Last resort: use the caller-supplied node ID */
        node = node_id;
    }

    if (node == NULL) {
        crm_err("No node detected for event %s (%s)", magic, id);
        goto bail;
    }

    /* look up where we expected it to be? */
    desc = pcmk_strerror(pcmk_ok);
    if (status == PCMK_LRM_OP_DONE && target_rc == rc) {
        /* Operation completed with the expected result */
        crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
        if (rc == PCMK_OCF_NOT_RUNNING) {
            /* A successful "not running" result (e.g. probe) isn't notified */
            notify = FALSE;
        }

    } else if (status == PCMK_LRM_OP_DONE) {
        /* Operation completed, but with an unexpected result code */
        desc = services_ocf_exitcode_str(rc);
        crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);

    } else {
        /* Operation did not complete (timed out, cancelled, etc.) */
        desc = services_lrm_status_str(status);
        crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
    }

    if (notify && external_agent) {
        send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
    }

  bail:
    /* strings allocated by parse_op_key() */
    free(rsc);
    free(task);
}
/* Timer callback: fire the display-refresh trigger, then remove this
 * timer source (returning FALSE makes it one-shot).
 */
static gboolean
mon_trigger_refresh(gpointer user_data)
{
    mainloop_set_trigger(refresh_trigger);
    return FALSE;   /* do not reschedule */
}
#define NODE_PATT "/lrm[@id="

/*!
 * \internal
 * \brief Extract the node ID from an lrm element's XPath
 *
 * Looks for the first "/lrm[@id='<nodeid>'" component in \p xpath and
 * returns the quoted ID.
 *
 * \param[in] xpath  XPath of a CIB diff change
 *
 * \return Newly allocated node ID (caller must free), or NULL if \p xpath
 *         contains no lrm component
 */
static char *
get_node_from_xpath(const char *xpath)
{
    char *nodeid = NULL;
    char *tmp = strstr(xpath, NODE_PATT);

    if(tmp) {
        tmp += strlen(NODE_PATT);
        tmp += 1;               /* skip the opening quote around the ID */

        nodeid = strdup(tmp);
        CRM_ASSERT(nodeid != NULL);     /* guard strdup() failure before strstr() */

        /* Terminate the copy at the closing quote */
        tmp = strstr(nodeid, "\'");
        CRM_ASSERT(tmp);
        tmp[0] = 0;
    }
    return nodeid;
}
/*!
 * \internal
 * \brief Process a v2 (incremental) format CIB diff notification
 *
 * Walk every change in the patchset; for create/modify operations, locate
 * the affected element and hand any resource operations beneath it to
 * handle_rsc_op(), resolving the node name from the element itself or from
 * the change's XPath.
 *
 * \param[in] event  Notification event type (unused)
 * \param[in] msg    Notification message containing the patchset
 */
static void
crm_diff_update_v2(const char *event, xmlNode * msg)
{
    xmlNode *change = NULL;
    xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);

    for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) {
        const char *name = NULL;
        const char *op = crm_element_value(change, XML_DIFF_OP);
        const char *xpath = crm_element_value(change, XML_DIFF_PATH);
        xmlNode *match = NULL;
        const char *node = NULL;

        /* Determine which element (if any) this change affects */
        if(op == NULL) {
            continue;
        } else if(strcmp(op, "create") == 0) {
            /* Created content is the change element's child */
            match = change->children;
        } else if(strcmp(op, "move") == 0) {
            continue;
        } else if(strcmp(op, "delete") == 0) {
            continue;
        } else if(strcmp(op, "modify") == 0) {
            /* Modified content is nested under a change-result element */
            match = first_named_child(change, XML_DIFF_RESULT);
            if(match) {
                match = match->children;
            }
        }

        if(match) {
            name = (const char *)match->name;
        }

        crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
        if(xpath == NULL) {
            /* Version field, ignore */

        } else if(name == NULL) {
            /* Only delete/move changes legitimately carry no content */
            crm_debug("No result for %s operation to %s", op, xpath);
            CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0);

        } else if(strcmp(name, XML_TAG_CIB) == 0) {
            /* Whole-CIB replacement: process every node's state section */
            xmlNode *state = NULL;
            xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS);

            for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
                node = crm_element_value(state, XML_ATTR_UNAME);
                if (node == NULL) {
                    node = ID(state);
                }
                handle_rsc_op(state, node);
            }

        } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
            /* Status section replacement: process each node_state child */
            xmlNode *state = NULL;

            for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) {
                node = crm_element_value(state, XML_ATTR_UNAME);
                if (node == NULL) {
                    node = ID(state);
                }
                handle_rsc_op(state, node);
            }

        } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) {
            /* Single node_state: the node name is on the element itself */
            node = crm_element_value(match, XML_ATTR_UNAME);
            if (node == NULL) {
                node = ID(match);
            }
            handle_rsc_op(match, node);

        } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) {
            /* The lrm element's ID is the node ID */
            node = ID(match);
            handle_rsc_op(match, node);

        } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) {
            /* Deeper elements don't carry the node name; parse it from the
             * change's XPath instead
             */
            char *local_node = get_node_from_xpath(xpath);

            handle_rsc_op(match, local_node);
            free(local_node);

        } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) {
            char *local_node = get_node_from_xpath(xpath);

            handle_rsc_op(match, local_node);
            free(local_node);

        } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) {
            char *local_node = get_node_from_xpath(xpath);

            handle_rsc_op(match, local_node);
            free(local_node);

        } else {
            crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
        }
    }
}
/*!
 * \internal
 * \brief Process a v1 (full replacement) format CIB diff notification
 *
 * Find every resource operation contained in the diff's "added" section
 * and hand each one to handle_rsc_op().
 *
 * \param[in] event  Notification event type (unused)
 * \param[in] msg    Notification message containing the diff
 */
static void
crm_diff_update_v1(const char *event, xmlNode * msg)
{
    xmlXPathObject *ops = xpath_search(msg,
                                       "//" F_CIB_UPDATE_RESULT
                                       "//" XML_TAG_DIFF_ADDED
                                       "//" XML_LRM_TAG_RSC_OP);
    int nresults = numXpathResults(ops);
    int i;

    for (i = 0; i < nresults; i++) {
        handle_rsc_op(getXpathResult(ops, i), NULL);
    }
    freeXpathObject(ops);
}
/*!
 * \internal
 * \brief CIB notification callback: apply a diff and refresh the display
 *
 * Apply the incoming patchset to our cached CIB copy (re-querying the full
 * CIB if the patch cannot be applied), forward the change to the external
 * notification agent if one is configured, and schedule a display refresh.
 *
 * \param[in] event  Notification event type
 * \param[in] msg    Notification message containing the diff
 */
static void
crm_diff_update(const char *event, xmlNode * msg)
{
    int rc = -1;
    static bool stale = FALSE;  /* whether we last displayed stale data */
    gboolean cib_updated = FALSE;
    xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);

    /* Show activity on the console between full refreshes */
    print_dot();

    if (current_cib != NULL) {
        rc = xml_apply_patchset(current_cib, diff, TRUE);
        switch (rc) {
            case -pcmk_err_diff_resync:
            case -pcmk_err_diff_failed:
                /* Patch didn't apply cleanly; drop the cached CIB so we
                 * re-query a full copy below
                 */
                crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                free_xml(current_cib); current_cib = NULL;
                break;
            case pcmk_ok:
                cib_updated = TRUE;
                break;
            default:
                crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                free_xml(current_cib); current_cib = NULL;
        }
    }

    if (current_cib == NULL) {
        crm_trace("Re-requesting the full cib");
        cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
    }

    if (external_agent) {
        /* Dispatch on the patchset format so the agent can be notified of
         * each changed resource operation
         */
        int format = 0;
        crm_element_value_int(diff, "format", &format);
        switch(format) {
            case 1:
                crm_diff_update_v1(event, msg);
                break;
            case 2:
                crm_diff_update_v2(event, msg);
                break;
            default:
                crm_err("Unknown patch format: %d", format);
        }
    }

    if (current_cib == NULL) {
        /* Still no CIB (query above failed); warn once until data returns */
        if(!stale) {
            print_as("--- Stale data ---");
        }
        stale = TRUE;
        return;
    }

    stale = FALSE;
    kick_refresh(cib_updated);
}
/*!
 * \internal
 * \brief Recompute cluster status from the cached CIB and redisplay it
 *
 * Upgrade a copy of the cached CIB to the latest schema, fetch fencing
 * history if enabled, run the scheduler's status calculation, and print the
 * result in the selected output format.
 *
 * \param[in] user_data  Ignored
 *
 * \return TRUE on success (so a repeating source keeps firing), FALSE on
 *         fatal error paths (several of which exit via clean_up() first)
 */
static gboolean
mon_refresh_display(gpointer user_data)
{
    xmlNode *cib_copy = copy_xml(current_cib);
    stonith_history_t *stonith_history = NULL;

    last_refresh = time(NULL);

    /* Upgrade the CIB copy to the latest schema before unpacking it */
    if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
        if (cib) {
            cib->cmds->signoff(cib);
        }
        print_as("Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation));
        if (output_format == mon_output_console) {
            /* Give the user a chance to see the message before curses exits */
            sleep(2);
        }
        clean_up(CRM_EX_CONFIG);    /* exits the process */
        return FALSE;
    }

    /* get the stonith-history if there is evidence we need it
     */
    /* NOTE: this while loop is a "breakable if" — the success path breaks
     * out; all other paths fall through to the error handling below
     */
    while (fence_history) {
        if (st != NULL) {
            if (st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120)) {
                fprintf(stderr, "Critical: Unable to get stonith-history\n");
                mon_cib_connection_destroy(NULL);
            } else {
                /* Trim the history unless full history was requested or we
                 * are producing XML (which always gets everything)
                 */
                if ((!fence_full_history) && (output_format != mon_output_xml)) {
                    stonith_history = reduce_stonith_history(stonith_history);
                }
                stonith_history = sort_stonith_history(stonith_history);
                break; /* all other cases are errors */
            }
        } else {
            fprintf(stderr, "Critical: No stonith-API\n");
        }
        free_xml(cib_copy);
        print_as("Reading stonith-history failed");
        if (output_format == mon_output_console) {
            sleep(2);
        }
        return FALSE;
    }

    /* Lazily create the scheduler working set, reused across refreshes */
    if (mon_data_set == NULL) {
        mon_data_set = pe_new_working_set();
        CRM_ASSERT(mon_data_set != NULL);
    }

    /* The working set takes ownership of cib_copy here */
    mon_data_set->input = cib_copy;
    cluster_status(mon_data_set);

    /* Unpack constraints if any section will need them
     * (tickets may be referenced in constraints but not granted yet,
     * and bans need negative location constraints) */
    if (show & (mon_show_bans | mon_show_tickets)) {
        xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
                                                   mon_data_set->input);
        unpack_constraints(cib_constraints, mon_data_set);
    }

    /* Render the status in the selected output format */
    switch (output_format) {
        case mon_output_html:
        case mon_output_cgi:
            if (print_html_status(mon_data_set, output_filename, stonith_history) != 0) {
                fprintf(stderr, "Critical: Unable to output html file\n");
                clean_up(CRM_EX_CANTCREAT);     /* exits the process */
                return FALSE;
            }
            break;

        case mon_output_xml:
            print_xml_status(mon_data_set, stonith_history);
            break;

        case mon_output_monitor:
            print_simple_status(mon_data_set, stonith_history);
            if (has_warnings) {
                clean_up(MON_STATUS_WARN);      /* exits the process */
                return FALSE;
            }
            break;

        case mon_output_plain:
        case mon_output_console:
            print_status(mon_data_set, stonith_history);
            break;

        case mon_output_none:
            break;
    }

    stonith_history_free(stonith_history);
    stonith_history = NULL;
    pe_reset_working_set(mon_data_set);
    return TRUE;
}
/* Fencer notification callback used when an external agent is configured:
 * on disconnect, tear down the CIB connection too so everything reconnects
 * together; otherwise forward the fencing event to the agent.
 */
static void
mon_st_callback_event(stonith_t * st, stonith_event_t * e)
{
    if (st->state == stonith_disconnected) {
        /* disconnect cib as well and have everything reconnect */
        mon_cib_connection_destroy(NULL);
        return;
    }

    if (external_agent) {
        char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)",
                                       e->operation, e->origin, e->target,
                                       pcmk_strerror(e->result), e->id);

        send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
        free(desc);
    }
}
/*!
 * \internal
 * \brief Schedule (or immediately trigger) a display refresh
 *
 * Refresh immediately if the last refresh was longer ago than the reconnect
 * interval, or once every 10 CIB updates; otherwise (re)start a 2-second
 * timer so a refresh happens at most 2s after the most recent update.
 *
 * \param[in] data_updated  TRUE if the CIB changed since the last call
 */
static void
kick_refresh(gboolean data_updated)
{
    static int updates = 0;     /* CIB updates since the last refresh */
    long now = time(NULL);

    if (data_updated) {
        updates++;
    }

    if(refresh_timer == NULL) {
        refresh_timer = mainloop_timer_add("refresh", 2000, FALSE,
                                           mon_trigger_refresh, NULL);
    }

    /* Refresh
     * - immediately if the last refresh was longer ago than the reconnect
     *   interval, or after every 10 cib-updates
     * - otherwise at most 2s after the last update
     */
    if (((now - last_refresh) > (reconnect_msec / 1000)) || (updates >= 10)) {
        mainloop_set_trigger(refresh_trigger);
        mainloop_timer_stop(refresh_timer);
        updates = 0;

    } else {
        mainloop_timer_start(refresh_timer);
    }
}
/* Fencer notification callback used in display-only mode: on disconnect,
 * tear down the CIB connection too so everything reconnects together;
 * otherwise just schedule a display refresh.
 */
static void
mon_st_callback_display(stonith_t * st, stonith_event_t * e)
{
    if (st->state == stonith_disconnected) {
        /* disconnect cib as well and have everything reconnect */
        mon_cib_connection_destroy(NULL);
        return;
    }

    print_dot();
    kick_refresh(TRUE);
}
/* Disconnect from the CIB manager and the fencer (if connected), and
 * release the associated API objects.
 */
static void
clean_up_connections(void)
{
    if (cib) {
        cib->cmds->signoff(cib);
        cib_delete(cib);
        cib = NULL;
    }

    if (st) {
        if (st->state != stonith_disconnected) {
            /* Drop our notification registrations before disconnecting */
            st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
            st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
            st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
            st->cmds->disconnect(st);
        }
        stonith_api_delete(st);
        st = NULL;
    }
}
/*!
 * \internal
 * \brief De-init ncurses, disconnect from the CIB manager and fencer,
 *        deallocate memory, and exit with the given code
 *
 * We don't actually return, but nominally returning crm_exit_t allows a
 * usage like "return clean_up(exit_code);" which helps static analysis
 * understand the code flow.
 *
 * \param[in] exit_code  Exit status to terminate the process with
 */
static crm_exit_t
clean_up(crm_exit_t exit_code)
{
#if CURSES_ENABLED
    if (curses_console_initialized) {
        /* Restore normal terminal modes before any further plain output */
        output_format = mon_output_plain;
        echo();
        nocbreak();
        endwin();
        curses_console_initialized = FALSE;
    }
#endif

    clean_up_connections();
    free(output_filename);
    free(pid_file);
    pe_free_working_set(mon_data_set);
    mon_data_set = NULL;

    if (exit_code == CRM_EX_USAGE) {
        if (output_format == mon_output_cgi) {
            fprintf(stdout, "Content-Type: text/plain\n"
                            "Status: 500\n\n");
        } else {
            crm_help('?', CRM_EX_USAGE);
        }
    }

    /* Return crm_exit()'s (nominal) result so this value-returning function
     * has a return statement on every path, as required when callers use
     * "return clean_up(...)"
     */
    return crm_exit(exit_code);
}

File Metadata

Mime Type
text/x-diff
Expires
Sat, Nov 23, 4:50 PM (14 h, 1 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1018991
Default Alt Text
(312 KB)

Event Timeline