diff --git a/GNUmakefile b/GNUmakefile
index 8a1ea96364..dbea3a5268 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,127 +1,129 @@
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
-include Makefile
PACKAGE ?= pacemaker
# Force 'make dist' to be consistent with 'make export'
#distdir = $(PACKAGE)-$(VERSION)
distdir = $(PACKAGE)
TARFILE = $(distdir).tar.bz2
DIST_ARCHIVES = $(TARFILE)
LAST_RELEASE = Pacemaker-1.0.7
STABLE_SERIES = stable-1.0
RPM_ROOT = $(shell pwd)
RPM_OPTS = --define "_sourcedir $(RPM_ROOT)" \
--define "_specdir $(RPM_ROOT)" \
--define "_srcrpmdir $(RPM_ROOT)" \
# Default to fedora compliant spec files
# SLES: /etc/SuSE-release
# openSUSE: /etc/SuSE-release
# RHEL: /etc/redhat-release
# Fedora: /etc/fedora-release, /etc/redhat-release, /etc/system-release
getdistro = $(shell test -e /etc/SuSE-release || echo fedora; test -e /etc/SuSE-release && echo suse)
DISTRO ?= $(call getdistro)
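# Build chain: 'rpm' rebuilds from the srpm produced by 'srpm', which in turn
# depends on 'export' (an hg archive tarball) and the distro-specific spec file.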
export:
rm -f $(TARFILE)
hg archive -t tbz2 $(TARFILE)
echo `date`: Rebuilt $(TARFILE)
pacemaker-fedora.spec: pacemaker.spec
cp $(PACKAGE).spec $(PACKAGE)-$(DISTRO).spec
@echo Rebuilt $@
pacemaker-suse.spec: pacemaker.spec
cp $(PACKAGE).spec $@
sed -i.sed s:%{_docdir}/%{name}:%{_docdir}/%{name}-%{version}:g $@
sed -i.sed s:corosynclib:libcorosync:g $@
sed -i.sed s:pacemaker-libs:libpacemaker3:g $@
sed -i.sed s:heartbeat-libs:heartbeat:g $@
sed -i.sed s:cluster-glue-libs:libglue:g $@
sed -i.sed s:libselinux-devel::g $@
sed -i.sed s:lm_sensors-devel::g $@
sed -i.sed s:Development/Libraries:Development/Libraries/C\ and\ C++:g $@
sed -i.sed s:System\ Environment/Daemons:Productivity/Clustering/HA:g $@
sed -i.sed s:lm_sensors-devel::g $@
sed -i.sed s:bzip2-devel:libbz2-devel:g $@
@echo Rebuilt $@
srpm: export $(PACKAGE)-$(DISTRO).spec
rm -f *.src.rpm
rpmbuild -bs --define "dist .$(DISTRO)" $(RPM_OPTS) $(PACKAGE)-$(DISTRO).spec
rpm: srpm
@echo To create custom builds, edit the flags and options in $(PACKAGE)-$(DISTRO).spec first
rpmbuild --rebuild $(RPM_ROOT)/*.src.rpm
mock-nodeps:
-rm -rf $(RPM_ROOT)/mock
mock --root=fedora-12-x86_64 --resultdir=$(RPM_ROOT)/mock --rebuild $(RPM_ROOT)/*.src.rpm
mock: srpm mock-nodeps
scratch:
hg commit -m "DO-NOT-PUSH"
make srpm
hg rollback
make mock-nodeps
deb:
echo To create custom builds, edit the configure flags in debian/rules first
dpkg-buildpackage -rfakeroot -us -uc
global: clean-generic
gtags -q
global-html: global
htags -sanhIT
global-www: global-html
rsync -avzxlSD --progress HTML/ root@clusterlabs.org:/var/lib/global/pacemaker
changes:
@printf "$(PACKAGE) ($(VERSION)-1) stable; urgency=medium\n"
@printf " * Update source tarball to revision: `hg id`\n"
@printf " * Statistics:\n"
@printf " Changesets: `hg log -M --template "{desc|firstline|strip}\n" -r $(LAST_RELEASE):tip | wc -l`\n"
@printf " Diff: "
@hg diff -r $(LAST_RELEASE):tip | diffstat | tail -n 1
@printf "\n * Testing Notes:\n"
@printf "\n + Test hardware:\n"
@printf "\n + All testing was performed with STONITH enabled\n"
@printf "\n + Pending bugs encountered during testing:\n"
@printf "\n * Changes since $(LAST_RELEASE)\n"
@hg log -M --template " + {desc|firstline|strip}\n" -r $(LAST_RELEASE):tip | grep -v -e Dev: -e Low: | sort -uf
@printf "\n -- Andrew Beekhof <abeekhof@suse.de> `date +"%a, %d %b %Y %T %z"`\n"
features:
@printf "$(PACKAGE) ($(VERSION)-1) unstable; urgency=medium\n"
@printf " * Update source tarball to revision: `hg id`\n"
@printf " * Statistics:\n"
@printf " Changesets: `hg out -M --template "{desc|firstline|strip}\n" ../$(STABLE_SERIES) | wc -l`\n"
@printf " Diff: "
@hg out -M -p ../$(STABLE_SERIES) | diffstat | tail -n 1
@printf "\n * Changes added since $(STABLE_SERIES)\n"
@hg out -M --template " + {desc|firstline|strip}\n" ../$(STABLE_SERIES) | grep -v -e Dev: -e Low: | sort -uf
@printf "\n -- Andrew Beekhof <abeekhof@suse.de> `date +"%a, %d %b %Y %T %z"`\n"
+rel-tags: tags
+ find . -name TAGS -exec sed -i.sed 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \;
diff --git a/Makefile.am b/Makefile.am
index 46a2d466f4..93d6a81f26 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,51 +1,51 @@
#
# Pacemaker code
#
# Copyright (C) 2004 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
EXTRA_DIST = autogen.sh ConfigureMe README.in libltdl.tar
MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \
DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar
-SUBDIRS = debian $(LIBLTDL_DIR) replace include lib pengine cib crmd fencing tools xml cts extra doc
+SUBDIRS = debian $(LIBLTDL_DIR) replace include lib pengine cib crmd fencing tools shell xml cts extra doc
doc_DATA = AUTHORS COPYING COPYING.LIB
AUTOMAKE_OPTIONS = foreign
##ACLOCAL = aclocal -I $(auxdir)
install-exec-local:
$(INSTALL) -d $(DESTDIR)/$(LCRSODIR)
$(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR)
$(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_STATE_DIR)
-chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CONFIG_DIR)
-chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_STATE_DIR)
if BUILD_AIS_SUPPORT
rm -f $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso $(DESTDIR)$(LCRSODIR)/service_crm.so
cp $(DESTDIR)$(libdir)/service_crm.so $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso
endif
# Use chown because the user/group may not exist
dist-clean-local:
rm -f autoconf automake autoheader $(TARFILE)
maintainer-clean-local:
rm -f libltdl.tar
.PHONY: rpm pkg handy handy-copy
diff --git a/cib/callbacks.c b/cib/callbacks.c
index 4f388e9398..b12a33a691 100644
--- a/cib/callbacks.c
+++ b/cib/callbacks.c
@@ -1,1302 +1,1299 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/ipc.h>
#include <crm/common/cluster.h>
#include <crm/common/xml.h>
#include <crm/common/msg.h>
#include <cibio.h>
#include <callbacks.h>
#include <cibmessages.h>
#include <notify.h>
#include "common.h"
extern GMainLoop* mainloop;
extern gboolean cib_shutdown_flag;
extern gboolean stand_alone;
extern const char* cib_root;
#if SUPPORT_HEARTBEAT
extern ll_cluster_t *hb_conn;
#endif
extern void cib_ha_connection_destroy(gpointer user_data);
extern enum cib_errors cib_update_counter(
xmlNode *xml_obj, const char *field, gboolean reset);
extern void GHFunc_count_peers(
gpointer key, gpointer value, gpointer user_data);
void initiate_exit(void);
void terminate_cib(const char *caller);
gint cib_GCompareFunc(gconstpointer a, gconstpointer b);
gboolean can_write(int flags);
void send_cib_replace(const xmlNode *sync_request, const char *host);
void cib_process_request(
xmlNode *request, gboolean force_synchronous, gboolean privileged,
gboolean from_peer, cib_client_t *cib_client);
void cib_common_callback_worker(xmlNode *op_request, cib_client_t *cib_client,
gboolean force_synchronous, gboolean privileged);
extern GHashTable *client_list;
int next_client_id = 0;
extern const char *cib_our_uname;
extern unsigned long cib_num_ops, cib_num_local, cib_num_updates, cib_num_fail;
extern unsigned long cib_bad_connects, cib_num_timeouts;
extern longclock_t cib_call_time;
extern enum cib_errors cib_status;
int send_via_callback_channel(xmlNode *msg, const char *token);
enum cib_errors cib_process_command(
xmlNode *request, xmlNode **reply,
xmlNode **cib_diff, gboolean privileged);
gboolean cib_common_callback(IPC_Channel *channel, cib_client_t *cib_client,
gboolean force_synchronous, gboolean privileged);
gboolean cib_process_disconnect(IPC_Channel *channel, cib_client_t *cib_client);
int num_clients = 0;
static void
cib_ipc_connection_destroy(gpointer user_data)
{
cib_client_t *cib_client = user_data;
/* cib_process_disconnect */
if(cib_client == NULL) {
crm_debug_4("Destroying %p", user_data);
return;
}
if(cib_client->source != NULL) {
crm_debug_4("Deleting %s (%p) from mainloop",
cib_client->name, cib_client->source);
G_main_del_IPC_Channel(cib_client->source);
cib_client->source = NULL;
}
crm_debug_3("Destroying %s (%p)", cib_client->name, user_data);
num_clients--;
crm_debug_2("Num unfree'd clients: %d", num_clients);
crm_free(cib_client->name);
crm_free(cib_client->callback_id);
crm_free(cib_client->id);
crm_free(cib_client);
crm_debug_4("Freed the cib client");
return;
}
gboolean
cib_client_connect(IPC_Channel *channel, gpointer user_data)
{
cl_uuid_t client_id;
xmlNode *reg_msg = NULL;
cib_client_t *new_client = NULL;
char uuid_str[UU_UNPARSE_SIZEOF];
const char *channel_name = user_data;
gboolean (*callback)(IPC_Channel *channel, gpointer user_data);
crm_debug_3("Connecting channel");
if (channel == NULL) {
crm_err("Channel was NULL");
cib_bad_connects++;
return FALSE;
} else if (channel->ch_status != IPC_CONNECT) {
crm_err("Channel was disconnected");
cib_bad_connects++;
return FALSE;
} else if(channel_name == NULL) {
crm_err("user_data must contain channel name");
cib_bad_connects++;
return FALSE;
} else if(cib_shutdown_flag) {
crm_info("Ignoring new client [%d] during shutdown",
channel->farside_pid);
return FALSE;
}
callback = cib_ro_callback;
if(safe_str_eq(channel_name, cib_channel_rw)) {
callback = cib_rw_callback;
}
crm_malloc0(new_client, sizeof(cib_client_t));
num_clients++;
new_client->channel = channel;
new_client->channel_name = channel_name;
crm_debug_3("Created channel %p for channel %s",
new_client, new_client->channel_name);
channel->ops->set_recv_qlen(channel, 1024);
channel->ops->set_send_qlen(channel, 1024);
new_client->source = G_main_add_IPC_Channel(
G_PRIORITY_DEFAULT, channel, FALSE, callback,
new_client, cib_ipc_connection_destroy);
crm_debug_3("Channel %s connected for client %s",
new_client->channel_name, new_client->id);
cl_uuid_generate(&client_id);
cl_uuid_unparse(&client_id, uuid_str);
CRM_CHECK(new_client->id == NULL, crm_free(new_client->id));
new_client->id = crm_strdup(uuid_str);
/* make sure we can find ourselves later for sync calls
* redirected to the master instance
*/
g_hash_table_insert(client_list, new_client->id, new_client);
reg_msg = create_xml_node(NULL, "callback");
crm_xml_add(reg_msg, F_CIB_OPERATION, CRM_OP_REGISTER);
crm_xml_add(reg_msg, F_CIB_CLIENTID, new_client->id);
send_ipc_message(channel, reg_msg);
free_xml(reg_msg);
return TRUE;
}
gboolean
cib_rw_callback(IPC_Channel *channel, gpointer user_data)
{
gboolean result = FALSE;
result = cib_common_callback(channel, user_data, FALSE, TRUE);
return result;
}
gboolean
cib_ro_callback(IPC_Channel *channel, gpointer user_data)
{
gboolean result = FALSE;
result = cib_common_callback(channel, user_data, FALSE, FALSE);
return result;
}
void
cib_common_callback_worker(xmlNode *op_request, cib_client_t *cib_client,
gboolean force_synchronous, gboolean privileged)
{
longclock_t call_stop = 0;
longclock_t call_start = 0;
const char *op = crm_element_value(op_request, F_CIB_OPERATION);
if(crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
return;
} else if(crm_str_eq(op, T_CIB_NOTIFY, TRUE)) {
/* Update the notify filters for this client */
int on_off = 0;
const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE);
crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off);
crm_debug("Setting %s callbacks for %s (%s): %s",
type, cib_client->name, cib_client->id, on_off?"on":"off");
if(safe_str_eq(type, T_CIB_POST_NOTIFY)) {
cib_client->post_notify = on_off;
} else if(safe_str_eq(type, T_CIB_PRE_NOTIFY)) {
cib_client->pre_notify = on_off;
} else if(safe_str_eq(type, T_CIB_UPDATE_CONFIRM)) {
cib_client->confirmations = on_off;
} else if(safe_str_eq(type, T_CIB_DIFF_NOTIFY)) {
cib_client->diffs = on_off;
} else if(safe_str_eq(type, T_CIB_REPLACE_NOTIFY)) {
cib_client->replace = on_off;
}
return;
}
cib_client->num_calls++;
call_start = time_longclock();
cib_process_request(
op_request, force_synchronous, privileged, FALSE, cib_client);
call_stop = time_longclock();
cib_call_time += (call_stop - call_start);
}
gboolean
cib_common_callback(IPC_Channel *channel, cib_client_t *cib_client,
gboolean force_synchronous, gboolean privileged)
{
int lpc = 0;
const char *value = NULL;
xmlNode *op_request = NULL;
gboolean keep_channel = TRUE;
CRM_CHECK(cib_client != NULL, crm_err("Invalid client"); return FALSE);
CRM_CHECK(cib_client->id != NULL, crm_err("Invalid client: %p", cib_client); return FALSE);
/*
* Do enough work to make entering worthwhile
* But don't allow a single client to monopolize the CIB
*/
while(lpc < 5
&& IPC_ISRCONN(channel)
&& channel->ops->is_message_pending(channel)) {
lpc++;
op_request = xmlfromIPC(channel, MAX_IPC_DELAY);
if (op_request == NULL) {
break;
}
if(cib_client->name == NULL) {
value = crm_element_value(op_request, F_CIB_CLIENTNAME);
if(value == NULL) {
cib_client->name = crm_itoa(channel->farside_pid);
} else {
cib_client->name = crm_strdup(value);
}
}
crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id);
crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name);
/* crm_log_xml(LOG_MSG, "Client[inbound]", op_request); */
if(cib_client->callback_id == NULL) {
value = crm_element_value(op_request, F_CIB_CALLBACK_TOKEN);
if(value != NULL) {
cib_client->callback_id = crm_strdup(value);
} else {
cib_client->callback_id = crm_strdup(cib_client->id);
}
}
cib_common_callback_worker(
op_request, cib_client, force_synchronous, privileged);
free_xml(op_request);
}
if(channel->ch_status != IPC_CONNECT) {
crm_debug_2("Client disconnected");
keep_channel = cib_process_disconnect(channel, cib_client);
}
return keep_channel;
}
static void
do_local_notify(xmlNode *notify_src, const char *client_id,
gboolean sync_reply, gboolean from_peer)
{
/* send callback to originating child */
cib_client_t *client_obj = NULL;
enum cib_errors local_rc = cib_ok;
crm_debug_2("Performing notification");
if(client_id != NULL) {
client_obj = g_hash_table_lookup(client_list, client_id);
} else {
crm_debug_2("No client to send the response to."
" F_CIB_CLIENTID not set.");
}
crm_debug_3("Sending callback to request originator");
if(client_obj == NULL) {
local_rc = cib_reply_failed;
} else {
const char *client_id = client_obj->callback_id;
crm_debug_2("Sending %ssync response to %s %s",
sync_reply?"":"an a-",
client_obj->name,
from_peer?"(originator of delegated request)":"");
if(sync_reply) {
client_id = client_obj->id;
}
local_rc = send_via_callback_channel(notify_src, client_id);
}
if(local_rc != cib_ok && client_obj != NULL) {
crm_warn("%sSync reply to %s failed: %s",
sync_reply?"":"A-",
client_obj?client_obj->name:"<unknown>", cib_error2string(local_rc));
}
}
static void
parse_local_options(
cib_client_t *cib_client, int call_type, int call_options, const char *host, const char *op,
gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward)
{
if(cib_op_modifies(call_type)
&& !(call_options & cib_inhibit_bcast)) {
/* we need to send an update anyway */
*needs_reply = TRUE;
} else {
*needs_reply = FALSE;
}
if(host == NULL && (call_options & cib_scope_local)) {
crm_debug_2("Processing locally scoped %s op from %s",
op, cib_client->name);
*local_notify = TRUE;
} else if(host == NULL && cib_is_master) {
crm_debug_2("Processing master %s op locally from %s",
op, cib_client->name);
*local_notify = TRUE;
} else if(safe_str_eq(host, cib_our_uname)) {
crm_debug_2("Processing locally addressed %s op from %s",
op, cib_client->name);
*local_notify = TRUE;
} else if(stand_alone) {
*needs_forward = FALSE;
*local_notify = TRUE;
*process = TRUE;
} else {
crm_debug_2("%s op from %s needs to be forwarded to %s",
op, cib_client->name,
host?host:"the master instance");
*needs_forward = TRUE;
*process = FALSE;
}
}
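/* Decide, for a request that arrived from another node, whether this instance
 * should process it, reply to it, and/or relay the result to local clients. */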
static gboolean
parse_peer_options(
int call_type, xmlNode *request,
gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward)
{
const char *op = crm_element_value(request, F_CIB_OPERATION);
const char *originator = crm_element_value(request, F_ORIG);
const char *host = crm_element_value(request, F_CIB_HOST);
const char *reply_to = crm_element_value(request, F_CIB_ISREPLY);
const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE);
const char *delegated = crm_element_value(request, F_CIB_DELEGATED);
if(safe_str_eq(op, "cib_shutdown_req")) {
if(reply_to != NULL) {
crm_debug("Processing %s from %s", op, host);
*needs_reply = FALSE;
} else {
crm_debug("Processing %s reply from %s", op, host);
}
return TRUE;
} else if(crm_is_true(update) && safe_str_eq(reply_to, cib_our_uname)) {
crm_debug_2("Processing global/peer update from %s"
" that originated from us", originator);
*needs_reply = FALSE;
if(crm_element_value(request, F_CIB_CLIENTID) != NULL) {
*local_notify = TRUE;
}
return TRUE;
} else if(crm_is_true(update)) {
crm_debug_2("Processing global/peer update from %s", originator);
*needs_reply = FALSE;
return TRUE;
} else if(host != NULL && safe_str_eq(host, cib_our_uname)) {
crm_debug_2("Processing request sent to us from %s", originator);
return TRUE;
} else if(delegated != NULL && cib_is_master == TRUE) {
crm_debug_2("Processing request sent to master instance from %s",
originator);
return TRUE;
} else if(reply_to != NULL && safe_str_eq(reply_to, cib_our_uname)) {
crm_debug_2("Forward reply sent from %s to local clients",
originator);
*process = FALSE;
*needs_reply = FALSE;
*local_notify = TRUE;
return TRUE;
} else if(delegated != NULL) {
crm_debug_2("Ignoring msg for master instance");
} else if(host != NULL) {
/* this is for a specific instance and we're not it */
crm_debug_2("Ignoring msg for instance on %s", crm_str(host));
} else if(reply_to == NULL && cib_is_master == FALSE) {
/* this is for the master instance and we're not it */
crm_debug_2("Ignoring reply to %s", crm_str(reply_to));
} else {
crm_err("Nothing for us to do?");
crm_log_xml(LOG_ERR, "Peer[inbound]", request);
}
return FALSE;
}
static void
forward_request(xmlNode *request, cib_client_t *cib_client, int call_options)
{
xmlNode *forward_msg = NULL;
const char *op = crm_element_value(request, F_CIB_OPERATION);
const char *host = crm_element_value(request, F_CIB_HOST);
forward_msg = cib_msg_copy(request, TRUE);
crm_xml_add(forward_msg, F_CIB_DELEGATED, cib_our_uname);
if(host != NULL) {
crm_debug_2("Forwarding %s op to %s", op, host);
send_cluster_message(host, crm_msg_cib, forward_msg, FALSE);
} else {
crm_debug_2("Forwarding %s op to master instance", op);
send_cluster_message(NULL, crm_msg_cib, forward_msg, FALSE);
}
if(call_options & cib_discard_reply) {
crm_debug_2("Client not interested in reply");
}
free_xml(forward_msg);
}
static void
send_peer_reply(
xmlNode *msg, xmlNode *result_diff, const char *originator, gboolean broadcast)
{
xmlNode *reply_copy = NULL;
CRM_ASSERT(msg != NULL);
reply_copy = cib_msg_copy(msg, TRUE);
if(broadcast) {
/* this (successful) call modified the CIB _and_ the
* change needs to be broadcast...
* send via HA to other nodes
*/
int diff_add_updates = 0;
int diff_add_epoch = 0;
int diff_add_admin_epoch = 0;
int diff_del_updates = 0;
int diff_del_epoch = 0;
int diff_del_admin_epoch = 0;
char *digest = NULL;
cib_diff_version_details(
result_diff,
&diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates,
&diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates);
crm_debug_2("Sending update diff %d.%d.%d -> %d.%d.%d",
diff_del_admin_epoch,diff_del_epoch,diff_del_updates,
diff_add_admin_epoch,diff_add_epoch,diff_add_updates);
crm_xml_add(reply_copy, F_CIB_ISREPLY, originator);
crm_xml_add(reply_copy, F_CIB_GLOBAL_UPDATE, XML_BOOLEAN_TRUE);
crm_xml_add(reply_copy, F_CIB_OPERATION, CIB_OP_APPLY_DIFF);
digest = calculate_xml_digest(the_cib, FALSE, TRUE);
crm_xml_add(result_diff, XML_ATTR_DIGEST, digest);
/* crm_log_xml_debug(the_cib, digest); */
crm_free(digest);
add_message_xml(reply_copy, F_CIB_UPDATE_DIFF, result_diff);
crm_log_xml(LOG_DEBUG_3, "copy", reply_copy);
send_cluster_message(NULL, crm_msg_cib, reply_copy, TRUE);
} else if(originator != NULL) {
/* send reply via HA to originating node */
crm_debug_2("Sending request result to originator only");
crm_xml_add(reply_copy, F_CIB_ISREPLY, originator);
send_cluster_message(originator, crm_msg_cib, reply_copy, FALSE);
}
free_xml(reply_copy);
}
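/* Route a single CIB request: decide whether to process it locally, forward it
 * to the master instance, and/or notify the originating client and the peers. */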
void
cib_process_request(
xmlNode *request, gboolean force_synchronous, gboolean privileged,
gboolean from_peer, cib_client_t *cib_client)
{
int call_type = 0;
int call_options = 0;
gboolean process = TRUE;
gboolean is_update = TRUE;
gboolean needs_reply = TRUE;
gboolean local_notify = FALSE;
gboolean needs_forward = FALSE;
gboolean global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE));
xmlNode *op_reply = NULL;
xmlNode *result_diff = NULL;
enum cib_errors rc = cib_ok;
const char *op = crm_element_value(request, F_CIB_OPERATION);
const char *originator = crm_element_value(request, F_ORIG);
const char *host = crm_element_value(request, F_CIB_HOST);
crm_debug_4("%s Processing msg %s",
cib_our_uname, crm_element_value(request, F_SEQ));
cib_num_ops++;
if(cib_num_ops == 0) {
cib_num_fail = 0;
cib_num_local = 0;
cib_num_updates = 0;
crm_info("Stats wrapped around");
}
if(host != NULL && strlen(host) == 0) {
host = NULL;
}
crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
if(force_synchronous) {
call_options |= cib_sync_call;
}
crm_debug_2("Processing %s message (%s) for %s...",
from_peer?"peer":"local",
from_peer?originator:cib_our_uname, host?host:"master");
rc = cib_get_operation_id(op, &call_type);
if(rc != cib_ok) {
/* TODO: construct error reply? */
crm_err("Pre-processing of command failed: %s", cib_error2string(rc));
return;
}
is_update = cib_op_modifies(call_type);
if(is_update) {
cib_num_updates++;
}
if(from_peer == FALSE) {
parse_local_options(cib_client, call_type, call_options, host, op,
&local_notify, &needs_reply, &process, &needs_forward);
} else if(parse_peer_options(call_type, request, &local_notify,
&needs_reply, &process, &needs_forward) == FALSE) {
return;
}
crm_debug_3("Finished determining processing actions");
if(call_options & cib_discard_reply) {
needs_reply = is_update;
local_notify = FALSE;
}
if(needs_forward) {
forward_request(request, cib_client, call_options);
return;
}
if(cib_status != cib_ok) {
rc = cib_status;
crm_err("Operation ignored, cluster configuration is invalid."
" Please repair and restart: %s",
cib_error2string(cib_status));
op_reply = cib_construct_reply(request, the_cib, cib_status);
} else if(process) {
int level = LOG_INFO;
const char *section = crm_element_value(request, F_CIB_SECTION);
cib_num_local++;
rc = cib_process_command(
request, &op_reply, &result_diff, privileged);
if(global_update) {
switch(rc) {
case cib_ok:
case cib_old_data:
case cib_diff_resync:
case cib_diff_failed:
level = LOG_DEBUG_2;
break;
default:
level = LOG_ERR;
}
} else if(safe_str_eq(op, CIB_OP_QUERY)) {
level = LOG_DEBUG_2;
} else if(rc != cib_ok) {
cib_num_fail++;
level = LOG_WARNING;
- } else if(safe_str_eq(op, CIB_OP_QUERY)) {
- level = LOG_DEBUG_2;
-
} else if(safe_str_eq(op, CIB_OP_SLAVE)) {
level = LOG_DEBUG_2;
} else if(safe_str_eq(section, XML_CIB_TAG_STATUS)) {
level = LOG_DEBUG_2;
}
if(crm_log_level >= level) {
/* Avoid all the xml lookups if we're not going to print the results */
do_crm_log(level, "Operation complete: op %s for section %s (origin=%s/%s/%s, version=%s.%s.%s): %s (rc=%d)",
op, section?section:"'all'", originator?originator:"local",
crm_element_value(request, F_CIB_CLIENTNAME),
crm_element_value(request, F_CIB_CALLID),
the_cib?crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN):"0",
the_cib?crm_element_value(the_cib, XML_ATTR_GENERATION):"0",
the_cib?crm_element_value(the_cib, XML_ATTR_NUMUPDATES):"0",
cib_error2string(rc), rc);
}
if(op_reply == NULL && (needs_reply || local_notify)) {
crm_err("Unexpected NULL reply to message");
crm_log_xml(LOG_ERR, "null reply", request);
needs_reply = FALSE;
local_notify = FALSE;
}
}
crm_debug_3("processing response cases");
if(local_notify) {
const char *client_id = crm_element_value(request, F_CIB_CLIENTID);
if(process == FALSE) {
do_local_notify(request, client_id, call_options & cib_sync_call, from_peer);
} else {
do_local_notify(op_reply, client_id, call_options & cib_sync_call, from_peer);
}
}
/* from now on we are the server */
if(needs_reply == FALSE || stand_alone) {
/* nothing more to do...
* this was a non-originating slave update
*/
crm_debug_2("Completed slave update");
} else if(rc == cib_ok
&& result_diff != NULL
&& !(call_options & cib_inhibit_bcast)) {
send_peer_reply(request, result_diff, originator, TRUE);
} else if(call_options & cib_discard_reply) {
crm_debug_4("Caller isn't interested in reply");
} else if (from_peer) {
if(is_update == FALSE || result_diff == NULL) {
crm_debug_3("Request not broadcast: R/O call");
} else if(call_options & cib_inhibit_bcast) {
crm_debug_3("Request not broadcast: inhibited");
} else if(rc != cib_ok) {
crm_debug_3("Request not broadcast: call failed: %s",
cib_error2string(rc));
} else {
crm_debug_2("Directing reply to %s", originator);
}
send_peer_reply(op_reply, result_diff, originator, FALSE);
}
free_xml(op_reply);
free_xml(result_diff);
return;
}
xmlNode *
cib_construct_reply(xmlNode *request, xmlNode *output, int rc)
{
int lpc = 0;
xmlNode *reply = NULL;
const char *name = NULL;
const char *value = NULL;
const char *names[] = {
F_CIB_OPERATION,
F_CIB_CALLID,
F_CIB_CLIENTID,
F_CIB_CALLOPTS
};
crm_debug_4("Creating a basic reply");
reply = create_xml_node(NULL, "cib-reply");
crm_xml_add(reply, F_TYPE, T_CIB);
for(lpc = 0; lpc < DIMOF(names); lpc++) {
name = names[lpc];
value = crm_element_value(request, name);
crm_xml_add(reply, name, value);
}
crm_xml_add_int(reply, F_CIB_RC, rc);
if(output != NULL) {
crm_debug_4("Attaching reply output");
add_message_xml(reply, F_CIB_CALLDATA, output);
}
return reply;
}
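/* Perform one CIB operation against the live CIB, producing the reply and (for
 * writes) the resulting diff; read-only calls never replace the active CIB. */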
enum cib_errors
cib_process_command(xmlNode *request, xmlNode **reply,
xmlNode **cib_diff, gboolean privileged)
{
xmlNode *input = NULL;
xmlNode *output = NULL;
xmlNode *result_cib = NULL;
xmlNode *current_cib = NULL;
int call_type = 0;
int call_options = 0;
int log_level = LOG_DEBUG_4;
const char *op = NULL;
const char *section = NULL;
enum cib_errors rc = cib_ok;
enum cib_errors rc2 = cib_ok;
gboolean send_r_notify = FALSE;
gboolean global_update = FALSE;
gboolean config_changed = FALSE;
gboolean manage_counters = TRUE;
CRM_ASSERT(cib_status == cib_ok);
*reply = NULL;
*cib_diff = NULL;
current_cib = the_cib;
/* Start processing the request... */
op = crm_element_value(request, F_CIB_OPERATION);
crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
rc = cib_get_operation_id(op, &call_type);
if(rc == cib_ok) {
rc = cib_op_can_run(call_type, call_options, privileged, global_update);
}
rc2 = cib_op_prepare(call_type, request, &input, &section);
if(rc == cib_ok) {
rc = rc2;
}
if(rc != cib_ok) {
crm_debug_2("Call setup failed: %s", cib_error2string(rc));
goto done;
} else if(cib_op_modifies(call_type) == FALSE) {
rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE,
section, request, input, FALSE, &config_changed,
current_cib, &result_cib, NULL, &output);
CRM_CHECK(result_cib == NULL, free_xml(result_cib));
goto done;
}
/* Handle a valid write action */
global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE));
if(global_update) {
manage_counters = FALSE;
call_options |= cib_force_diff;
CRM_CHECK(call_type == 3 || call_type == 4,
crm_err("Call type: %d", call_type);
crm_log_xml(LOG_ERR, "bad op", request));
}
#ifdef SUPPORT_PRENOTIFY
if((call_options & cib_inhibit_notify) == 0) {
cib_pre_notify(call_options, op, the_cib, input);
}
#endif
if(rc == cib_ok) {
if(call_options & cib_inhibit_bcast) {
/* skip */
crm_debug_2("Skipping update: inhibit broadcast");
manage_counters = FALSE;
}
rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE,
section, request, input, manage_counters, &config_changed,
current_cib, &result_cib, cib_diff, &output);
if(manage_counters == FALSE) {
*cib_diff = diff_cib_object(current_cib, result_cib, FALSE);
config_changed = cib_config_changed(*cib_diff);
}
}
if(rc == cib_ok) {
rc = activateCibXml(result_cib, config_changed, op);
if(crm_str_eq(CIB_OP_REPLACE, op, TRUE)) {
if(section == NULL) {
send_r_notify = TRUE;
} else if(safe_str_eq(section, XML_TAG_CIB)) {
send_r_notify = TRUE;
} else if(safe_str_eq(section, XML_CIB_TAG_NODES)) {
send_r_notify = TRUE;
} else if(safe_str_eq(section, XML_CIB_TAG_STATUS)) {
send_r_notify = TRUE;
}
} else if(crm_str_eq(CIB_OP_ERASE, op, TRUE)) {
send_r_notify = TRUE;
}
} else if(rc == cib_dtd_validation) {
if(output != NULL) {
crm_log_xml_info(output, "cib:output");
free_xml(output);
}
output = result_cib;
} else {
free_xml(result_cib);
}
if((call_options & cib_inhibit_notify) == 0) {
const char *call_id = crm_element_value(request, F_CIB_CALLID);
const char *client = crm_element_value(request, F_CIB_CLIENTNAME);
#ifdef SUPPORT_POSTNOTIFY
cib_post_notify(call_options, op, input, rc, the_cib);
#endif
cib_diff_notify(call_options, client, call_id, op, input, rc, *cib_diff);
}
if(send_r_notify) {
const char *origin = crm_element_value(request, F_ORIG);
cib_replace_notify(origin, the_cib, rc, *cib_diff);
}
if(rc != cib_ok) {
log_level = LOG_DEBUG_4;
if(rc == cib_dtd_validation && global_update) {
log_level = LOG_WARNING;
crm_log_xml_info(input, "cib:global_update");
}
} else if(config_changed) {
log_level = LOG_DEBUG_3;
if(cib_is_master) {
log_level = LOG_INFO;
}
} else if(cib_is_master) {
log_level = LOG_DEBUG_2;
}
log_xml_diff(log_level, *cib_diff, "cib:diff");
done:
if((call_options & cib_discard_reply) == 0) {
*reply = cib_construct_reply(request, output, rc);
/* crm_log_xml_info(*reply, "cib:reply"); */
}
if(call_type >= 0) {
cib_op_cleanup(call_type, call_options, &input, &output);
}
return rc;
}
int
send_via_callback_channel(xmlNode *msg, const char *token)
{
cib_client_t *hash_client = NULL;
enum cib_errors rc = cib_ok;
crm_debug_3("Delivering msg %p to client %s", msg, token);
if(token == NULL) {
crm_err("No client id token, can't send message");
if(rc == cib_ok) {
rc = cib_missing;
}
} else if(msg == NULL) {
crm_err("No message to send");
rc = cib_reply_failed;
} else {
/* A client that left before we could reply is not really
* _our_ error. Warn instead.
*/
hash_client = g_hash_table_lookup(client_list, token);
if(hash_client == NULL) {
crm_warn("Cannot find client for token %s", token);
rc = cib_client_gone;
} else if (crm_str_eq(hash_client->channel_name, "remote", FALSE)) {
/* just hope it's alive */
} else if(hash_client->channel == NULL) {
crm_err("Cannot find channel for client %s", token);
rc = cib_client_corrupt;
}
}
if(rc == cib_ok) {
crm_debug_3("Delivering reply to client %s (%s)",
token, hash_client->channel_name);
if (crm_str_eq(hash_client->channel_name, "remote", FALSE)) {
cib_send_remote_msg(hash_client->channel, msg, hash_client->encrypted);
} else if(send_ipc_message(hash_client->channel, msg) == FALSE) {
crm_warn("Delivery of reply to client %s/%s failed",
hash_client->name, token);
rc = cib_reply_failed;
}
}
return rc;
}
gint cib_GCompareFunc(gconstpointer a, gconstpointer b)
{
const xmlNode *a_msg = a;
const xmlNode *b_msg = b;
int msg_a_id = 0;
int msg_b_id = 0;
const char *value = NULL;
value = crm_element_value_const(a_msg, F_CIB_CALLID);
msg_a_id = crm_parse_int(value, NULL);
value = crm_element_value_const(b_msg, F_CIB_CALLID);
msg_b_id = crm_parse_int(value, NULL);
if(msg_a_id == msg_b_id) {
return 0;
} else if(msg_a_id < msg_b_id) {
return -1;
}
return 1;
}
gboolean
cib_process_disconnect(IPC_Channel *channel, cib_client_t *cib_client)
{
if (channel == NULL) {
CRM_DEV_ASSERT(cib_client == NULL);
} else if (cib_client == NULL) {
crm_err("No client");
} else {
CRM_DEV_ASSERT(channel->ch_status != IPC_CONNECT);
crm_debug_2("Cleaning up after client disconnect: %s/%s/%s",
crm_str(cib_client->name),
cib_client->channel_name,
cib_client->id);
if(cib_client->id != NULL) {
if(!g_hash_table_remove(client_list, cib_client->id)) {
crm_err("Client %s not found in the hashtable",
cib_client->name);
}
}
}
if(cib_shutdown_flag && g_hash_table_size(client_list) == 0) {
crm_info("All clients disconnected...");
initiate_exit();
}
return FALSE;
}
void
cib_ha_peer_callback(HA_Message * msg, void* private_data)
{
xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__);
cib_peer_callback(xml, private_data);
free_xml(xml);
}
void
cib_peer_callback(xmlNode * msg, void* private_data)
{
crm_node_t *node = NULL;
const char *reason = NULL;
const char *originator = crm_element_value(msg, F_ORIG);
if(originator == NULL || crm_str_eq(originator, cib_our_uname, TRUE)) {
/* message is from ourselves */
return;
} else if(crm_peer_cache == NULL) {
reason = "membership not established";
goto bail;
}
node = crm_get_peer(0, originator);
if(node == NULL || crm_is_member_active(node) == FALSE) {
reason = "not in our membership";
goto bail;
}
if(crm_element_value(msg, F_CIB_CLIENTNAME) == NULL) {
crm_xml_add(msg, F_CIB_CLIENTNAME, originator);
}
/* crm_log_xml(LOG_MSG, "Peer[inbound]", msg); */
cib_process_request(msg, FALSE, TRUE, TRUE, NULL);
return;
bail:
if(reason) {
const char *seq = crm_element_value(msg, F_SEQ);
const char *op = crm_element_value(msg, F_CIB_OPERATION);
crm_warn("Discarding %s message (%s) from %s: %s", op, seq, originator, reason);
}
}
void
cib_client_status_callback(const char * node, const char * client,
const char * status, void * private)
{
crm_node_t *member = NULL;
if(safe_str_eq(client, CRM_SYSTEM_CIB)) {
crm_info("Status update: Client %s/%s now has status [%s]",
node, client, status);
if(safe_str_eq(status, JOINSTATUS)){
status = ONLINESTATUS;
} else if(safe_str_eq(status, LEAVESTATUS)){
status = OFFLINESTATUS;
}
member = crm_get_peer(0, node);
if(member == NULL) {
/* Make sure it gets created */
const char *uuid = get_uuid(node);
member = crm_update_peer(0, 0, 0, -1, 0, uuid, node, NULL, NULL);
}
crm_update_peer_proc(node, crm_proc_cib, status);
}
return;
}
#if SUPPORT_HEARTBEAT
extern oc_ev_t *cib_ev_token;
gboolean cib_ccm_dispatch(int fd, gpointer user_data)
{
int rc = 0;
oc_ev_t *ccm_token = (oc_ev_t*)user_data;
crm_debug_2("received callback");
rc = oc_ev_handle_event(ccm_token);
if(0 == rc) {
return TRUE;
}
crm_err("CCM connection appears to have failed: rc=%d.", rc);
/* eventually it might be nice to recover and reconnect... but until then... */
crm_err("Exiting to recover from CCM connection failure");
exit(2);
return FALSE;
}
int current_instance = 0;
void
cib_ccm_msg_callback(
oc_ed_t event, void *cookie, size_t size, const void *data)
{
gboolean update_id = FALSE;
const oc_ev_membership_t *membership = data;
CRM_ASSERT(membership != NULL);
crm_info("Processing CCM event=%s (id=%d)",
ccm_event_name(event), membership->m_instance);
if(current_instance > membership->m_instance) {
crm_err("Membership instance ID went backwards! %d->%d",
current_instance, membership->m_instance);
CRM_ASSERT(current_instance <= membership->m_instance);
}
switch(event) {
case OC_EV_MS_NEW_MEMBERSHIP:
case OC_EV_MS_INVALID:
update_id = TRUE;
break;
case OC_EV_MS_PRIMARY_RESTORED:
update_id = TRUE;
break;
case OC_EV_MS_NOT_PRIMARY:
crm_debug_2("Ignoring transitional CCM event: %s",
ccm_event_name(event));
break;
case OC_EV_MS_EVICTED:
crm_err("Evicted from CCM: %s", ccm_event_name(event));
break;
default:
crm_err("Unknown CCM event: %d", event);
}
if(update_id) {
unsigned int lpc = 0;
CRM_CHECK(membership != NULL, return);
current_instance = membership->m_instance;
for(lpc=0; lpc < membership->m_n_out; lpc++) {
crm_update_ccm_node(
membership, lpc+membership->m_out_idx, CRM_NODE_LOST, current_instance);
}
for(lpc=0; lpc < membership->m_n_member; lpc++) {
crm_update_ccm_node(
membership, lpc+membership->m_memb_idx,CRM_NODE_ACTIVE, current_instance);
}
}
oc_ev_callback_done(cookie);
return;
}
#endif
gboolean
can_write(int flags)
{
return TRUE;
}
static gboolean
cib_force_exit(gpointer data)
{
crm_notice("Forcing exit!");
terminate_cib(__FUNCTION__);
return FALSE;
}
void
initiate_exit(void)
{
int active = 0;
xmlNode *leaving = NULL;
active = crm_active_peers(crm_proc_cib);
if(active < 2) {
terminate_cib(__FUNCTION__);
return;
}
crm_info("Sending disconnect notification to %d peers...", active);
leaving = create_xml_node(NULL, "exit-notification");
crm_xml_add(leaving, F_TYPE, "cib");
crm_xml_add(leaving, F_CIB_OPERATION, "cib_shutdown_req");
send_cluster_message(NULL, crm_msg_cib, leaving, TRUE);
free_xml(leaving);
g_timeout_add(crm_get_msec("5s"), cib_force_exit, NULL);
}
extern int remote_fd;
extern int remote_tls_fd;
void
terminate_cib(const char *caller)
{
if(remote_fd > 0) {
close(remote_fd);
}
if(remote_tls_fd > 0) {
close(remote_tls_fd);
}
#if SUPPORT_AIS
if(is_openais_cluster()) {
cib_ha_connection_destroy(NULL);
return;
}
#endif
#if SUPPORT_HEARTBEAT
if(hb_conn != NULL) {
crm_info("%s: Disconnecting heartbeat", caller);
hb_conn->llc_ops->signoff(hb_conn, FALSE);
} else {
crm_err("%s: No heartbeat connection", caller);
}
#endif
uninitializeCib();
crm_info("Exiting...");
if (mainloop != NULL && g_main_is_running(mainloop)) {
g_main_quit(mainloop);
} else {
exit(LSB_EXIT_OK);
}
}
diff --git a/configure.ac b/configure.ac
index 3e6a0678c9..b10a0098e2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,1477 +1,1477 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl License: GNU General Public License (GPL)
dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.53)
dnl Suggested structure:
dnl information on the package
dnl checks for programs
dnl checks for libraries
dnl checks for header files
dnl checks for types
dnl checks for structures
dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
AC_INIT(pacemaker, 1.1.0, pacemaker@oss.clusterlabs.org)
CRM_DTD_VERSION="1.0"
PKG_FEATURES=""
HB_PKG=heartbeat
AC_CONFIG_AUX_DIR(.)
AC_CANONICAL_HOST
dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
dnl
dnl Internal header: include/config.h
dnl - Contains ALL defines
dnl - include/config.h.in is generated automatically by autoheader
dnl - NOT to be included in any header files except lha_internal.h
dnl (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl - Contains a subset of defines checked here
dnl - Manually edit include/crm_config.h.in to have configure include
dnl new defines
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AM_CONFIG_HEADER(include/config.h include/crm_config.h)
ALL_LINGUAS="en fr"
AC_ARG_WITH(version,
[ --with-version=version Override package version (if you're a packager needing to pretend) ],
[ PACKAGE_VERSION="$withval" ])
AC_ARG_WITH(pkg-name,
[ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ],
[ PACKAGE_NAME="$withval" ])
AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION)
AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version)
dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
dnl normal compilation. When a failure occurs, it will then display the full
dnl command line
dnl Wrap in m4_ifdef to avoid breaking on older platforms
m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES])
CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE
LDD=ldd
dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
AC_PROG_CC dnl Can force other with environment variable "CC".
AM_PROG_CC_C_O
AC_PROG_CC_STDC
AC_LIBTOOL_DLOPEN dnl Enable dlopen support...
AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib
AC_PROG_LIBTOOL
AC_C_STRINGIZE
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
AC_STRUCT_TIMEZONE
dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
local CFLAGS="$@"
AC_MSG_CHECKING(whether $CC supports "$@")
AC_COMPILE_IFELSE([int main(){return 0;}] ,[RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)])
return $RC
}
extract_header_define() {
AC_MSG_CHECKING(for $2 in $1)
Cfile=/tmp/extract_define.$2.${$}
printf "#include <stdio.h>\n" > ${Cfile}.c
printf "#include <%s>\n" $1 >> ${Cfile}.c
printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c
$CC $CFLAGS ${Cfile}.c -o ${Cfile}
value=`${Cfile}`
AC_MSG_RESULT($value)
printf $value
rm -f ${Cfile}.c ${Cfile}
}
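dnl extract_header_define compiles and runs a tiny program that prints the value
dnl of macro $2 from header $1, so configure can reuse values defined by the
dnl cluster-glue/heartbeat headers.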
dnl ===============================================
dnl Configure Options
dnl ===============================================
dnl Some systems, like Solaris, require a custom package name
AC_ARG_WITH(pkgname,
[ --with-pkgname=name name for pkg (typically for Solaris) ],
[ PKGNAME="$withval" ],
[ PKGNAME="LXHAhb" ],
)
AC_SUBST(PKGNAME)
AC_ARG_ENABLE([ansi],
[ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers.
[default=yes]])
AC_ARG_ENABLE([fatal-warnings],
[ --enable-fatal-warnings very pedantic and fatal warnings for gcc
[default=yes]])
AC_ARG_ENABLE([pretty],
[ --enable-pretty
Pretty-print compiler output unless there is an error
[default=no]])
AC_ARG_ENABLE([quiet],
[ --enable-quiet
Suppress make output unless there is an error
[default=no]])
AC_ARG_ENABLE([thread-safe],
[ --enable-thread-safe Enable some client libraries to be thread safe.
[default=no]])
AC_ARG_ENABLE([bundled-ltdl],
[ --enable-bundled-ltdl Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]])
LTDL_LIBS=""
AC_ARG_WITH(ais,
[ --with-ais
Support the OpenAIS messaging and membership layer ],
[ SUPPORT_AIS=$withval ],
[ SUPPORT_AIS=try ],
)
AC_ARG_WITH(heartbeat,
[ --with-heartbeat
Support the Heartbeat messaging and membership layer ],
[ SUPPORT_HEARTBEAT=$withval ],
[ SUPPORT_HEARTBEAT=try ],
)
AC_ARG_WITH(snmp,
[ --with-snmp
Support the SNMP protocol ],
[ SUPPORT_SNMP=$withval ],
[ SUPPORT_SNMP=try ],
)
AC_ARG_WITH(esmtp,
[ --with-esmtp
Support sending mail notifications with the esmtp library ],
[ SUPPORT_ESMTP=$withval ],
[ SUPPORT_ESMTP=try ],
)
AISPREFIX=""
AC_ARG_WITH(ais-prefix,
[ --with-ais-prefix=DIR Prefix used when OpenAIS was installed [$prefix]],
[ AISPREFIX=$withval ],
[ AISPREFIX=$prefix ])
LCRSODIR=""
AC_ARG_WITH(lcrso-dir,
[ --with-lcrso-dir=DIR OpenAIS lcrso files. ],
[ LCRSODIR="$withval" ])
INITDIR=""
AC_ARG_WITH(initdir,
[ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]],
[ INITDIR="$withval" ])
dnl ===============================================
dnl General Processing
dnl ===============================================
AC_SUBST(HB_PKG)
INIT_EXT=""
echo Our Host OS: $host_os/$host
AC_MSG_NOTICE(Sanitizing prefix: ${prefix})
case $prefix in
NONE) prefix=/usr;;
esac
AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix})
case $exec_prefix in
dnl For consistency with Heartbeat, map NONE->$prefix
NONE) exec_prefix=$prefix;;
prefix) exec_prefix=$prefix;;
esac
AC_MSG_NOTICE(Sanitizing ais_prefix: ${AISPREFIX})
case $AISPREFIX in
dnl For consistency with Heartbeat, map NONE->$prefix
NONE) AISPREFIX=$prefix;;
prefix) AISPREFIX=$prefix;;
esac
AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR})
case $INITDIR in
prefix) INITDIR=$prefix;;
"")
AC_MSG_CHECKING(which init (rc) directory to use)
for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
/usr/local/etc/rc.d /etc/rc.d
do
if
test -d $initdir
then
INITDIR=$initdir
break
fi
done
AC_MSG_RESULT($INITDIR);;
esac
AC_SUBST(INITDIR)
AC_MSG_NOTICE(Sanitizing libdir: ${libdir})
case $libdir in
dnl For consistency with Heartbeat, map NONE->$prefix
*prefix*|NONE)
AC_MSG_CHECKING(which lib directory to use)
for aDir in lib64 lib
do
trydir="${exec_prefix}/${aDir}"
if
test -d ${trydir}
then
libdir=${trydir}
break
fi
done
AC_MSG_RESULT($libdir);
;;
esac
dnl Expand autoconf variables so that we dont end up with '${prefix}'
dnl in #defines and python scripts
dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
dnl make exec_prefix=/foo install
dnl No longer being able to do this seems like no great loss to me...
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"
dnl Home-grown variables
eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
docdir=${datadir}/doc/${PACKAGE}-${VERSION}
#docdir=${datadir}/doc/packages/${PACKAGE}
fi
AC_SUBST(docdir)
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir
do
dirname=`eval echo '${'${j}'}'`
if
test ! -d "$dirname"
then
AC_MSG_WARN([$j directory ($dirname) does not exist!])
fi
done
dnl This OS-based decision-making is poor autotools practice;
dnl feature-based mechanisms are strongly preferred.
dnl
dnl So keep this section to a bare minimum; regard as a "necessary evil".
case "$host_os" in
*bsd*) LIBS="-L/usr/local/lib"
CPPFLAGS="$CPPFLAGS -I/usr/local/include"
INIT_EXT=".sh"
;;
*solaris*)
;;
*linux*)
AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform)
CFLAGS="$CFLAGS -I${prefix}/include"
;;
darwin*)
AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform)
LIBS="$LIBS -L${prefix}/lib"
CFLAGS="$CFLAGS -I${prefix}/include"
;;
esac
dnl Eventually remove this
CFLAGS="$CFLAGS -I${prefix}/include/heartbeat"
AC_SUBST(INIT_EXT)
AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility)
AC_MSG_NOTICE(Host CPU: $host_cpu)
case "$host_cpu" in
ppc64|powerpc64)
case $CFLAGS in
*powerpc64*) ;;
*) if test "$GCC" = yes; then
CFLAGS="$CFLAGS -m64"
fi ;;
esac
esac
AC_MSG_CHECKING(which format is needed to print uint64_t)
case "$host_cpu" in
s390x)U64T="%lu";;
*64*) U64T="%lu";;
*) U64T="%llu";;
esac
AC_MSG_RESULT($U64T)
AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t)
AC_CHECK_HEADERS(hb_config.h)
AC_CHECK_HEADERS(glue_config.h)
GLUE_HEADER=none
if test "$ac_cv_header_glue_config_h" = "yes"; then
GLUE_HEADER=glue_config.h
elif test "$ac_cv_header_hb_config_h" = "yes"; then
GLUE_HEADER=hb_config.h
else
AC_MSG_FAILURE(Core development headers were not found)
fi
dnl Variables needed for substitution
CRM_DTD_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_DTD_DIRECTORY)
AC_DEFINE_UNQUOTED(CRM_DTD_VERSION,"$CRM_DTD_VERSION", Current version of the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_DTD_VERSION)
CRM_DAEMON_USER=`extract_header_define $GLUE_HEADER HA_CCMUSER`
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)
CRM_DAEMON_GROUP=`extract_header_define $GLUE_HEADER HA_APIGROUP`
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)
CRM_STATE_DIR=${localstatedir}/run/crm
AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets)
AC_SUBST(CRM_STATE_DIR)
PE_STATE_DIR="${localstatedir}/lib/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs)
AC_SUBST(PE_STATE_DIR)
dnl Eventually move out of the heartbeat dir tree and create compatibility code
CRM_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep CIB configuration files)
AC_SUBST(CRM_CONFIG_DIR)
dnl Eventually move out of the heartbeat dir tree and create symlinks when needed
CRM_DAEMON_DIR=`extract_header_define $GLUE_HEADER HA_LIBHBDIR`
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)
dnl Needed so that the AIS plugin can clear out the directory as Heartbeat does
HA_STATE_DIR=`extract_header_define $GLUE_HEADER HA_VARRUNDIR`
AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets)
AC_SUBST(HA_STATE_DIR)
dnl Needed for the location of hostcache in CTS.py
HA_VARLIBHBDIR=`extract_header_define $GLUE_HEADER HA_VARLIBHBDIR`
AC_SUBST(HA_VARLIBHBDIR)
AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file)
OCF_ROOT_DIR=`extract_header_define $GLUE_HEADER OCF_ROOT_DIR`
if test "X$OCF_ROOT_DIR" = X; then
AC_MSG_ERROR(Could not locate OCF directory)
fi
AC_SUBST(OCF_ROOT_DIR)
OCF_RA_DIR=`extract_header_define $GLUE_HEADER OCF_RA_DIR`
AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
AC_SUBST(OCF_RA_DIR)
dnl Extract this value from glue_config.h once we no longer support anything else
STONITH_PLUGIN_DIR="$libdir/stonith/plugins/stonith/"
AC_DEFINE_UNQUOTED(STONITH_PLUGIN_DIR,"$STONITH_PLUGIN_DIR", Location for Stonith plugins)
AC_SUBST(STONITH_PLUGIN_DIR)
RH_STONITH_DIR="$sbindir"
AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)
RH_STONITH_PREFIX="fence_"
AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)
AC_PATH_PROGS(HG, hg false)
AC_MSG_CHECKING(build version)
BUILD_VERSION=unknown
if test -f $srcdir/.hg_archival.txt; then
BUILD_VERSION=`cat $srcdir/.hg_archival.txt | awk '/node:/ { print $2 }'`
elif test -x $HG -a -d .hg; then
BUILD_VERSION=`$HG id -itb`
if test $? != 0; then
BUILD_VERSION=unknown
fi
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_MSG_RESULT($BUILD_VERSION)
AC_SUBST(BUILD_VERSION)
dnl ===============================================
dnl Program Paths
dnl ===============================================
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH
dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL
dnl was NOT being expanded all the time thus causing things to fail.
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13)
AM_PATH_PYTHON
AC_CHECK_PROGS(MAKE, gmake make)
AC_PATH_PROGS(HTML2TXT, lynx w3m)
AC_PATH_PROGS(HELP2MAN, help2man)
AC_PATH_PROGS(POD2MAN, pod2man, pod2man)
AC_PATH_PROGS(ASCIIDOC, asciidoc)
AC_PATH_PROGS(PUBLICAN, publican)
AC_PATH_PROGS(FOP, fop)
AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh)
AC_PATH_PROGS(SCP, scp, /usr/bin/scp)
AC_PATH_PROGS(HG, hg, /bin/false)
AC_PATH_PROGS(TAR, tar)
AC_PATH_PROGS(MD5, md5)
AC_PATH_PROGS(TEST, test)
AC_PATH_PROGS(PKGCONFIG, pkg-config)
AC_PATH_PROGS(XML2CONFIG, xml2-config)
AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
if test x"${LIBTOOL}" = x""; then
AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE})
fi
if test x"${MAKE}" = x""; then
AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE})
fi
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
if test x"${HELP2MAN}" != x""; then
PKG_FEATURES="$PKG_FEATURES manpages"
fi
AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
if test x"${ASCIIDOC}" != x""; then
PKG_FEATURES="$PKG_FEATURES asciidoc"
fi
AM_CONDITIONAL(BUILD_DOCBOOK, test x"${PUBLICAN}" != x"")
if test x"${PUBLICAN}" != x""; then
PKG_FEATURES="$PKG_FEATURES publican"
fi
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket) dnl -lsocket
AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)
AC_CHECK_LIB(uuid, uuid_parse) dnl e2fsprogs
AC_CHECK_LIB(uuid, uuid_create) dnl ossp
if test x"${PKGCONFIG}" = x""; then
AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
fi
dnl
dnl On many systems libcrypto is needed when linking against libsnmp.
dnl Check to see if it exists, and if so use it.
dnl
AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",)
AC_SUBST(CRYPTOLIB)
if test "x${enable_thread_safe}" = "xyes"; then
GPKGNAME="gthread-2.0"
else
GPKGNAME="glib-2.0"
fi
if
$PKGCONFIG --exists $GPKGNAME
then
GLIBCONFIG="$PKGCONFIG $GPKGNAME"
else
set -x
echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH
$PKGCONFIG --exists $GPKGNAME; echo $?
$PKGCONFIG --cflags $GPKGNAME; echo $?
$PKGCONFIG $GPKGNAME; echo $?
set +x
AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE})
fi
AC_MSG_RESULT(using $GLIBCONFIG)
#
# Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
LIBADD_DL=-ldl
else
LIBADD_DL=${lt_cv_dlopen_libs}
fi
dnl
dnl Check for location of gettext
dnl
dnl On at least Solaris 2.x, where it is in libc, specifying -lintl causes
dnl grief. Ensure minimal result, not the sum of all possibilities.
dnl And do libc first.
dnl Known examples:
dnl c: Linux, Solaris 2.6+
dnl intl: BSD, AIX
AC_CHECK_LIB(c, gettext)
if test x$ac_cv_lib_c_gettext != xyes; then
AC_CHECK_LIB(intl, gettext)
fi
if test x$ac_cv_lib_c_gettext != xyes -a x$ac_cv_lib_intl_gettext != xyes; then
AC_MSG_ERROR(You need gettext installed in order to build ${PACKAGE})
fi
if test "X$GLIBCONFIG" != X; then
AC_MSG_CHECKING(for special glib includes: )
GLIBHEAD=`$GLIBCONFIG --cflags`
AC_MSG_RESULT($GLIBHEAD)
CPPFLAGS="$CPPFLAGS $GLIBHEAD"
AC_MSG_CHECKING(for glib library flags)
GLIBLIB=`$GLIBCONFIG --libs`
AC_MSG_RESULT($GLIBLIB)
LIBS="$LIBS $GLIBLIB"
fi
dnl ========================================================================
dnl Headers
dnl ========================================================================
AC_HEADER_STDC
AC_CHECK_HEADERS(arpa/inet.h)
AC_CHECK_HEADERS(asm/types.h)
AC_CHECK_HEADERS(assert.h)
AC_CHECK_HEADERS(auth-client.h)
AC_CHECK_HEADERS(ctype.h)
AC_CHECK_HEADERS(dirent.h)
AC_CHECK_HEADERS(errno.h)
AC_CHECK_HEADERS(fcntl.h)
AC_CHECK_HEADERS(getopt.h)
AC_CHECK_HEADERS(glib.h)
AC_CHECK_HEADERS(grp.h)
AC_CHECK_HEADERS(limits.h)
AC_CHECK_HEADERS(linux/errqueue.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(netdb.h)
AC_CHECK_HEADERS(netinet/in.h)
AC_CHECK_HEADERS(netinet/ip.h)
AC_CHECK_HEADERS(pam/pam_appl.h)
AC_CHECK_HEADERS(pthread.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(security/pam_appl.h)
AC_CHECK_HEADERS(sgtty.h)
AC_CHECK_HEADERS(signal.h)
AC_CHECK_HEADERS(stdarg.h)
AC_CHECK_HEADERS(stddef.h)
AC_CHECK_HEADERS(stdio.h)
AC_CHECK_HEADERS(stdlib.h)
AC_CHECK_HEADERS(string.h)
AC_CHECK_HEADERS(strings.h)
AC_CHECK_HEADERS(sys/dir.h)
AC_CHECK_HEADERS(sys/ioctl.h)
AC_CHECK_HEADERS(sys/param.h)
AC_CHECK_HEADERS(sys/poll.h)
AC_CHECK_HEADERS(sys/resource.h)
AC_CHECK_HEADERS(sys/select.h)
AC_CHECK_HEADERS(sys/socket.h)
AC_CHECK_HEADERS(sys/sockio.h)
AC_CHECK_HEADERS(sys/stat.h)
AC_CHECK_HEADERS(sys/time.h)
AC_CHECK_HEADERS(sys/timeb.h)
AC_CHECK_HEADERS(sys/types.h)
AC_CHECK_HEADERS(sys/uio.h)
AC_CHECK_HEADERS(sys/un.h)
AC_CHECK_HEADERS(sys/utsname.h)
AC_CHECK_HEADERS(sys/wait.h)
AC_CHECK_HEADERS(time.h)
AC_CHECK_HEADERS(unistd.h)
AC_CHECK_HEADERS(winsock.h)
dnl These headers need prerequisites before the tests will pass
dnl AC_CHECK_HEADERS(net/if.h)
dnl AC_CHECK_HEADERS(netinet/icmp6.h)
dnl AC_CHECK_HEADERS(netinet/ip6.h)
dnl AC_CHECK_HEADERS(netinet/ip_icmp.h)
AC_MSG_CHECKING(for special libxml2 includes)
if test "x$XML2CONFIG" = "x"; then
AC_MSG_ERROR(libxml2 config not found)
else
XML2HEAD="`$XML2CONFIG --cflags`"
AC_MSG_RESULT($XML2HEAD)
AC_CHECK_LIB(xml2, xmlReadMemory)
AC_CHECK_LIB(xslt, xsltApplyStylesheet)
fi
CPPFLAGS="$CPPFLAGS $XML2HEAD"
AC_CHECK_HEADERS(libxml/xpath.h)
AC_CHECK_HEADERS(libxslt/xslt.h)
if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
AC_MSG_ERROR(The libxml development headers were not found)
fi
if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
AC_MSG_ERROR(The libxslt development headers were not found)
fi
dnl ========================================================================
dnl Structures
dnl ========================================================================
AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
dnl ========================================================================
dnl Functions
dnl ========================================================================
AC_CHECK_FUNCS(g_log_set_default_handler)
AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function]))
dnl ========================================================================
dnl ltdl
dnl ========================================================================
AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1])
if test "x${enable_bundled_ltdl}" = "xyes"; then
if test $ac_cv_lib_ltdl_lt_dlopen = yes; then
AC_MSG_NOTICE([Disabling usage of installed ltdl])
fi
ac_cv_lib_ltdl_lt_dlopen=no
fi
LIBLTDL_DIR=""
if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then
AC_MSG_NOTICE([Installing local ltdl])
LIBLTDL_DIR=libltdl
( cd $srcdir ; $TAR -xvf libltdl.tar )
if test "$?" -ne 0; then
AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed])
fi
AC_CONFIG_SUBDIRS(libltdl)
else
LIBS="$LIBS -lltdl"
AC_MSG_NOTICE([Using installed ltdl])
INCLTDL=""
LIBLTDL=""
fi
AC_SUBST(INCLTDL)
AC_SUBST(LIBLTDL)
AC_SUBST(LIBLTDL_DIR)
dnl ========================================================================
dnl bzip2
dnl ========================================================================
AC_CHECK_HEADERS(bzlib.h)
AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)
if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
AC_MSG_ERROR(BZ2 libraries not found)
fi
if test x$ac_cv_header_bzlib_h != xyes; then
AC_MSG_ERROR(BZ2 Development headers not found)
fi
dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS(curses.h)
AC_CHECK_HEADERS(curses/curses.h)
AC_CHECK_HEADERS(ncurses.h)
AC_CHECK_HEADERS(ncurses/ncurses.h)
dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
if test "$ac_cv_header_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
fi
if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
if test "x$CURSESLIBS" != "x"; then
PKG_FEATURES="$PKG_FEATURES ncurses"
fi
dnl Check for printw() prototype compatibility
if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then
AC_MSG_CHECKING(whether printw() requires argument of "const char *")
ac_save_LIBS=$LIBS
LIBS="$CURSESLIBS $LIBS"
ac_save_CFLAGS=$CFLAGS
CFLAGS="-Wcast-qual -Werror"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
[
#if defined(HAVE_CURSES_H)
# include <curses.h>
#elif defined(HAVE_NCURSES_H)
# include <ncurses.h>
#endif
],
[printw((const char *)"Test");]
)],
[ac_cv_compatible_printw=yes],
[ac_cv_compatible_printw=no]
)
LIBS=$ac_save_LIBS
CFLAGS=$ac_save_CFLAGS
AC_MSG_RESULT([$ac_cv_compatible_printw])
if test "$ac_cv_compatible_printw" = no; then
AC_MSG_WARN([The printw() function of your ncurses or curses library is old; we will disable usage of the library. If you want to use this library anyway, please update to a newer version of the library; ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.])
AC_MSG_NOTICE([Disabling curses])
AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?])
fi
fi
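dnl For illustration only (not part of the check above): modern ncurses declares
dnl   int printw(const char *fmt, ...);
dnl whereas very old curses libraries declare it without const, i.e.
dnl   int printw(char *fmt, ...);
dnl so the test program, compiled with -Wcast-qual -Werror, only succeeds against
dnl the const-correct prototype.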
AC_SUBST(CURSESLIBS)
dnl ========================================================================
dnl Cluster infrastructure - Heartbeat
dnl ========================================================================
dnl On Debian, AC_CHECK_LIB fails if a library has any unresolved symbols
dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb
AC_CHECK_LIB(pils, PILLoadPlugin)
AC_CHECK_LIB(plumb, G_main_add_IPC_Channel)
if test x"$ac_cv_lib_plumb_G_main_add_IPC_Channel" != x"yes"; then
AC_MSG_FAILURE(Core Heartbeat utility libraries not found: $ac_cv_lib_plumb_G_main_add_IPC_Channel)
fi
dnl Compatibility checks
AC_CHECK_FUNCS(msgfromIPC_timeout)
AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include <lrm/lrm_api.h>]])
dnl ========================================================================
dnl Cluster stack - Heartbeat
dnl ========================================================================
case $SUPPORT_HEARTBEAT in
1|yes|true)
AC_CHECK_LIB(hbclient, ll_cluster_new,
[SUPPORT_HEARTBEAT=1], [AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found)]);;
try)
AC_CHECK_LIB(hbclient, ll_cluster_new,
[SUPPORT_HEARTBEAT=1], [SUPPORT_HEARTBEAT=0]);;
*) SUPPORT_HEARTBEAT=0;;
esac
AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1)
AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer)
dnl ========================================================================
dnl Cluster stack - OpenAIS
dnl ========================================================================
AISLIB=""
dnl Normalize the values
case $SUPPORT_AIS in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_AIS=no;;
esac
AC_MSG_CHECKING(for native AIS)
AISMSGLIB=""
AIS_VERSION="none"
COROSYNC_PKG="$PKGCONFIG libcoroipcc"
if test $SUPPORT_AIS = no; then
AC_MSG_RESULT(no... not requested.)
else
AC_MSG_RESULT($SUPPORT_AIS, with '$AISPREFIX')
AC_CHECK_HEADERS(openais/saAis.h)
AC_CHECK_HEADERS(corosync/coroipcc.h)
$COROSYNC_PKG --exists
if test $? = 0; then
AIS_VERSION="corosync"
elif test "$ac_cv_header_openais_saAis_h" = "yes"; then
AIS_VERSION="whitetank"
else
aisreason="Whitetank headers not found"
fi
fi
if test $AIS_VERSION != "none"; then
AC_MSG_CHECKING(for OpenAIS branch)
AC_MSG_RESULT($AIS_VERSION)
fi
if test $AIS_VERSION = "corosync"; then
if test "$ac_cv_header_corosync_coroipcc_h" != "yes"; then
AIS_VERSION="none"
aisreason="Corosync headers not found"
fi
saveLIBS="$LIBS"
LIBS="$LIBS `$COROSYNC_PKG --libs-only-L`"
AC_CHECK_LIB(coroipcc, coroipcc_msg_send_reply_receive, [])
LIBS="$saveLIBS"
if test $ac_cv_lib_coroipcc_coroipcc_msg_send_reply_receive != yes; then
AC_MSG_RESULT(Cannot locate AIS messaging library)
aisreason="requred Corosync libraries not found"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
dnl Find it in lib, lib64, or wherever it wants to live...
AC_MSG_CHECKING(location of OpenAIS libraries)
dnl CoroSync location
alib=`ls ${AISPREFIX}/*/libcpg.so | head -n 1`
if test -z "$alib"; then
dnl Whitetank location
alib=`ls ${AISPREFIX}/*/*/libcpg.so | head -n 1`
fi
AISLIB=`dirname $alib`
AC_MSG_RESULT($AISLIB)
if test "x$AISLIB" = "x"; then
AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with)
aisreason="library directory not found"
AIS_VERSION="none"
elif test ! -d "$AISLIB"; then
AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with)
aisreason="specified library directory does not exist"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
AC_MSG_CHECKING(location of OpenAIS plugins)
if test -z "$LCRSODIR"; then
LCRSODIR="$libexecdir/lcrso"
alib=`ls ${AISPREFIX}/*/lcrso/objdb.lcrso | head -n 1`
LCRSODIR=`dirname $alib`
fi
AC_MSG_RESULT($LCRSODIR)
if test "x$LCRSODIR" = "x"; then
AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir)
aisreason="plugin directory not found"
AIS_VERSION="none"
elif test ! -d "$LCRSODIR"; then
AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir)
aisreason="specified plugin directory does not exist"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
dnl Don't add the messaging library to LIBS since most daemons don't need/use it
saveLIBS="$LIBS"
LIBS="$LIBS -L${AISLIB} -R${AISLIB}"
AC_CHECK_LIB(SaMsg, saSendReceiveReply, [])
AC_CHECK_LIB(SaMsg, openais_msg_send_reply_receive, [])
if test $ac_cv_lib_SaMsg_openais_msg_send_reply_receive = yes; then
: OpenAIS
elif test $ac_cv_lib_SaMsg_saSendReceiveReply = yes; then
: OpenAIS
AC_DEFINE_UNQUOTED(TRADITIONAL_AIS_IPC, 1, "Use the 'old' AIS IPC interface")
else
AC_MSG_RESULT(Cannot locate AIS messaging library)
aisreason="requred libraries not found"
AIS_VERSION="none"
fi
LIBS="$saveLIBS"
fi
SUPPORT_AIS=1
case $AIS_VERSION in
corosync)
AC_DEFINE_UNQUOTED(AIS_COROSYNC, 1, "AIS target is the corosync series")
LCRSODIR=`$PKGCONFIG corosync --variable=lcrsodir`
CFLAGS="$CFLAGS `$COROSYNC_PKG --cflags`"
AISMSGLIB=`$COROSYNC_PKG --libs`
;;
whitetank)
AC_DEFINE_UNQUOTED(AIS_WHITETANK, 1, "AIS target is the whitetank series")
CFLAGS="$CFLAGS -I$AISPREFIX/include/openais"
AISMSGLIB="-L${AISLIB} -R${AISLIB} -lSaMsg"
;;
none)
SUPPORT_AIS=0
if test "x$aisreason" != x; then
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support OpenAIS: $aisreason)
else
AC_MSG_FAILURE(Unable to support OpenAIS: $aisreason)
fi
fi
;;
*) AC_MSG_FAILURE(Unknown OpenAIS branch: $AIS_VERSION);;
esac
AC_DEFINE_UNQUOTED(SUPPORT_AIS, $SUPPORT_AIS, Support the OpenAIS messaging and membership layer)
AM_CONDITIONAL(BUILD_AIS_SUPPORT, test $SUPPORT_AIS = 1)
dnl
dnl Cluster stack - Sanity
dnl
STACKS=""
CLUSTERLIBS=""
if test $SUPPORT_HEARTBEAT = 1; then
STACKS="$STACKS heartbeat"
CLUSTERLIBS="$CLUSTERLIBS -lhbclient -lccmclient"
fi
if test $SUPPORT_AIS = 1; then
STACKS="$STACKS $AIS_VERSION"
CLUSTERLIBS="$CLUSTERLIBS ${AISMSGLIB}"
else
AISPREFIX=""
LCRSODIR="$libdir"
fi
PKG_FEATURES="$PKG_FEATURES$STACKS"
AC_MSG_CHECKING(for supported stacks)
if test x"$STACKS" = x; then
AC_MSG_FAILURE(You must choose at least one cluster stack to support)
fi
AC_MSG_RESULT($STACKS)
AC_SUBST(CLUSTERLIBS)
AC_SUBST(LCRSODIR)
dnl ========================================================================
dnl SNMP
dnl ========================================================================
case $SUPPORT_SNMP in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_SNMP=no;;
esac
SNMPLIB=""
AC_MSG_CHECKING(for snmp support)
if test $SUPPORT_SNMP = no; then
AC_MSG_RESULT(no... not requested.)
SUPPORT_SNMP=0
else
SNMPCONFIG=""
AC_MSG_RESULT($SUPPORT_SNMP)
AC_CHECK_HEADERS(net-snmp/net-snmp-config.h)
if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then
SUPPORT_SNMP="no"
fi
if test $SUPPORT_SNMP != no; then
AC_PATH_PROGS(SNMPCONFIG, net-snmp-config)
if test "X${SNMPCONFIG}" = "X"; then
AC_MSG_RESULT(You need the net-snmp development package to continue.)
SUPPORT_SNMP=no
fi
fi
if test $SUPPORT_SNMP != no; then
AC_MSG_CHECKING(for special snmp libraries)
SNMPLIBS=`$SNMPCONFIG --agent-libs`
AC_MSG_RESULT($SNMPLIBS)
fi
if test $SUPPORT_SNMP != no; then
savedLibs=$LIBS
LIBS="$LIBS $SNMPLIBS"
AC_CHECK_FUNCS(netsnmp_transport_open_client)
if test $ac_cv_func_netsnmp_transport_open_client != yes; then
SUPPORT_SNMP=no
fi
LIBS=$savedLibs
fi
if test $SUPPORT_SNMP = no; then
SUPPORT_SNMP=0
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support SNMP)
else
AC_MSG_FAILURE(Unable to support SNMP)
fi
else
SUPPORT_SNMP=1
fi
fi
if test $SUPPORT_SNMP = 1; then
PKG_FEATURES="$PKG_FEATURES snmp"
fi
AC_SUBST(SNMPLIBS)
AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps)
dnl ========================================================================
dnl ESMTP
dnl ========================================================================
case $SUPPORT_ESMTP in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_ESMTP=no;;
esac
ESMTPLIB=""
AC_MSG_CHECKING(for esmtp support)
if test $SUPPORT_ESMTP = no; then
AC_MSG_RESULT(no... not requested.)
SUPPORT_ESMTP=0
else
ESMTPCONFIG=""
AC_MSG_RESULT($SUPPORT_ESMTP)
AC_CHECK_HEADERS(libesmtp.h)
if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then
ENABLE_ESMTP="no"
fi
if test $SUPPORT_ESMTP != no; then
AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config)
if test "X${ESMTPCONFIG}" = "X"; then
AC_MSG_RESULT(You need the libesmtp development package to continue.)
SUPPORT_ESMTP=no
fi
fi
if test $SUPPORT_ESMTP != no; then
AC_MSG_CHECKING(for special esmtp libraries)
ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '`
AC_MSG_RESULT($ESMTPLIBS)
fi
if test $SUPPORT_ESMTP = no; then
SUPPORT_ESMTP=0
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support ESMTP)
else
AC_MSG_FAILURE(Unable to support ESMTP)
fi
else
SUPPORT_ESMTP=1
fi
fi
if test $SUPPORT_ESMTP = 1; then
PKG_FEATURES="$PKG_FEATURES libesmtp"
fi
AC_SUBST(ESMTPLIBS)
AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP)
dnl ========================================================================
dnl GnuTLS
dnl ========================================================================
AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)
dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program.
dnl If no 'libgnutls-config', try traditional autoconf means.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
AC_MSG_CHECKING(for gnutls header flags)
GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
AC_MSG_RESULT($GNUTLSHEAD)
AC_MSG_CHECKING(for gnutls library flags)
GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
AC_MSG_RESULT($GNUTLSLIBS)
else
AC_CHECK_LIB(gnutls, gnutls_init)
fi
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)
dnl ========================================================================
dnl System Health
dnl ========================================================================
dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if
$PKGCONFIG --exists $SERVICELOG
then
SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")
dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if
$PKGCONFIG --exists $OPENIPMI $SERVICELOG
then
OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl is a dummy function which no system supplies. It is here to make
dnl the system compile semi-correctly on OpenBSD which doesn't know
dnl how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl System-V systems may have it, but hidden and/or deprecated.
dnl A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl putenv instead)
dnl On the other hand, putenv doesn't provide the right API for the
dnl code and has memory leaks designed in (sigh...), so
dnl a replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl A replacement function is supplied for it.
dnl
dnl unsetenv: is some bsdish function that should also be avoided.
dnl A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl daemon: is a GNU function. The daemon() function is for programs wishing to
dnl detach themselves from the controlling terminal and run in the
dnl background as a system daemon.
dnl A replacement function is supplied for it.
AC_REPLACE_FUNCS(alphasort inet_pton NoSuchFunctionName scandir setenv strerror unsetenv strnlen strndup daemon strlcpy strlcat)
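dnl For illustration only: the real implementations live in the replace/ directory;
dnl a minimal strnlen() substitute along those lines could look like
dnl   size_t strnlen(const char *s, size_t maxlen) {
dnl       const char *p = memchr(s, '\0', maxlen);
dnl       return p ? (size_t)(p - s) : maxlen;
dnl   }
dnl AC_REPLACE_FUNCS compiles such a file into LIBOBJS only when the system C
dnl library lacks the function.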
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================
dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment, then this should have
dnl no effect. However, if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to subprocesses.
CC_ERRORS=""
CC_EXTRAS=""
if export | fgrep " CFLAGS=" > /dev/null; then
export -n CFLAGS || true # We don't want to bomb out if this fails
fi
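dnl For illustration only (bash syntax; the "|| true" above guards shells that
dnl lack "export -n"): un-exporting keeps the value in this script while hiding
dnl it from child processes, e.g.
dnl   CFLAGS=-O2; export CFLAGS
dnl   export -n CFLAGS
dnl   sh -c 'echo ${CFLAGS:-unset}'   # prints "unset"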
if test "$GCC" != yes; then
CFLAGS="$CFLAGS -g"
enable_fatal_warnings=no
else
CFLAGS="$CFLAGS -ggdb3 -O0"
# We had to eliminate -Wnested-externs because of libtool changes
EXTRA_FLAGS="-fgnu89-inline
-fstack-protector-all
-Wall
-Waggregate-return
-Wbad-function-cast
-Wcast-qual
-Wcast-align
-Wdeclaration-after-statement
-Wendif-labels
-Wfloat-equal
-Wformat=2
-Wformat-security
-Wformat-nonliteral
-Winline
-Wmissing-prototypes
-Wmissing-declarations
-Wnested-externs
-Wno-long-long
-Wno-strict-aliasing
-Wpointer-arith
-Wstrict-prototypes
-Wunsigned-char
-Wwrite-strings"
# Additional warnings it might be nice to enable one day
# -Wshadow
# -Wunreachable-code
for j in $EXTRA_FLAGS
do
if
cc_supports_flag $j
then
CC_EXTRAS="$CC_EXTRAS $j"
fi
done
dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x
GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'`
AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4)
dnl System specific options
case "$host_os" in
*linux*|*bsd*)
if test "${enable_fatal_warnings}" = "unknown"; then
enable_fatal_warnings=yes
fi
;;
esac
if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
enable_fatal_warnings=yes
else
enable_fatal_warnings=no
fi
if test "x${enable_ansi}" != xno && cc_supports_flag -std=iso9899:199409 ; then
AC_MSG_NOTICE(Enabling ANSI Compatibility)
CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
fi
AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi
CFLAGS="$CFLAGS $CC_EXTRAS"
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)
dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking goes on, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working. In particular, -Werror will
dnl *always* cause us troubles if we set it before here.
dnl
dnl
if test "x${enable_fatal_warnings}" = xyes ; then
AC_MSG_NOTICE(Enabling Fatal Warnings)
CFLAGS="$CFLAGS -Werror"
fi
AC_SUBST(CFLAGS)
dnl This is useful for use in Makefiles that need to remove one specific flag
CFLAGS_COPY="$CFLAGS"
AC_SUBST(CFLAGS_COPY)
AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries
AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff...
AC_SUBST(LOCALE)
dnl Options for cleaning up the compiler output
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
if test "x${enable_quiet}" = "xyes"; then
QUIET_LIBTOOL_OPTS="--quiet"
QUIET_MAKE_OPTS="--quiet"
fi
AC_MSG_RESULT(Suppress make details: ${enable_quiet})
dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"
AC_SUBST(CC)
AC_SUBST(MAKE)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_MAKE_OPTS)
AC_SUBST(QUIET_LIBTOOL_OPTS)
dnl The Makefiles and shell scripts we output
AC_CONFIG_FILES(Makefile \
cts/Makefile \
cts/CTSvars.py \
cts/LSBDummy \
cib/Makefile \
crmd/Makefile \
pengine/Makefile \
pengine/regression.core.sh \
debian/Makefile \
doc/Makefile \
doc/cibadmin.8 \
doc/crm_resource.8 \
include/Makefile \
include/crm/Makefile \
include/crm/common/Makefile \
include/crm/pengine/Makefile \
replace/Makefile \
lib/Makefile \
lib/ais/Makefile \
lib/common/Makefile \
lib/cib/Makefile \
lib/pengine/Makefile \
lib/transition/Makefile \
lib/fencing/Makefile \
lib/plugins/Makefile \
lib/plugins/lrm/Makefile \
fencing/Makefile \
extra/Makefile \
extra/resources/Makefile \
tools/Makefile \
tools/haresources2cib.py \
tools/hb2openais.sh \
tools/crm_primitive.py \
tools/crm \
- tools/shell/Makefile \
- tools/shell/templates/Makefile \
- tools/shell/regression/Makefile \
- tools/shell/regression/regression.sh \
- tools/shell/regression/lrmregtest-lsb \
- tools/shell/regression/testcases/Makefile \
+ shell/Makefile \
+ shell/templates/Makefile \
+ shell/regression/Makefile \
+ shell/regression/regression.sh \
+ shell/regression/lrmregtest-lsb \
+ shell/regression/testcases/Makefile \
xml/Makefile \
xml/pacemaker.rng \
xml/resources.rng \
xml/constraints.rng \
xml/rule.rng \
xml/nvset.rng \
)
dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()
dnl *****************
dnl Configure summary
dnl *****************
AC_MSG_RESULT([])
AC_MSG_RESULT([$PACKAGE configuration:])
AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_RESULT([ Features =${PKG_FEATURES}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Prefix = ${prefix}])
AC_MSG_RESULT([ Executables = ${sbindir}])
AC_MSG_RESULT([ Man pages = ${mandir}])
AC_MSG_RESULT([ Libraries = ${libdir}])
AC_MSG_RESULT([ Header files = ${includedir}])
AC_MSG_RESULT([ Arch-independent files = ${datadir}])
AC_MSG_RESULT([ State information = ${localstatedir}])
AC_MSG_RESULT([ System configuration = ${sysconfdir}])
AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ CFLAGS = ${CFLAGS}])
AC_MSG_RESULT([ Libraries = ${LIBS}])
AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}])
diff --git a/cts/CTStests.py b/cts/CTStests.py
index 1de479b906..e4f6aac398 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -1,2398 +1,2415 @@
'''CTS: Cluster Testing System: Tests module
There are a few things we want to do here:
'''
__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
Add ResourceRecover testcase Zhao Kai <zhaokai@cn.ibm.com>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# SPECIAL NOTE:
#
# Tests may NOT implement any cluster-manager-specific code in them.
# EXTEND the ClusterManager object to provide the base capabilities
# the test needs if you need to do something that the current CM classes
# do not. Otherwise you screw up the whole point of the object structure
# in CTS.
#
# Thank you.
#
import CTS
import CTSaudits
import time, os, re, types, string, tempfile, sys
from CTSaudits import *
from stat import *
# List of all class objects for tests which we ought to
# consider running.
class AllTests:
'''
A collection of tests which are run at random.
'''
def __init__(self, scenario, cm, tests, Audits):
self.CM = cm
self.Env = cm.Env
self.Scenario = scenario
self.Tests = []
self.Audits = []
self.ns=CTS.NodeStatus(self.Env)
self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
self.IndividualStats= {}
for audit in Audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be a subclass of ClusterAudit")
if audit.is_applicable():
self.Audits.append(audit)
for test in tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
if test.is_applicable():
self.Tests.append(test)
if not scenario.IsApplicable():
raise ValueError("Scenario not applicable in"
" given Environment")
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def audit(self, BadNews, test):
errcount=0
BadNewsDebug=0
#BadNews.debug=1
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append("BadNews:")
ignorelist.extend(self.CM.errorstoignore())
if test:
ignorelist.extend(test.errorstoignore())
while errcount < 1000:
if BadNewsDebug: print "Looking for BadNews"
match=BadNews.look(0)
if match:
if BadNewsDebug: print "BadNews found: "+match
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
if BadNewsDebug: print "Ignoring based on pattern: ("+ignore+")"
add_err = 0
if add_err == 1:
self.CM.log("BadNews: " + match)
self.incr("BadNews")
errcount=errcount+1
else:
break
else:
answer = raw_input('Big problems. Continue? [nY]')
if answer and answer == "n":
self.CM.log("Shutting down.")
self.CM.stopall()
self.summarize()
raise ValueError("Looks like we hit a BadNews jackpot!")
for audit in self.Audits:
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
self.incr("auditfail")
if test:
test.incr("auditfail")
def summarize(self):
self.CM.log("****************")
self.CM.log("Overall Results:" + repr(self.Stats))
self.CM.log("****************")
stat_filter = {
"calls":0,
"failure":0,
"skipped":0,
"auditfail":0,
}
self.CM.log("Test Summary")
for test in self.Tests:
for key in stat_filter.keys():
stat_filter[key] = test.Stats[key]
self.CM.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
self.CM.debug("Detailed Results")
for test in self.Tests:
self.CM.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
self.CM.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def test_loop(self, BadNews, max):
testcount=1
self.CM.log("Executing all tests once")
for test in self.Tests:
if self.run_test(BadNews, test, testcount):
testcount += 1
return testcount
def run_test(self, BadNews, test, testcount):
nodechoice = self.Env.RandomNode()
ret = 1
where = ""
did_run = 0
self.CM.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
- starttime=test.set_starttime()
+ starttime = test.set_timer()
if not test.setup(nodechoice):
self.CM.log("Setup failed")
ret = 0
elif not test.canrunnow(nodechoice):
self.CM.log("Skipped")
test.skipped()
else:
did_run = 1
ret = test(nodechoice)
if not test.teardown(nodechoice):
self.CM.log("Teardown failed")
ret = 0
- test.log_mark("stop")
stoptime=time.time()
self.CM.oprofileSave(testcount)
elapsed_time = stoptime - starttime
- test_time = stoptime - test.starttime
+ test_time = stoptime - test.get_timer()
if not test.has_key("min_time"):
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
else:
test["elapsed_time"] = test["elapsed_time"] + elapsed_time
if test_time < test["min_time"]:
test["min_time"] = test_time
if test_time > test["max_time"]:
test["max_time"] = test_time
if ret:
self.incr("success")
- self.CM.debug("Test %s runtime: %.2f" % (test.name, test_time))
+ test.log_timer()
else:
self.incr("failure")
self.CM.statall()
did_run = 1 # Force the test count to be incremented anyway so test extraction works
self.audit(BadNews, test)
return did_run
def run(self, max=1):
(
'''
Set up the given scenario, then run the selected tests at
random for the selected number of iterations.
''')
BadNews=CTS.LogWatcher(self.CM["LogFileName"], self.CM["BadRegexes"]
, timeout=0)
BadNews.setwatch()
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
self.CM.oprofileStop()
self.CM.oprofileStart()
if not self.CM.Env["DoBSC"]:
audit = LogAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
audit = DiskAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
if not self.Scenario.SetUp(self.CM):
return (None, None)
self.CM.oprofileSave(0)
time.sleep(30)
# This makes sure everything is stabilized before starting...
self.audit(BadNews, None)
testcount = self.test_loop(BadNews, max)
self.Scenario.TearDown(self.CM)
self.CM.oprofileSave(testcount)
self.CM.oprofileStop()
self.audit(BadNews, None)
for test in self.Tests:
self.IndividualStats[test.name] = test.Stats
return self.Stats, self.IndividualStats
class RandomTests(AllTests):
def test_loop(self, BadNews, max):
testcount=1
self.CM.log("Executing tests at random")
while testcount <= max:
test = self.Env.RandomGen.choice(self.Tests)
if self.run_test(BadNews, test, testcount):
testcount += 1
return testcount
class BenchTests(AllTests):
'''
Nothing (yet) here.
'''
AllTestClasses = [ ]
class CTSTest:
'''
A Cluster test.
We implement the basic set of properties and behaviors for a generic
cluster test.
Cluster tests track their own statistics.
We keep each of the kinds of counts we track as separate {name,value}
pairs.
'''
def __init__(self, cm):
#self.name="the unnamed test"
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
# if not issubclass(cm.__class__, ClusterManager):
# raise ValueError("Must be a ClusterManager object")
self.CM = cm
self.Audits = []
self.timeout=120
- self.starttime=0
self.passed = 1
self.is_loop = 0
self.is_unsafe = 0
self.is_experimental = 0
self.is_valgrind = 0
self.benchmark = 0 # which tests to benchmark
+ self.timer = {} # timers
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def log_mark(self, msg):
- self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
- return
-
- def set_starttime(self):
- self.starttime=time.time()
- self.log_mark("start")
- return self.starttime
+ self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
+ return
+
+ def get_timer(self,key = "test"):
+ try: return self.timer[key]
+ except: return 0
+
+ def set_timer(self,key = "test"):
+ self.timer[key] = time.time()
+ return self.timer[key]
+
+ def log_timer(self,key = "test"):
+ elapsed = 0
+ if key in self.timer:
+ elapsed = time.time() - self.timer[key]
+ s = key == "test" and self.name or "%s:%s" %(self.name,key)
+ self.CM.debug("%s runtime: %.2f" % (s, elapsed))
+ del self.timer[key]
+ return elapsed
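+ # Illustrative usage only (assumed typical pattern; see StonithdTest below):
+ #   self.set_timer("fence")    # start a named phase timer
+ #   ... phase being timed ...
+ #   self.log_timer("fence")    # logs "<testname>:fence runtime: N.NN" and clears it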
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
# Reset the test passed boolean
if name == "calls":
self.passed = 1
def failure(self, reason="none"):
'''Increment the failure count'''
self.passed = 0
self.incr("failure")
self.CM.log(("Test %s" % self.name).ljust(35) +" FAILED: %s" % reason)
return None
def success(self):
'''Increment the success count'''
self.incr("success")
return 1
def skipped(self):
'''Increment the skipped count'''
self.incr("skipped")
return 1
def __call__(self, node):
'''Perform the given test'''
raise ValueError("Abstract Class member (__call__)")
self.incr("calls")
return self.failure()
def audit(self):
passed = 1
if len(self.Audits) > 0:
for audit in self.Audits:
if not audit():
self.CM.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
self.incr("auditfail")
passed = 0
return passed
def setup(self, node):
'''Setup the given test'''
return self.success()
def teardown(self, node):
'''Tear down the given test'''
return self.success()
def local_badnews(self, prefix, watch, local_ignore=[]):
errcount = 0
if not prefix:
prefix = "LocalBadNews:"
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append(prefix)
ignorelist.extend(local_ignore)
while errcount < 100:
match=watch.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.CM.log(prefix + " " + match)
errcount=errcount+1
else:
break
else:
self.CM.log("Too many errors!")
return errcount
def is_applicable(self):
return self.is_applicable_common()
def is_applicable_common(self):
'''Return TRUE if we are applicable in the current test configuration'''
#raise ValueError("Abstract Class member (is_applicable)")
if self.is_loop and not self.CM.Env["loop-tests"]:
return 0
elif self.is_unsafe and not self.CM.Env["unsafe-tests"]:
return 0
elif self.is_valgrind and not self.CM.Env["valgrind-tests"]:
return 0
elif self.is_experimental and not self.CM.Env["experimental-tests"]:
return 0
return 1
def find_ocfs2_resources(self, node):
self.r_o2cb = None
self.r_ocfs2 = []
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "o2cb" and r.parent != "NA":
self.CM.debug("Found o2cb: %s" % self.r_o2cb)
self.r_o2cb = r.parent
if re.search("^Constraint", line):
c = AuditConstraint(self.CM, line)
if c.type == "rsc_colocation" and c.target == self.r_o2cb:
self.r_ocfs2.append(c.rsc)
self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
return len(self.r_ocfs2)
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
return 1
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return []
###################################################################
class StopTest(CTSTest):
###################################################################
'''Stop (deactivate) the cluster manager on a node'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stop"
def __call__(self, node):
'''Perform the 'stop' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] != "up":
return self.skipped()
patterns = []
# Technically we should always be able to notice ourselves stopping
patterns.append(self.CM["Pat:We_stopped"] % node)
#if self.CM.Env["use_logd"]:
# patterns.append(self.CM["Pat:Logd_stopped"] % node)
# Any active node needs to notice this one left
# NOTE: This won't work if we have multiple partitions
for other in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[other] == "up" and other != node:
patterns.append(self.CM["Pat:They_stopped"] %(other, node))
#self.debug("Checking %s will notice %s left"%(other, node))
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
if node == self.CM.OurNode:
self.incr("us")
else:
if self.CM.upcount() <= 1:
self.incr("all")
else:
self.incr("them")
self.CM.StopaCM(node)
watch_result = watch.lookforall()
failreason=None
UnmatchedList = "||"
if watch.unmatched:
(rc, output) = self.CM.rsh(node, "/bin/ps axf", None)
for line in output:
self.CM.debug(line)
for regex in watch.unmatched:
self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex))
UnmatchedList += regex + "||";
failreason="Missing shutdown pattern"
self.CM.cluster_stable(self.CM["DeadTime"])
if not watch.unmatched or self.CM.upcount() == 0:
return self.success()
if len(watch.unmatched) >= self.CM.upcount():
return self.failure("no match against (%s)" % UnmatchedList)
if failreason == None:
return self.success()
else:
return self.failure(failreason)
#
# We don't register StopTest because it's better when called by
# another test...
#
###################################################################
class StartTest(CTSTest):
###################################################################
'''Start (activate) the cluster manager on a node'''
def __init__(self, cm, debug=None):
CTSTest.__init__(self,cm)
self.name="start"
self.debug = debug
def __call__(self, node):
'''Perform the 'start' test. '''
self.incr("calls")
if self.CM.upcount() == 0:
self.incr("us")
else:
self.incr("them")
if self.CM.ShouldBeStatus[node] != "down":
return self.skipped()
elif self.CM.StartaCM(node):
return self.success()
else:
return self.failure("Startup %s on node %s failed"
%(self.CM["Name"], node))
#
# We don't register StartTest because it's better when called by
# another test...
#
###################################################################
class FlipTest(CTSTest):
###################################################################
'''If it's running, stop it. If it's stopped, start it.
Overthrow the status quo...
'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Flip"
self.start = StartTest(cm)
self.stop = StopTest(cm)
def __call__(self, node):
'''Perform the 'Flip' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] == "up":
self.incr("stopped")
ret = self.stop(node)
type="up->down"
# Give the cluster time to recognize it's gone...
time.sleep(self.CM["StableTime"])
elif self.CM.ShouldBeStatus[node] == "down":
self.incr("started")
ret = self.start(node)
type="down->up"
else:
return self.skipped()
self.incr(type)
if ret:
return self.success()
else:
return self.failure("%s failure" % type)
# Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)
###################################################################
class RestartTest(CTSTest):
###################################################################
'''Stop and restart a node'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Restart"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.benchmark = 1
def __call__(self, node):
'''Perform the 'restart' test. '''
self.incr("calls")
self.incr("node:" + node)
ret1 = 1
if self.CM.StataCM(node):
self.incr("WasStopped")
if not self.start(node):
return self.failure("start (setup) failure: "+node)
- self.set_starttime()
+ self.set_timer()
if not self.stop(node):
return self.failure("stop failure: "+node)
if not self.start(node):
return self.failure("start failure: "+node)
return self.success()
# Register RestartTest as a good test to run
AllTestClasses.append(RestartTest)
###################################################################
class StonithdTest(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stonithd"
self.startall = SimulStartLite(cm)
self.benchmark = 1
def __call__(self, node):
self.incr("calls")
if len(self.CM.Env["nodes"]) < 2:
return self.skipped()
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
watchpats = []
watchpats.append("Forcing node %s to be terminated" % node)
watchpats.append("Scheduling Node %s for STONITH" % node)
watchpats.append("Executing .* fencing operation")
watchpats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)
if not self.CM.is_node_dc(node):
# Won't be found if the DC is shot (and there's no equivalent message from stonithd)
watchpats.append("tengine_stonith_callback: .*: OK ")
# TODO else: look for the notification on a peer once implemented
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Expecting %s to stay down" % node)
self.CM.ShouldBeStatus[node]="down"
else:
self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"]))
watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node)
watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node)
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
self.CM.rsh(node, "crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node)
+ self.set_timer("fence")
matched = watch.lookforall()
+ self.log_timer("fence")
+ self.set_timer("reform")
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
+ self.log_timer("reform")
return self.success()
def errorstoignore(self):
return [ "Executing .* fencing operation" ]
def is_applicable(self):
if not self.is_applicable_common():
return 0
if self.CM.Env.has_key("DoStonith"):
return self.CM.Env["DoStonith"]
return 1
AllTestClasses.append(StonithdTest)
###################################################################
class StartOnebyOne(CTSTest):
###################################################################
'''Start all the nodes ~ one by one'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StartOnebyOne"
self.stopall = SimulStopLite(cm)
self.start = StartTest(cm)
self.ns=CTS.NodeStatus(cm.Env)
def __call__(self, dummy):
'''Perform the 'StartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Test setup failed")
failed=[]
- self.set_starttime()
+ self.set_timer()
for node in self.CM.Env["nodes"]:
if not self.start(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to start: " + repr(failed))
return self.success()
# Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)
###################################################################
class SimulStart(CTSTest):
###################################################################
'''Start all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStart"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStart' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
self.CM.clear_all_caches()
if not self.startall(None):
return self.failure("Startall failed")
return self.success()
# Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)
###################################################################
class SimulStop(CTSTest):
###################################################################
'''Stop all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStop"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStop' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.stopall(None):
return self.failure("Stopall failed")
return self.success()
# Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)
###################################################################
class StopOnebyOne(CTSTest):
###################################################################
'''Stop all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StopOnebyOne"
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
def __call__(self, dummy):
'''Perform the 'StopOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
failed=[]
- self.set_starttime()
+ self.set_timer()
for node in self.CM.Env["nodes"]:
if not self.stop(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to stop: " + repr(failed))
self.CM.clear_all_caches()
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)
###################################################################
class RestartOnebyOne(CTSTest):
###################################################################
'''Restart all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="RestartOnebyOne"
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'RestartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
did_fail=[]
- self.set_starttime()
+ self.set_timer()
self.restart = RestartTest(self.CM)
for node in self.CM.Env["nodes"]:
if not self.restart(node):
did_fail.append(node)
if did_fail:
return self.failure("Could not restart %d nodes: %s"
%(len(did_fail), repr(did_fail)))
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)
###################################################################
class PartialStart(CTSTest):
###################################################################
'''Start a node - but tell it to stop before it finishes starting up'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="PartialStart"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
#self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'PartialStart' test. '''
self.incr("calls")
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
# FIXME! This should use the CM class to get the pattern
# then it would be applicable in general
watchpats = []
watchpats.append("Starting crmd")
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.CM.StartaCMnoBlock(node)
ret = watch.lookforall()
if not ret:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
return self.failure("Setup of %s failed" % node)
ret = self.stopall(None)
if not ret:
return self.failure("%s did not stop in time" % node)
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(PartialStart)
#######################################################################
class StandbyTest(CTSTest):
#######################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Standby"
self.benchmark = 1
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
# make sure the node is active
# set the node to standby mode
# check resources, no resources should be running on the node
# set the node to active mode
# check resources, resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
self.incr("calls")
ret=self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
self.CM.debug("Make sure node %s is active" % node)
if self.CM.StandbyStatus(node) != "off":
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.CM.debug("Getting resources running on node %s" % node)
rsc_on_node = self.CM.active_resources(node)
self.CM.debug("Setting node %s to standby mode" % node)
if not self.CM.SetStandbyMode(node, "on"):
return self.failure("can't set node %s to standby mode" % node)
- self.log_mark("standby:on")
+ self.set_timer("on")
time.sleep(1) # Allow time for the update to be applied and cause something
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "on":
return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
- self.log_mark("standby:on-idle")
+ self.log_timer("on")
self.CM.debug("Checking resources")
bad_run = self.CM.active_resources(node)
if len(bad_run) > 0:
rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
self.CM.debug("Setting node %s to active mode" % node)
self.CM.SetStandbyMode(node, "off")
return rc
self.CM.debug("Setting node %s to active mode" % node)
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
- self.log_mark("standby:off")
+ self.set_timer("off")
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
- self.log_mark("standby:off-idle")
+ self.log_timer("off")
return self.success()
AllTestClasses.append(StandbyTest)
#######################################################################
class ValgrindTest(CTSTest):
#######################################################################
'''Check for memory leaks'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Valgrind"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_valgrind = 1
self.is_loop = 1
def setup(self, node):
self.incr("calls")
ret=self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
# Enable valgrind
self.logPat = "/tmp/%s-*.valgrind" % self.name
self.CM.Env["valgrind-prefix"] = self.name
self.CM.rsh(node, "rm -f %s" % self.logPat, None)
ret=self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
for node in self.CM.Env["nodes"]:
(rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
for line in output:
self.CM.debug(line)
return self.success()
def teardown(self, node):
# Disable valgrind
self.CM.Env["valgrind-prefix"] = None
# Return all nodes to normal
ret=self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
return self.success()
def find_leaks(self):
# Check for leaks
leaked = []
self.stop = StopTest(self.CM)
for node in self.CM.Env["nodes"]:
(rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
rc = self.stop(node)
if not rc:
self.failure("Couldn't shut down %s" % node)
rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e ERROR.*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0)
if rc != 1:
leaked.append(node)
self.failure("Valgrind errors detected on %s" % node)
for line in ps_out:
self.CM.log(line)
(rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None)
for line in output:
self.CM.log(line)
(rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None)
for line in output:
self.CM.debug(line)
self.CM.rsh(node, "rm -f %s" % self.logPat, None)
return leaked
def __call__(self, node):
leaked = self.find_leaks()
if len(leaked) > 0:
return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ]
#######################################################################
class StandbyLoopTest(ValgrindTest):
#######################################################################
'''Check for memory leaks by putting a node in and out of standby for an hour'''
def __init__(self, cm):
ValgrindTest.__init__(self,cm)
self.name="StandbyLoop"
def __call__(self, node):
lpc = 0
delay = 2
failed = 0
done=time.time() + self.CM.Env["loop-minutes"]*60
while time.time() <= done and not failed:
lpc = lpc + 1
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "on"):
self.failure("can't set node %s to standby mode" % node)
failed = lpc
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "off"):
self.failure("can't set node %s to active mode" % node)
failed = lpc
leaked = self.find_leaks()
if failed:
return self.failure("Iteration %d failed" % failed)
elif len(leaked) > 0:
return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
AllTestClasses.append(StandbyLoopTest)
##############################################################################
class BandwidthTest(CTSTest):
##############################################################################
# Tests should not be cluster-manager-specific
# If you need to find out cluster manager configuration to do this, then
# it should be added to the generic cluster manager API.
'''Test the bandwidth which heartbeat uses'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Bandwidth"
self.start = StartTest(cm)
self.__setitem__("min",0)
self.__setitem__("max",0)
self.__setitem__("totalbandwidth",0)
self.tempfile = tempfile.mktemp(".cts")
self.startall = SimulStartLite(cm)
def __call__(self, node):
'''Perform the Bandwidth test'''
self.incr("calls")
if self.CM.upcount()<1:
return self.skipped()
Path = self.CM.InternalCommConfig()
if "ip" not in Path["mediatype"]:
return self.skipped()
port = Path["port"][0]
port = int(port)
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
time.sleep(5) # We get extra messages right after startup.
fstmpfile = "/var/run/band_estimate"
dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
% (port, fstmpfile)
rc = self.CM.rsh(node, dumpcmd)
if rc == 0:
farfile = "root@%s:%s" % (node, fstmpfile)
self.CM.rsh.cp(farfile, self.tempfile)
Bandwidth = self.countbandwidth(self.tempfile)
if not Bandwidth:
self.CM.log("Could not compute bandwidth.")
return self.success()
intband = int(Bandwidth + 0.5)
self.CM.log("...bandwidth: %d bits/sec" % intband)
self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
if self.Stats["min"] == 0:
self.Stats["min"] = Bandwidth
if Bandwidth > self.Stats["max"]:
self.Stats["max"] = Bandwidth
if Bandwidth < self.Stats["min"]:
self.Stats["min"] = Bandwidth
self.CM.rsh(node, "rm -f %s" % fstmpfile)
os.unlink(self.tempfile)
return self.success()
else:
return self.failure("no response from tcpdump command [%d]!" % rc)
def countbandwidth(self, file):
fp = open(file, "r")
fp.seek(0)
count = 0
sum = 0
while 1:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count=count+1
linesplit = string.split(line," ")
for j in range(len(linesplit)-1):
if linesplit[j]=="udp": break
if linesplit[j]=="length:": break
try:
sum = sum + int(linesplit[j+1])
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T1 = linesplit[0]
timesplit = string.split(T1,":")
time2split = string.split(timesplit[2],".")
time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
break
while count < 100:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count = count+1
linessplit = string.split(line," ")
for j in range(len(linessplit)-1):
if linessplit[j] =="udp": break
if linesplit[j]=="length:": break
try:
sum=int(linessplit[j+1])+sum
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T2 = linessplit[0]
timesplit = string.split(T2,":")
time2split = string.split(timesplit[2],".")
time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
time = time2-time1
if (time <= 0):
return 0
return (sum*8)/time
def is_applicable(self):
'''BandwidthTest never applicable'''
return 0
AllTestClasses.append(BandwidthTest)
###################################################################
class ResourceRecover(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ResourceRecover"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.max=30
self.rid=None
#self.is_unsafe = 1
self.benchmark = 1
# these are the values used for the new LRM API call
self.action = "asyncmon"
self.interval = 0
def __call__(self, node):
'''Perform the 'ResourceRecover' test. '''
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
resourcelist = self.CM.active_resources(node)
# if there are no active resources, skip the test
if len(resourcelist)==0:
self.CM.log("No active resources on %s" % node)
return self.skipped()
self.rid = self.CM.Env.RandomGen.choice(resourcelist)
rsc = None
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self.CM, line)
if tmp.id == self.rid:
rsc = tmp
# Handle anonymous clones that get renamed
self.rid = rsc.clone_id
break
if not rsc:
return self.failure("Could not find %s in the resource list" % self.rid)
self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
pats = []
pats.append("Updating failcount for %s on .* after .* %s"
% (self.rid, self.action))
if rsc.managed():
pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid)
if rsc.unique():
pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid)
pats.append("crmd:.* LRM operation %s_start_0.*confirmed.*ok" % self.rid)
else:
# Anonymous clones may get restarted with a different clone number
pats.append("crmd:.* Performing .* op=.*_start_0")
pats.append("crmd:.* LRM operation .*_start_0.*confirmed.*ok")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
self.CM.rsh(node, "crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node))
+ self.set_timer("recover")
watch.lookforall()
+ self.log_timer("recover")
self.CM.cluster_stable()
recovered=self.CM.ResourceLocation(self.rid)
if watch.unmatched:
return self.failure("Patterns not found: %s" % repr(watch.unmatched))
elif rsc.unique() and len(recovered) > 1:
return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
elif len(recovered) > 0:
self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered)))
elif rsc.managed():
return self.failure("%s was not recovered and is inactive" % self.rid)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """Updating failcount for %s""" % self.rid,
"""Unknown operation: fail""",
"""ERROR: sending stonithRA op to stonithd failed.""",
"""ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval),
"""ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
]
AllTestClasses.append(ResourceRecover)
###################################################################
class ComponentFail(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ComponentFail"
self.startall = SimulStartLite(cm)
self.complist = cm.Components()
self.patterns = []
self.okerrpatterns = []
self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'ComponentFail' test. '''
self.incr("calls")
self.patterns = []
self.okerrpatterns = []
# start all nodes
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.CM.cluster_stable(self.CM["StableTime"]):
return self.failure("Setup failed - unstable")
node_is_dc = self.CM.is_node_dc(node, None)
# select a component to kill
chosen = self.CM.Env.RandomGen.choice(self.complist)
while chosen.dc_only == 1 and node_is_dc == 0:
chosen = self.CM.Env.RandomGen.choice(self.complist)
self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
self.incr(chosen.name)
if chosen.name != "aisexec":
if self.CM["Name"] != "crm-lha" or chosen.name != "pengine":
self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.patterns.extend(chosen.pats)
if node_is_dc:
self.patterns.extend(chosen.dc_pats)
# In an ideal world, this next stuff should be in the "chosen" object as a member function
if self.CM["Name"] == "crm-lha" and chosen.triggersreboot:
# Make sure the node goes down and then comes back up if it should reboot...
for other in self.CM.Env["nodes"]:
if other != node:
self.patterns.append(self.CM["Pat:They_stopped"] %(other, node))
self.patterns.append(self.CM["Pat:Slave_started"] % node)
self.patterns.append(self.CM["Pat:Local_started"] % node)
if chosen.dc_only:
# Sometimes these will be in the log, and sometimes they won't...
self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name))
self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node)
self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name))
self.okerrpatterns.append("ERROR: Client .* exited with return code")
else:
# Sometimes this won't be in the log...
self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildExit"])
# supply a copy so self.patterns doesn't end up empty
tmpPats = []
tmpPats.extend(self.patterns)
self.patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops; depending on Env["at-boot"] we might need to change the node's status
stonithPats = []
stonithPats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)
stonith = CTS.LogWatcher(self.CM["LogFileName"], stonithPats, 0)
stonith.setwatch()
# set the watch for stable
watch = CTS.LogWatcher(
self.CM["LogFileName"], tmpPats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
# kill the component
chosen.kill(node)
# check to see if Heartbeat noticed
matched = watch.lookforall(allow_multiple_matches=1)
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Checking if %s was shot" % node)
shot = stonith.look(60)
if shot:
self.CM.debug("Found: "+ repr(shot))
self.CM.ShouldBeStatus[node]="down"
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting for any STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self.okerrpatterns.extend(self.patterns)
return self.okerrpatterns
AllTestClasses.append(ComponentFail)
####################################################################
class SplitBrainTest(CTSTest):
####################################################################
'''Test split-brain handling: break the communication path between the nodes
and check whether both partitions try to take over the resources'''
def __init__(self,cm):
CTSTest.__init__(self,cm)
self.name = "SplitBrain"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.is_experimental = 1
def isolate_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition))
if len(other_nodes) == 0:
return 1
self.CM.debug("Creating partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
if not self.CM.isolate_node(node, other_nodes):
self.CM.log("Could not isolate %s" % node)
return 0
return 1
def heal_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]))
if len(other_nodes) == 0:
return 1
self.CM.debug("Healing partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
self.CM.unisolate_node(node, other_nodes)
def __call__(self, node):
'''Perform split-brain test'''
self.incr("calls")
self.passed = 1
partitions = {}
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
while 1:
# Retry until we get multiple partitions
partitions = {}
p_max = len(self.CM.Env["nodes"])
for node in self.CM.Env["nodes"]:
p = self.CM.Env.RandomGen.randint(1, p_max)
if not partitions.has_key(p):
partitions[p]= []
partitions[p].append(node)
p_max = len(partitions.keys())
if p_max > 1:
break
# else, try again
self.CM.debug("Created %d partitions" % p_max)
for key in partitions.keys():
self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
# Disabling STONITH to reduce test complexity for now
self.CM.rsh(node, "crm_attribute -n stonith-enabled -v false")
for key in partitions.keys():
self.isolate_partition(partitions[key])
count = 30
while count > 0:
if len(self.CM.find_partitions()) != p_max:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Expected partitions were not created")
# Target number of partitions formed - wait for stability
if not self.CM.cluster_stable():
self.failure("Partitioned cluster not stable")
# Now audit the cluster state
self.CM.partitions_expected = p_max
if not self.audit():
self.failure("Audits failed")
self.CM.partitions_expected = 1
# And heal them again
for key in partitions.keys():
self.heal_partition(partitions[key])
# Wait for a single partition to form
count = 30
while count > 0:
if len(self.CM.find_partitions()) != 1:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not reform")
# Wait for it to have the right number of members
count = 30
while count > 0:
members = []
partitions = self.CM.find_partitions()
if len(partitions) > 0:
members = partitions[0].split()
if len(members) != len(self.CM.Env["nodes"]):
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not completely reform")
# Wait up to 20 minutes - the delay is preferable to
# trying to continue in a messed-up state
if not self.CM.cluster_stable(1200):
self.failure("Reformed cluster not stable")
answer = raw_input('Continue? [nY]')
if answer and answer == "n":
raise ValueError("Reformed cluster not stable")
# Turn fencing back on
if self.CM.Env["DoStonith"]:
self.CM.rsh(node, "crm_attribute -D -n stonith-enabled")
self.CM.cluster_stable()
if self.passed:
return self.success()
return self.failure("See previous errors")
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return [
"Another DC detected:",
"ERROR: attrd_cib_callback: .*Application of an update diff failed",
"crmd_ha_msg_callback:.*not in our membership list",
"CRIT:.*node.*returning after partition",
]
def is_applicable(self):
if not self.is_applicable_common():
return 0
return len(self.CM.Env["nodes"]) > 2
AllTestClasses.append(SplitBrainTest)
####################################################################
class Reattach(CTSTest):
####################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Reattach"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
self.is_unsafe = 0 # Handled by canrunnow()
def setup(self, node):
return self.startall(None)
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
if self.find_ocfs2_resources(node):
self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
return 0
return 1
def __call__(self, node):
self.incr("calls")
pats = []
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Disable resource management")
self.CM.rsh(node, "crm_attribute -n is-managed-default -v false")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not disabled")
pats = []
pats.append("crmd:.*Performing.*_stop_0")
pats.append("crmd:.*Performing.*_start_0")
pats.append("crmd:.*Performing.*_promote_0")
pats.append("crmd:.*Performing.*_demote_0")
pats.append("crmd:.*Performing.*_migrate_.*_0")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
self.CM.debug("Shutting down the cluster")
ret = self.stopall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Couldn't shut down the cluster")
self.CM.debug("Bringing the cluster back up")
ret = self.startall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Couldn't restart the cluster")
if self.local_badnews("ResourceActivity:", watch):
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Resources stopped or started during cluster restart")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not enabled")
self.CM.cluster_stable()
# Ignore actions for STONITH resources
ignore = []
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rclass == "stonith":
self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id)
ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id)
if self.local_badnews("ResourceActivity:", watch, ignore):
return self.failure("Resources stopped or started after resource management was re-enabled")
return ret
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
"You may ignore this error if it is unmanaged.",
"pingd: .*ERROR: send_ipc_message:",
"pingd: .*ERROR: send_update:",
"lrmd: .*ERROR: notify_client:",
]
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return None
return 1
AllTestClasses.append(Reattach)
####################################################################
class SpecialTest1(CTSTest):
####################################################################
'''Set up a custom test to cause quorum failure issues for Andrew'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SpecialTest1"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
'''Perform the 'SpecialTest1' test for Andrew. '''
self.incr("calls")
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return ret
# Start the selected node
ret = self.restart1(node)
if not ret:
return ret
# Start all remaining nodes
ret = self.startall(None)
return ret
AllTestClasses.append(SpecialTest1)
####################################################################
class HAETest(CTSTest):
####################################################################
'''Base class for tests that exercise the HAE (DLM/O2CB/OCFS2) resource stack'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="HAETest"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_loop = 1
def setup(self, node):
# Start all remaining nodes
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
return self.success()
def wait_on_state(self, node, resource, expected_clones, attempts=240):
while attempts > 0:
active=0
(rc, lines) = self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
# Hack until crm_resource does the right thing
if rc == 0 and lines:
active = len(lines)
if len(lines) == expected_clones:
return 1
elif rc == 1:
self.CM.debug("Resource %s is still inactive" % resource)
elif rc == 234:
self.CM.log("Unknown resource %s" % resource)
return 0
elif rc == 246:
self.CM.log("Cluster is inactive")
return 0
elif rc != 0:
self.CM.log("Call to crm_resource failed, rc=%d" % rc)
return 0
else:
self.CM.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
attempts -= 1
time.sleep(1)
return 0
def find_dlm(self, node):
self.r_dlm = None
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "controld" and r.parent != "NA":
self.CM.debug("Found dlm: %s" % self.r_dlm)
self.r_dlm = r.parent
return 1
return 0
def find_hae_resources(self, node):
self.r_dlm = None
self.r_o2cb = None
self.r_ocfs2 = []
if self.find_dlm(node):
self.find_ocfs2_resources(node)
def is_applicable(self):
if not self.is_applicable_common():
return 0
if self.CM.Env["Schema"] == "hae":
return 1
return None
####################################################################
class HAERoleTest(HAETest):
####################################################################
def __init__(self, cm):
'''Lars' mount/unmount test for the HA extension. '''
HAETest.__init__(self,cm)
self.name="HAERoleTest"
def change_state(self, node, resource, target):
rc = self.CM.rsh(node, "crm_resource -r %s -p target-role -v %s --meta" % (resource, target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
delay = 2
done=time.time() + self.CM.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.CM.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "Stopped")
if not self.wait_on_state(node, self.r_dlm, 0):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "Started")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAERoleTest)
####################################################################
class HAEStandbyTest(HAETest):
####################################################################
'''Cycle nodes in and out of standby and verify the HAE resource stack recovers'''
def __init__(self, cm):
HAETest.__init__(self,cm)
self.name="HAEStandbyTest"
def change_state(self, node, resource, target):
rc = self.CM.rsh(node, "crm_standby -l reboot -v %s" % (target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
done=time.time() + self.CM.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.CM.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "true")
if not self.wait_on_state(node, self.r_dlm, clone_max-1):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "false")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAEStandbyTest)
###################################################################
class NearQuorumPointTest(CTSTest):
###################################################################
'''
This test brings larger clusters near the quorum point (50%).
In addition, it will test doing starts and stops at the same time.
Here is how I think it should work:
- loop over the nodes and decide randomly which will be up and which
will be down.  Use a 50% probability for each of up/down.
- figure out what to do to get into that state from the current state
- in parallel, bring up those going up and bring down those going down.
'''
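# Illustrative example with hypothetical node names: given nodes
# ["n1", "n2", "n3", "n4", "n5"], one run of the coin-flip below might yield
# startset = ["n1", "n4"] and stopset = ["n2", "n3", "n5"].  Nodes already in
# the desired state are left alone; the remaining starts and stops are issued
# with the non-blocking variants so they overlap in time.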
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="NearQuorumPoint"
def __call__(self, dummy):
'''Perform the 'NearQuorumPoint' test. '''
self.incr("calls")
startset = []
stopset = []
#decide what to do with each node
for node in self.CM.Env["nodes"]:
action = self.CM.Env.RandomGen.choice(["start","stop"])
#action = self.CM.Env.RandomGen.choice(["start","stop","no change"])
if action == "start" :
startset.append(node)
elif action == "stop" :
stopset.append(node)
self.CM.debug("start nodes:" + repr(startset))
self.CM.debug("stop nodes:" + repr(stopset))
#add search patterns
watchpats = [ ]
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
watchpats.append(self.CM["Pat:We_stopped"] % node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
#watchpats.append(self.CM["Pat:Slave_started"] % node)
watchpats.append(self.CM["Pat:Local_started"] % node)
else:
for stopping in stopset:
if self.CM.ShouldBeStatus[stopping] == "up":
watchpats.append(self.CM["Pat:They_stopped"] % (node, stopping))
if len(watchpats) == 0:
return self.skipped()
if len(startset) != 0:
watchpats.append(self.CM["Pat:DC_IDLE"])
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
#begin actions
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
#get the result
if watch.lookforall():
self.CM.cluster_stable()
return self.success()
self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched))
#get the "bad" nodes
upnodes = []
for node in stopset:
if self.CM.StataCM(node) == 1:
upnodes.append(node)
downnodes = []
for node in startset:
if self.CM.StataCM(node) == 0:
downnodes.append(node)
if upnodes == [] and downnodes == []:
self.CM.cluster_stable()
# Make sure they're completely down with no residue
for node in stopset:
self.CM.rsh(node, self.CM["StopCmd"])
return self.success()
if len(upnodes) > 0:
self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes))
if len(downnodes) > 0:
self.CM.log("Warn: Unstartable nodes: " + repr(downnodes))
return self.failure()
AllTestClasses.append(NearQuorumPointTest)
###################################################################
class RollingUpgradeTest(CTSTest):
###################################################################
'''Perform a rolling upgrade of the cluster'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="RollingUpgrade"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def setup(self, node):
# Start all remaining nodes
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.CM.Env["nodes"]:
if not self.downgrade(node, None):
return self.failure("Couldn't downgrade %s" % node)
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.CM.Env["nodes"]:
if not self.upgrade(node, None):
return self.failure("Couldn't upgrade %s" % node)
return self.success()
def install(self, node, version, start=1, flags="--force"):
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
self.CM.log("Installing %s on %s with %s" % (version, node, flags))
if not self.stop(node):
return self.failure("stop failure: "+node)
rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir)
(rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
for line in lines:
line = line[:-1]
rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
if start and not self.start(node):
return self.failure("start failure: "+node)
return self.success()
def upgrade(self, node, start=1):
return self.install(node, self.CM.Env["current-version"], start)
def downgrade(self, node, start=1):
return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps")
def __call__(self, node):
'''Perform the 'Rolling Upgrade' test. '''
self.incr("calls")
for node in self.CM.Env["nodes"]:
if self.upgrade(node):
return self.failure("Couldn't upgrade %s" % node)
self.CM.cluster_stable()
return self.success()
def is_applicable(self):
if not self.is_applicable_common():
return None
if not self.CM.Env.has_key("rpm-dir"):
return None
if not self.CM.Env.has_key("current-version"):
return None
if not self.CM.Env.has_key("previous-version"):
return None
return 1
# Register RestartTest as a good test to run
AllTestClasses.append(RollingUpgradeTest)
###################################################################
class BSC_AddResource(CTSTest):
###################################################################
'''Add a resource to the cluster'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="AddResource"
self.resource_offset = 0
self.cib_cmd="""cibadmin -C -o %s -X '%s' """
def __call__(self, node):
self.incr("calls")
self.resource_offset = self.resource_offset + 1
r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
start_pat = "crmd.*%s_start_0.*confirmed.*ok"
patterns = []
patterns.append(start_pat % r_id)
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
fields = string.split(self.CM.Env["IPBase"], '.')
fields[3] = str(int(fields[3])+1)
ip = string.join(fields, '.')
self.CM.Env["IPBase"] = ip
if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
return self.failure("Make resource %s failed" % r_id)
failed = 0
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.CM.log ("Warn: Pattern not found: %s" % (regex))
failed = 1
if failed:
return self.failure("Resource pattern(s) not found")
if not self.CM.cluster_stable(self.CM["DeadTime"]):
return self.failure("Unstable cluster")
return self.success()
def make_ip_resource(self, node, id, rclass, type, ip):
self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
rsc_xml="""
<primitive id="%s" class="%s" type="%s" provider="heartbeat">
<instance_attributes id="%s"><attributes>
<nvpair id="%s" name="ip" value="%s"/>
</attributes></instance_attributes>
</primitive>""" % (id, rclass, type, id, id, ip)
node_constraint="""
<rsc_location id="run_%s" rsc="%s">
<rule id="pref_run_%s" score="100">
<expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
</rule>
</rsc_location>""" % (id, id, id, id, node)
rc = 0
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
if rc != 0:
self.CM.log("Constraint creation failed: %d" % rc)
return None
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
if rc != 0:
self.CM.log("Resource creation failed: %d" % rc)
return None
return 1
def is_applicable(self):
if self.CM.Env["DoBSC"]:
return 1
return None
class SimulStopLite(CTSTest):
###################################################################
'''Stop any active nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStopLite"
def __call__(self, dummy):
'''Perform the 'SimulStopLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.incr("WasStarted")
watchpats.append(self.CM["Pat:We_stopped"] % node)
#if self.CM.Env["use_logd"]:
# watchpats.append(self.CM["Pat:Logd_stopped"] % node)
if len(watchpats) == 0:
self.CM.clear_all_caches()
return self.success()
# Stop all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
- self.set_starttime()
+ self.set_timer()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
if watch.lookforall():
self.CM.clear_all_caches()
# Make sure they're completely down with no residue
for node in self.CM.Env["nodes"]:
self.CM.rsh(node, self.CM["StopCmd"])
return self.success()
did_fail=0
up_nodes = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 1:
did_fail=1
up_nodes.append(node)
if did_fail:
return self.failure("Active nodes exist: " + repr(up_nodes))
self.CM.log("Warn: All nodes stopped but CTS didnt detect: "
+ repr(watch.unmatched))
self.CM.clear_all_caches()
return self.failure("Missing log message: "+repr(watch.unmatched))
def is_applicable(self):
'''SimulStopLite is a setup test and never applicable'''
return 0
###################################################################
class SimulStartLite(CTSTest):
###################################################################
'''Start any stopped nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStartLite"
def __call__(self, dummy):
'''Perform the 'SimulStartLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
uppat = self.CM["Pat:Slave_started"]
if self.CM.upcount() == 0:
uppat = self.CM["Pat:Local_started"]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.incr("WasStopped")
watchpats.append(uppat % node)
if len(watchpats) == 0:
return self.success()
watchpats.append(self.CM["Pat:DC_IDLE"])
# Start all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
- self.set_starttime()
+ self.set_timer()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
if watch.lookforall():
for attempt in (1, 2, 3, 4, 5):
if self.CM.cluster_stable():
return self.success()
return self.failure("Cluster did not stabilize")
did_fail=0
unstable = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 0:
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstarted nodes exist: " + repr(unstable))
unstable = []
for node in self.CM.Env["nodes"]:
if not self.CM.node_stable(node):
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstable cluster nodes exist: "
+ repr(unstable))
self.CM.log("ERROR: All nodes started but CTS didnt detect: "
+ repr(watch.unmatched))
return self.failure()
def is_applicable(self):
'''SimulStartLite is a setup test and never applicable'''
return 0
def TestList(cm, audits):
result = []
for testclass in AllTestClasses:
bound_test = testclass(cm)
if bound_test.is_applicable():
bound_test.Audits = audits
result.append(bound_test)
return result
def BenchTestList(cm, audits):
all = TestList(cm, audits)
result = []
for test in all:
if test.benchmark:
result.append(test)
return result
# vim:ts=4:sw=4:et:
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 283b992a96..c88229ff28 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,106 +1,109 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
+helpdir = $(datadir)/$(PACKAGE)
+
ascii = crm_cli.txt crm_fencing.txt
+help_DATA = crm_cli.txt
docbook = Pacemaker_Explained
man_MANS = cibadmin.8 crm_resource.8
doc_DATA = README.hb2openais $(ascii) $(generated_docs)
publican_docs =
generated_docs =
XML_FILES := $(wildcard *.xml)
PNG_FILES := $(wildcard images/*.png)
if BUILD_ASCIIDOC
generated_docs += $(ascii:%.txt=%.html)
endif
if BUILD_DOCBOOK
publican_docs += $(docbook)
endif
EXTRA_DIST = $(man_MANS) $(docbook:%=%.xml)
index.html:
echo "Building documentation index"
echo "<html><body><p>The following <a href=\"http://www.clusterlabs.org/wiki/Pacemaker\">Pacemaker</a> documentation was generated on `date` from version: $(BUILD_VERSION)</p>" > index.html
echo "<ol>" >> index.html
for doc in $(generated_docs); do \
echo "<li><a href=\"$$doc\">$$doc</a></li>" >> index.html; \
done
if BUILD_DOCBOOK
for book in $(docbook); do \
for lang in `ls -1 $(docbook)/publish`; do \
echo "<li>$$book ($$lang)<ul>" >> index.html; \
find $$book/publish/$$lang -name "*.pdf" -exec echo -n "<li><a href=\"{}\">" \; -exec basename {} \; -exec echo "</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name "*.txt" -exec echo -n "<li><a href=\"{}\">" \; -exec basename {} \; -exec echo "</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name html -exec echo "<li><a href=\"{}/$$book/index.html\">$$book HTML</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name html-single -exec echo "<li><a href=\"{}/$$book/index.html\">$$book HTML (single page)</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
echo "</ul></li>" >> index.html; \
done; \
done
endif
echo "</ol>" >> index.html
echo "<p>You can find <a href=\"http://www.clusterlabs.org/wiki/Documentation\">additional documentation</a> and details about the Pacemaker project at <a href=\"http://www.clusterlabs.org\">http://www.clusterlabs.org</a></p>" >> index.html
echo "</body></html>" >> index.html
%.html: %.txt
$(ASCIIDOC) --unsafe --backend=xhtml11 $<
%.txt: %/en-US/*.xml
cd $* && $(PUBLICAN) build --publish --langs=all --formats=pdf,html,html-single,txt
cp $*/publish/en-US/Pacemaker/1.0/txt/$*/$@ $@
if BUILD_DOCBOOK
docbook_txt = $(docbook:%=%.txt)
all-local: $(docbook_txt)
#install-data-local: all-local
install-data-local: all-local
for book in $(docbook); do \
filelist=`find $$book/publish -print`; \
for f in $$filelist; do \
p=`echo $$f | sed s:publish/:: | sed s:Pacemaker/::`; \
if [ -d $$f ]; then \
echo $(INSTALL) -d 775 $(DESTDIR)/$(docdir)/$$p; \
else \
echo $(INSTALL) -m 644 $$f $(DESTDIR)/$(docdir)/$$p; \
fi \
done; \
done
endif
push: all-local index.html
echo Uploading current documentation set to clusterlabs.org
rsync -rtz --progress index.html root@oss.clusterlabs.org:/srv/www/extras/doc/
if BUILD_DOCBOOK
for book in $(docbook); do \
echo Uploading $$book...; \
rsync -rtz --progress --delete $$book/publish/* root@oss.clusterlabs.org:/srv/www/extras/doc/; \
done
endif
clean-local:
-rm -rf $(generated_docs) $(docbook)/tmp $(docbook)/publish
diff --git a/doc/Pacemaker_Explained/en-US/Ap-FAQ.xml b/doc/Pacemaker_Explained/en-US/Ap-FAQ.xml
index e7c095c75e..1f12328c18 100644
--- a/doc/Pacemaker_Explained/en-US/Ap-FAQ.xml
+++ b/doc/Pacemaker_Explained/en-US/Ap-FAQ.xml
@@ -1,96 +1,96 @@
<appendix id="ap-faq">
<title>FAQ</title>
<qandaset defaultlabel="qanda">
<qandadiv>
<title>History</title>
<qandaentry>
<question>
<para>Why is the Project Called Pacemaker?</para>
</question>
<answer>
<para>First of all, the reason it's not called the CRM is the abundance of <ulink url="http://en.wikipedia.org/wiki/CRM">terms</ulink> that are commonly abbreviated to those three letters.</para>
<para>
The Pacemaker name came from <ulink url="http://khamsouk.souvanlasy.com/">Kham</ulink>, a good friend of mine, and was originally used by a Java GUI that I was prototyping in early 2007.
Alas, other commitments have prevented the GUI from progressing much and, when it came time to choose a name for this project, Lars suggested it was an even better fit for an independent CRM.
</para>
<para>
The idea stems from the analogy between the role of this software and that of the little device that keeps the human heart pumping.
Pacemaker monitors the cluster and intervenes when necessary to ensure the smooth operation of the services it provides.
</para>
<para>There were a number of other names (and acronyms) tossed around, but suffice it to say "Pacemaker" was the best.</para>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>Why was the Pacemaker Project Created?</para>
</question>
<answer>
<para>The decision was made to spin off the CRM into its own project after the 2.1.3 Heartbeat release in order to</para>
<itemizedlist>
<listitem><para>support both the OpenAIS and Heartbeat cluster stacks equally</para></listitem>
<listitem><para>decouple the release cycles of two projects at very different stages of their life-cycles</para></listitem>
<listitem><para>foster clearer package boundaries, thus leading to</para></listitem>
<listitem><para>better and more stable interfaces</para></listitem>
</itemizedlist>
</answer>
</qandaentry>
</qandadiv>
<qandadiv>
<title>Setup</title>
<qandaentry>
<question>
<para>What Messaging Layers are Supported?</para>
</question>
<answer>
<itemizedlist>
<listitem><para>OpenAIS (<ulink url="http://www.openais.org/"/>)</para></listitem>
<listitem><para>Heartbeat (<ulink url="http://linux-ha.org/"/>)</para></listitem>
</itemizedlist>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>Can I Choose which Messaging Layer to use at Run Time?</para>
</question>
<answer>
<para>Yes. The CRM will automatically detect who started it and behave accordingly.</para>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>Can I Have a Mixed Heartbeat-OpenAIS Cluster?</para>
</question>
<answer>
<para>No.</para>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>Which Messaging Layer Should I Choose?</para>
</question>
<answer>
<para>This is discussed in <xref linkend="ap-install"/>.</para>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>Where Can I Get Pre-built Packages?</para>
</question>
<answer>
- <para>Official packages for most major .rpm and .deb based distributions are available from:</para>
- <para><ulink url="http://download.opensuse.org/repositories/server:/ha-clustering/"/></para>
- <para>For more information, see our <ulink url="http://clusterlabs.org/wiki/Install">installation page</ulink>.</para>
+ <para>Official packages for most major .rpm based distributions are available from:</para>
+ <para><ulink url="http://www.clusterlabs.org/rpm"/></para>
+ <para>For Debian packages, building from source and details on using the above repositories, see our <ulink url="http://clusterlabs.org/wiki/Install">installation page</ulink>.</para>
</answer>
</qandaentry>
<qandaentry>
<question>
<para>What Versions of Pacemaker Are Supported?</para>
</question>
<answer>
<para>Please refer to the <ulink url="http://clusterlabs.org/wiki/Releases">Releases</ulink> page for an up-to-date list of versions supported directly by the project.</para>
<para>When seeking assistance, please try to ensure you have one of these versions.</para>
</answer>
</qandaentry>
</qandadiv>
</qandaset>
</appendix>
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml b/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml
index c5ef7570e5..f0e511d061 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml
+++ b/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml
@@ -1,84 +1,84 @@
<chapter id="ch-stonith">
<title>Protecting Your Data - STONITH</title>
<section id="s-stonith-why">
<title>Why You Need STONITH</title>
<para><ulink url="http://en.wikipedia.org/wiki/STONITH">STONITH</ulink> is an acronym for Shoot-The-Other-Node-In-The-Head and it protects your data from being corrupted by rouge nodes or concurrent access.</para>
<para>
Just because a node is unresponsive doesn't mean it has stopped accessing your data.
The only way to be 100% sure that your data is safe is to use STONITH, so we can be certain that the node is truly offline before allowing the data to be accessed from another node.
</para>
<para>
STONITH also has a role to play in the event that a clustered service cannot be stopped.
In this case, the cluster uses STONITH to force the whole node offline, thereby making it safe to start the service elsewhere.
</para>
</section>
<section id="s-stonith-choose">
<title>What STONITH Device Should You Use</title>
<para>It is crucial that the STONITH device allows the cluster to differentiate between a node failure and a network failure.</para>
<para>
The biggest mistake people make in choosing a STONITH device is to use a remote power switch (such as many on-board IPMI controllers) that shares power with the node it controls.
In such cases, the cluster cannot be sure if the node is really offline, or active and suffering from a network fault.
</para>
<para>Likewise, any device that relies on the machine being active (such as SSH-based "devices" used during testing) is inappropriate.</para>
</section>
<section id="s-stonith-configure">
<title>Configuring STONITH</title>
<orderedlist>
<listitem>
<para>Find the correct driver: <command>stonith -L</command></para>
</listitem>
<listitem>
<para>
Since every device is different, the parameters needed to configure it will vary.
To find out the parameters required by the device: <command>stonith -t <replaceable>type</replaceable> -n</command>
</para>
<para>Hopefully the developers chose names that make sense; if not, you can query for some additional information by finding an active cluster node and running:</para>
<para><command>lrmadmin -M stonith <replaceable>type</replaceable> pacemaker</command></para>
<para>The output should be XML formatted text containing additional parameter descriptions</para>
</listitem>
<listitem><para>Create a file called stonith.xml containing a primitive resource with a class of stonith, a type of <replaceable>type</replaceable> and a parameter for each of the values returned in step 2</para></listitem>
<listitem><para>Create a clone from the primitive resource if the device can shoot more than one node<emphasis> and supports multiple simultaneous connections</emphasis>.</para></listitem>
<listitem><para>Upload it into the CIB using cibadmin: <command>cibadmin -C -o resources --xml-file <filename>stonith.xml</filename></command></para></listitem>
</orderedlist>
<section id="s-stonith-example">
<title>Example</title>
<para>Assuming we have an IBM BladeCenter consisting of four nodes and the management interface is active on 10.0.0.1, we would choose the <literal>external/ibmrsa</literal> driver in step 2 and obtain the following list of parameters</para>
<figure>
<title>Obtaining a list of STONITH Parameters</title>
<screen>
<userinput>
stonith -t external/ibmrsa -n
</userinput>
<computeroutput>
hostname ipaddr userid passwd type
</computeroutput>
</screen>
</figure>
<para>from which we would create a STONITH resource fragment that might look like this</para>
<example>
<title>Sample STONITH Resource</title>
<programlisting>
<![CDATA[
<clone id="Fencing">
<meta_attributes id="fencing">
<nvpair id="Fencing-unique" name="globally-unique" value="false"/>
</meta_attributes>
<primitive id="rsa" class="stonith" type="external/ibmrsa">
<operations>
<op id="rsa-mon-1" name="monitor" interval="120s"/>
</operations>
<instance_attributes id="rsa-parameters">
- <nvpair id="rsa-attr-1" name="hostname" value="node1 node2 node3"/>
+ <nvpair id="rsa-attr-1" name="hostname" value="node1 node2 node3 node4"/>
<nvpair id="rsa-attr-1" name="ipaddr" value="10.0.0.1"/>
<nvpair id="rsa-attr-1" name="userid" value="testuser"/>
<nvpair id="rsa-attr-1" name="passwd" value="abc123"/>
<nvpair id="rsa-attr-1" name="type" value="ibm"/>
</instance_attributes>
</primitive>
</clone>
]]>
</programlisting>
</example>
</section>
</section>
</chapter>
diff --git a/doc/publican-clusterlabs/COPYING b/doc/publican-clusterlabs/COPYING
new file mode 100644
index 0000000000..44306613e2
--- /dev/null
+++ b/doc/publican-clusterlabs/COPYING
@@ -0,0 +1 @@
+SETUP This file should contain your COPYRIGHT License
\ No newline at end of file
diff --git a/doc/publican-clusterlabs/defaults.cfg b/doc/publican-clusterlabs/defaults.cfg
new file mode 100644
index 0000000000..3f6cdc5c1d
--- /dev/null
+++ b/doc/publican-clusterlabs/defaults.cfg
@@ -0,0 +1,6 @@
+# Config::Simple 4.59
+# Thu Nov 12 09:56:27 2009
+
+doc_url: http://www.clusterlabs.org/wiki/Documentation
+prod_url: http://www.clusterlabs.org
+
diff --git a/doc/publican-clusterlabs/en-US/Feedback.xml b/doc/publican-clusterlabs/en-US/Feedback.xml
new file mode 100644
index 0000000000..7f0fe22ae3
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/Feedback.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+]>
+<section>
+ <title>We Need Feedback!</title>
+ <indexterm>
+ <primary>feedback</primary>
+ <secondary>contact information for this manual</secondary>
+ </indexterm>
+ <para>
+ If you find a typographical error in this manual, or if you have thought of a way to make this manual better, we would love to hear from you!
+ Please submit a report in Bugzilla: <ulink url="http://developerbugs.linux-foundation.org/enter_bug.cgi?product=Pacemaker">http://developerbugs.linux-foundation.org/</ulink> against the product <application>&PRODUCT;.</application>
+ </para>
+ <para>
+ When submitting a bug report, be sure to mention the manual's identifier: <citetitle>&BOOKID;</citetitle>
+ </para>
+ <para>
+ If you have a suggestion for improving the documentation, try to be as specific as possible when describing it.
+ If you have found an error, please include the section number and some of the surrounding text so we can find it easily.
+ </para>
+</section>
diff --git a/doc/publican-clusterlabs/en-US/Legal_Notice.xml b/doc/publican-clusterlabs/en-US/Legal_Notice.xml
new file mode 100644
index 0000000000..822fa40f85
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/Legal_Notice.xml
@@ -0,0 +1,35 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE legalnotice PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+]>
+<legalnotice>
+ <para>
+ Copyright <trademark class="copyright"></trademark> &YEAR; &HOLDER;.
+ </para>
+ <para>
+ The text of and illustrations in this document are licensed under a Creative Commons Attribution–Share Alike 3.0 Unported license ("CC-BY-SA"). An explanation of CC-BY-SA is available at <ulink url="http://creativecommons.org/licenses/by-sa/3.0/" />.
+ </para>
+ <para>
+ In accordance with CC-BY-SA, if you distribute this document or an adaptation of it, you must provide the URL for the original version.
+ </para>
+ <para>
+ In addition to the requirements of this license, the following activities are looked upon favorably:
+ <orderedlist>
+ <listitem>
+ <para>
+ If you are distributing Open Publication works on hardcopy or CD-ROM, you provide email notification to the authors of your intent to redistribute at least thirty days before your manuscript or media freeze, to give the authors time to provide updated documents.
+ This notification should describe modifications, if any, made to the document.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ All substantive modifications (including deletions) be either clearly marked up in the document or else described in an attachment to the document.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ Finally, while it is not mandatory under this license, it is considered good form to offer a free copy of any hardcopy or CD-ROM expression of the author(s) work.
+ </para>
+ </listitem>
+ </orderedlist>
+ </para>
+</legalnotice>
diff --git a/doc/publican-clusterlabs/en-US/css/overrides.css b/doc/publican-clusterlabs/en-US/css/overrides.css
new file mode 100644
index 0000000000..7000e52428
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/css/overrides.css
@@ -0,0 +1,57 @@
+a:link {
+ color:#843A39;
+}
+
+a:visited {
+ color:grey;
+}
+
+h1 {
+ color:#a70000;
+}
+
+.producttitle {
+ background: #800 url(../images/h1-bg.png) top left repeat;
+}
+
+.section h1.title {
+ color:#843A39;
+}
+
+
+h2,h3,h4,h5,h6 {
+ color:#843A39;
+}
+
+table {
+ border:1px solid #aaa;
+}
+
+table th {
+ background-color:#900;
+}
+
+table tr.even td {
+ background-color:#f5f5f5;
+}
+
+#title a {
+ height:54px;
+}
+
+.term{
+ color:#a70000;
+}
+
+.revhistory table th {
+ color:#a70000;
+}
+
+.edition {
+ color: #a70000;
+}
+
+span.remark{
+ background-color: #ffff00;
+}
+
diff --git a/doc/publican-clusterlabs/en-US/images/1.png b/doc/publican-clusterlabs/en-US/images/1.png
new file mode 100644
index 0000000000..520ff22683
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/1.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/1.svg b/doc/publican-clusterlabs/en-US/images/1.svg
new file mode 100644
index 0000000000..a874e974c1
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/1.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg1">
+ <defs
+ id="defs1" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text1"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan1">1</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/10.png b/doc/publican-clusterlabs/en-US/images/10.png
new file mode 100644
index 0000000000..6fc0647356
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/10.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/10.svg b/doc/publican-clusterlabs/en-US/images/10.svg
new file mode 100644
index 0000000000..6892e0de9b
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/10.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg10">
+ <defs
+ id="defs10" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text10"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan10">10</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/11.png b/doc/publican-clusterlabs/en-US/images/11.png
new file mode 100644
index 0000000000..2887c4c8ac
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/11.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/11.svg b/doc/publican-clusterlabs/en-US/images/11.svg
new file mode 100644
index 0000000000..7f5c269cfc
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/11.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg11">
+ <defs
+ id="defs11" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text11"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan11">11</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/12.png b/doc/publican-clusterlabs/en-US/images/12.png
new file mode 100644
index 0000000000..2667299e3a
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/12.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/12.svg b/doc/publican-clusterlabs/en-US/images/12.svg
new file mode 100644
index 0000000000..9bcf3cc586
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/12.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg12">
+ <defs
+ id="defs12" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text12"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan12">12</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/13.png b/doc/publican-clusterlabs/en-US/images/13.png
new file mode 100644
index 0000000000..fb9f00ce66
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/13.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/13.svg b/doc/publican-clusterlabs/en-US/images/13.svg
new file mode 100644
index 0000000000..796664551f
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/13.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg13">
+ <defs
+ id="defs13" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text13"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan13">13</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/14.png b/doc/publican-clusterlabs/en-US/images/14.png
new file mode 100644
index 0000000000..2f6d426846
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/14.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/14.svg b/doc/publican-clusterlabs/en-US/images/14.svg
new file mode 100644
index 0000000000..869be6ab2b
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/14.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg14">
+ <defs
+ id="defs14" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text14"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan14">14</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/15.png b/doc/publican-clusterlabs/en-US/images/15.png
new file mode 100644
index 0000000000..62645715c3
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/15.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/15.svg b/doc/publican-clusterlabs/en-US/images/15.svg
new file mode 100644
index 0000000000..003f16c2e8
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/15.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg15">
+ <defs
+ id="defs15" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text15"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan15">15</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/16.png b/doc/publican-clusterlabs/en-US/images/16.png
new file mode 100644
index 0000000000..3873d725b4
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/16.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/16.svg b/doc/publican-clusterlabs/en-US/images/16.svg
new file mode 100644
index 0000000000..ab4bee018b
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/16.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg16">
+ <defs
+ id="defs16" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text16"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan16">16</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/17.png b/doc/publican-clusterlabs/en-US/images/17.png
new file mode 100644
index 0000000000..66615089cb
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/17.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/17.svg b/doc/publican-clusterlabs/en-US/images/17.svg
new file mode 100644
index 0000000000..ff29b7aa41
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/17.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg17">
+ <defs
+ id="defs17" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text17"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan17">17</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/18.png b/doc/publican-clusterlabs/en-US/images/18.png
new file mode 100644
index 0000000000..cf5cd4584b
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/18.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/18.svg b/doc/publican-clusterlabs/en-US/images/18.svg
new file mode 100644
index 0000000000..6225d8a91a
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/18.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg18">
+ <defs
+ id="defs18" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text18"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan18">18</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/19.png b/doc/publican-clusterlabs/en-US/images/19.png
new file mode 100644
index 0000000000..0e481be1fe
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/19.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/19.svg b/doc/publican-clusterlabs/en-US/images/19.svg
new file mode 100644
index 0000000000..efd3c0eeb9
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/19.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg19">
+ <defs
+ id="defs19" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text19"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan19">19</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/2.png b/doc/publican-clusterlabs/en-US/images/2.png
new file mode 100644
index 0000000000..ccaa4240d6
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/2.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/2.svg b/doc/publican-clusterlabs/en-US/images/2.svg
new file mode 100644
index 0000000000..27100aca9f
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/2.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg2">
+ <defs
+ id="defs2" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text2"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan2">2</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/20.png b/doc/publican-clusterlabs/en-US/images/20.png
new file mode 100644
index 0000000000..a3852a14e2
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/20.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/20.svg b/doc/publican-clusterlabs/en-US/images/20.svg
new file mode 100644
index 0000000000..7d727359a3
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/20.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg20">
+ <defs
+ id="defs20" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text20"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan20">20</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/21.png b/doc/publican-clusterlabs/en-US/images/21.png
new file mode 100644
index 0000000000..b958a30b0b
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/21.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/21.svg b/doc/publican-clusterlabs/en-US/images/21.svg
new file mode 100644
index 0000000000..babe3ad790
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/21.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg21">
+ <defs
+ id="defs21" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text21"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan21">21</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/22.png b/doc/publican-clusterlabs/en-US/images/22.png
new file mode 100644
index 0000000000..e4c68b1407
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/22.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/22.svg b/doc/publican-clusterlabs/en-US/images/22.svg
new file mode 100644
index 0000000000..d7c7af500d
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/22.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg22">
+ <defs
+ id="defs22" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text22"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan22">22</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/23.png b/doc/publican-clusterlabs/en-US/images/23.png
new file mode 100644
index 0000000000..a627327317
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/23.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/23.svg b/doc/publican-clusterlabs/en-US/images/23.svg
new file mode 100644
index 0000000000..399deffb41
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/23.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg23">
+ <defs
+ id="defs23" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text23"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan23">23</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/24.png b/doc/publican-clusterlabs/en-US/images/24.png
new file mode 100644
index 0000000000..bdcea873a0
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/24.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/24.svg b/doc/publican-clusterlabs/en-US/images/24.svg
new file mode 100644
index 0000000000..25a37e8dc8
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/24.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg24">
+ <defs
+ id="defs24" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text24"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan24">24</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/25.png b/doc/publican-clusterlabs/en-US/images/25.png
new file mode 100644
index 0000000000..db5a9eeedb
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/25.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/25.svg b/doc/publican-clusterlabs/en-US/images/25.svg
new file mode 100644
index 0000000000..adfb67af3e
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/25.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg25">
+ <defs
+ id="defs25" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text25"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan25">25</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/26.png b/doc/publican-clusterlabs/en-US/images/26.png
new file mode 100644
index 0000000000..4bf703699f
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/26.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/26.svg b/doc/publican-clusterlabs/en-US/images/26.svg
new file mode 100644
index 0000000000..6e8e5d62b8
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/26.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg26">
+ <defs
+ id="defs26" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text26"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan26">26</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/27.png b/doc/publican-clusterlabs/en-US/images/27.png
new file mode 100644
index 0000000000..d0105b0a43
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/27.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/27.svg b/doc/publican-clusterlabs/en-US/images/27.svg
new file mode 100644
index 0000000000..299194e448
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/27.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg27">
+ <defs
+ id="defs27" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text27"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan27">27</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/28.png b/doc/publican-clusterlabs/en-US/images/28.png
new file mode 100644
index 0000000000..19eac5c489
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/28.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/28.svg b/doc/publican-clusterlabs/en-US/images/28.svg
new file mode 100644
index 0000000000..5df71b8555
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/28.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg28">
+ <defs
+ id="defs28" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text28"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan28">28</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/29.png b/doc/publican-clusterlabs/en-US/images/29.png
new file mode 100644
index 0000000000..e64d4e6e32
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/29.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/29.svg b/doc/publican-clusterlabs/en-US/images/29.svg
new file mode 100644
index 0000000000..88aa692be4
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/29.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg29">
+ <defs
+ id="defs29" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text29"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan29">29</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/3.png b/doc/publican-clusterlabs/en-US/images/3.png
new file mode 100644
index 0000000000..73478793d5
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/3.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/3.svg b/doc/publican-clusterlabs/en-US/images/3.svg
new file mode 100644
index 0000000000..7954fa5a0d
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/3.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg3">
+ <defs
+ id="defs3" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text3"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan3">3</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/4.png b/doc/publican-clusterlabs/en-US/images/4.png
new file mode 100644
index 0000000000..9ae22affd1
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/4.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/4.svg b/doc/publican-clusterlabs/en-US/images/4.svg
new file mode 100644
index 0000000000..c97f7658a5
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/4.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg4">
+ <defs
+ id="defs4" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text4"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan4">4</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/5.png b/doc/publican-clusterlabs/en-US/images/5.png
new file mode 100644
index 0000000000..bba793a77a
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/5.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/5.svg b/doc/publican-clusterlabs/en-US/images/5.svg
new file mode 100644
index 0000000000..6450446938
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/5.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg5">
+ <defs
+ id="defs5" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text5"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan5">5</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/6.png b/doc/publican-clusterlabs/en-US/images/6.png
new file mode 100644
index 0000000000..b5483fbb58
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/6.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/6.svg b/doc/publican-clusterlabs/en-US/images/6.svg
new file mode 100644
index 0000000000..45af47a1cd
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/6.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg6">
+ <defs
+ id="defs6" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text6"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan6">6</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/7.png b/doc/publican-clusterlabs/en-US/images/7.png
new file mode 100644
index 0000000000..d085f2d805
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/7.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/7.svg b/doc/publican-clusterlabs/en-US/images/7.svg
new file mode 100644
index 0000000000..2e9ffec986
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/7.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg7">
+ <defs
+ id="defs7" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text7"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan7">7</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/8.png b/doc/publican-clusterlabs/en-US/images/8.png
new file mode 100644
index 0000000000..f37327163d
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/8.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/8.svg b/doc/publican-clusterlabs/en-US/images/8.svg
new file mode 100644
index 0000000000..340dbce58e
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/8.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg8">
+ <defs
+ id="defs8" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text8"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan8">8</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/9.png b/doc/publican-clusterlabs/en-US/images/9.png
new file mode 100644
index 0000000000..e8709d448b
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/9.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/9.svg b/doc/publican-clusterlabs/en-US/images/9.svg
new file mode 100644
index 0000000000..3838627082
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/9.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="32"
+ height="32"
+ id="svg9">
+ <defs
+ id="defs9" />
+ <circle
+ cx="16"
+ cy="16"
+ r="14"
+ id="circle"
+ style="fill:#aa0000" />
+ <text
+ x="16"
+ y="16"
+ transform="scale(0.89833804,1.1131667)"
+ id="text9"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="18"
+ y="20"
+ id="tspan9">9</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/Pacemaker-stack.svg b/doc/publican-clusterlabs/en-US/images/Pacemaker-stack.svg
new file mode 100644
index 0000000000..54da570000
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/Pacemaker-stack.svg
@@ -0,0 +1,1083 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1052.3622"
+ height="744.09448"
+ id="svg2"
+ sodipodi:version="0.32"
+ inkscape:version="0.46+devel"
+ sodipodi:docname="Pacemaker-stack.svg"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape"
+ version="1.0"
+ enable-background="new">
+ <defs
+ id="defs4">
+ <linearGradient
+ id="linearGradient3468">
+ <stop
+ style="stop-color:#000000;stop-opacity:0.31481481;"
+ offset="0"
+ id="stop3470" />
+ <stop
+ style="stop-color:#ff0000;stop-opacity:0.125;"
+ offset="1"
+ id="stop3472" />
+ </linearGradient>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lend"
+ style="overflow:visible;">
+ <path
+ id="path6465"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.8) rotate(180) translate(12.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Lstart"
+ style="overflow:visible">
+ <path
+ id="path6480"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(1.1) translate(1,0)" />
+ </marker>
+ <linearGradient
+ id="linearGradient4767">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4769" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4771" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective10" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ gridtolerance="20"
+ guidetolerance="10"
+ objecttolerance="10"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.84801059"
+ inkscape:cx="542.71246"
+ inkscape:cy="382.07947"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1680"
+ inkscape:window-height="1001"
+ inkscape:window-x="0"
+ inkscape:window-y="26"
+ inkscape:snap-global="true"
+ inkscape:window-maximized="1">
+ <inkscape:grid
+ type="xygrid"
+ id="grid9704"
+ visible="true"
+ enabled="true"
+ spacingx="2px"
+ spacingy="2px"
+ dotted="true" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="Areas"
+ style="display:inline">
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#c8c8c8;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.14258301;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.42774902, 3.42774902;stroke-dashoffset:0;display:inline;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect5128"
+ width="826.31659"
+ height="311.55591"
+ x="122.95211"
+ y="415.39191" />
+ <rect
+ style="fill:#1d00fb;fill-opacity:0.1254902;fill-rule:evenodd;stroke:#000000;stroke-width:1.0216763;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.06502925, 3.06502925;stroke-dashoffset:0"
+ id="rect9869"
+ width="415.27484"
+ height="390.60483"
+ x="406.04105"
+ y="24.711681" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#969696;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.82043868;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.4613161, 2.4613161;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect9835"
+ width="303.51474"
+ height="286.53323"
+ x="137.8407"
+ y="428.20981" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706000000000;fill-rule:evenodd;stroke:#000000;stroke-width:1.6;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.8,4.8;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect9833"
+ width="443.37769"
+ height="233.1974"
+ x="490.47974"
+ y="477.91968" />
+ <rect
+ style="fill:#000000;fill-opacity:0.03921569;fill-rule:evenodd;stroke:#000000;stroke-width:2.02884078;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:6.08652264, 6.08652264;stroke-dashoffset:0;display:inline"
+ id="rect3450"
+ width="149.7639"
+ height="164.98082"
+ x="661.49438"
+ y="36.672237" />
+ <rect
+ y="639.48785"
+ x="149.6749"
+ height="65.67556"
+ width="280.07031"
+ id="rect4863"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:0.99444085;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.98332261, 2.98332261;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ </g>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ style="display:inline">
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"
+ x="346.99619"
+ y="817.37476"
+ id="text4785"><tspan
+ sodipodi:role="line"
+ id="tspan4787"
+ x="346.99619"
+ y="817.37476" /></text>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot4887"
+ style="font-size:12px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion4889"><rect
+ id="rect4891"
+ width="176.30978"
+ height="85.671654"
+ x="264.87854"
+ y="756.44318" /></flowRegion><flowPara
+ id="flowPara4893" /></flowRoot> <flowRoot
+ xml:space="preserve"
+ id="flowRoot5303"
+ style="font-size:22px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion5305"><rect
+ id="rect5307"
+ width="110.59427"
+ height="24.664909"
+ x="555.35828"
+ y="146.1257" /></flowRegion><flowPara
+ id="flowPara5309" /></flowRoot> <flowRoot
+ xml:space="preserve"
+ id="flowRoot5392"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion5394"><rect
+ id="rect5396"
+ width="88.316284"
+ height="44.555965"
+ x="-177.42821"
+ y="246.37662" /></flowRegion><flowPara
+ id="flowPara5398" /></flowRoot> <rect
+ style="font-size:36px;fill:#ab37c3;fill-opacity:0.62835245;stroke:#000000;stroke-width:2;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5101"
+ width="320"
+ height="60"
+ x="486.03693"
+ y="345.32062" />
+ <text
+ xml:space="preserve"
+ style="font-size:28px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="646.10529"
+ y="382.73761"
+ id="text5103"><tspan
+ sodipodi:role="line"
+ id="tspan5105"
+ x="646.10529"
+ y="382.73761">OpenAIS</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f0f300;fill-opacity:0.96168584;stroke:#000000;stroke-width:1.79003513;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5113"
+ width="85.653038"
+ height="119.70997"
+ x="493.6633"
+ y="182.87541" />
+ <text
+ xml:space="preserve"
+ style="font-size:22px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="537.14764"
+ y="243.33733"
+ id="text5115"><tspan
+ sodipodi:role="line"
+ id="tspan5117"
+ x="288.1481"
+ y="492.33685">LRM</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00fab4;fill-opacity:0.31372549;stroke:#000000;stroke-width:0.72598213;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5386"
+ width="60"
+ height="20"
+ x="590.31262"
+ y="144.68628" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="620.38293"
+ y="160.41284"
+ id="text5388"><tspan
+ sodipodi:role="line"
+ id="tspan5390"
+ x="620.38293"
+ y="160.41284">SBD</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.93765765;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5177"
+ width="120.56235"
+ height="19.44669"
+ x="339.75027"
+ y="375.23956" />
+ <text
+ xml:space="preserve"
+ style="font-size:18px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="400.37854"
+ y="389.28772"
+ id="text5179"><tspan
+ sodipodi:role="line"
+ id="tspan5181"
+ x="400.37854"
+ y="389.28772">dlm_controld</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.93805921;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5228"
+ width="120.56194"
+ height="19.463411"
+ x="339.18832"
+ y="345.77615" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="399.81653"
+ y="361.97128"
+ id="text5230"><tspan
+ sodipodi:role="line"
+ id="tspan5232"
+ x="399.81653"
+ y="361.97128">ocfs2_controld</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.93805921;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5241"
+ width="120.56194"
+ height="19.463411"
+ x="339.18832"
+ y="315.23956" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="399.81647"
+ y="331.43469"
+ id="text5243"><tspan
+ sodipodi:role="line"
+ id="tspan5245"
+ x="399.81647"
+ y="331.43469">clvmd</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00c500;fill-opacity:0.5098038;stroke:#000000;stroke-width:2.23176122;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5107"
+ width="249.62111"
+ height="69.6679"
+ x="559.74811"
+ y="261.31393" />
+ <text
+ xml:space="preserve"
+ style="font-size:26px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="683.57037"
+ y="303.89301"
+ id="text5109"><tspan
+ sodipodi:role="line"
+ id="tspan5111"
+ x="683.57037"
+ y="303.89301">Pacemaker</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00c500;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.87441856;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5332"
+ width="60"
+ height="60"
+ x="670.31262"
+ y="214.68628" />
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="700.33459"
+ y="248.26538"
+ id="text5649"><tspan
+ sodipodi:role="line"
+ id="tspan5651"
+ x="700.33459"
+ y="248.26538">CIB</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00c500;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.87555265;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5653"
+ width="60"
+ height="60"
+ x="740.31262"
+ y="214.68628" />
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="770.12708"
+ y="240.96313"
+ id="text5655"><tspan
+ sodipodi:role="line"
+ id="tspan5657"
+ x="770.12708"
+ y="240.96313">Policy</tspan><tspan
+ sodipodi:role="line"
+ x="770.12708"
+ y="253.46313"
+ id="tspan5659">Engine</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 684.55866,296.14788 0,0"
+ id="path9147"
+ inkscape:connector-type="polyline" />
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot9370"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion9372"><rect
+ id="rect9374"
+ width="168.62997"
+ height="73.112297"
+ x="587.25684"
+ y="472.87146" /></flowRegion><flowPara
+ id="flowPara9376" /></flowRoot> <flowRoot
+ xml:space="preserve"
+ id="flowRoot9390"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion9392"><rect
+ id="rect9394"
+ width="143.86613"
+ height="64.857681"
+ x="714.61371"
+ y="458.72067" /></flowRegion><flowPara
+ id="flowPara9396" /></flowRoot> <flowRoot
+ xml:space="preserve"
+ id="flowRoot9404"
+ style="font-size:10px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Arial"><flowRegion
+ id="flowRegion9406"><rect
+ id="rect9408"
+ width="94.338448"
+ height="67.216141"
+ x="709.89679"
+ y="516.50299" /></flowRegion><flowPara
+ id="flowPara9410" /></flowRoot> <rect
+ style="opacity:1;fill:#000000;fill-opacity:0.5098038;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4, 4;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect9644"
+ width="0"
+ height="0"
+ x="860"
+ y="714.09448" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="225.69032"
+ y="477.62704"
+ id="text9733"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9735"
+ x="225.69032"
+ y="477.62704">ext3, XFS</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="358.56805"
+ y="476.44781"
+ id="text9739"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9741"
+ x="358.56805"
+ y="476.44781">OCFS2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="288.00186"
+ y="548.08936"
+ id="text9745"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9747"
+ x="288.00186"
+ y="548.08936">cLVM2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="218.04878"
+ y="615.1178"
+ id="text9751"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9753"
+ x="218.04878"
+ y="615.1178">DRBD</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="350.87958"
+ y="615.1178"
+ id="text9757"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9759"
+ x="350.87958"
+ y="615.1178">Multipath IO</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="218.86955"
+ y="678.60858"
+ id="text9763"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9765"
+ x="218.86955"
+ y="678.60858">Local disks</tspan></text>
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect9767"
+ width="112.06894"
+ height="49.757401"
+ x="298.82321"
+ y="646.26764" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="353.23804"
+ y="664.41174"
+ id="text9769"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9771"
+ x="353.23804"
+ y="664.41174">SAN</tspan><tspan
+ sodipodi:role="line"
+ x="353.23804"
+ y="681.91174"
+ id="tspan9773">FC(oE), iSCSI</tspan></text>
+ <path
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:none;stroke:#000000;stroke-width:0.85381305px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Sans;-inkscape-font-specification:Sans"
+ d="m 288.44229,582.26051 0,116.63947"
+ id="path9775" />
+ <rect
+ y="518.65619"
+ x="406.95398"
+ height="49.757401"
+ width="112.06894"
+ id="rect4885"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="466.32343"
+ y="548.07935"
+ id="text9779"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9781"
+ x="466.32343"
+ y="548.07935">DLM</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="614.20917"
+ y="550.09448"
+ id="text9785"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9787"
+ x="614.20917"
+ y="550.09448">SCTP</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="745.90765"
+ y="547.73602"
+ id="text9791"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9793"
+ x="745.90765"
+ y="547.73602">TCP</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="675.76727"
+ y="670.86316"
+ id="text9797"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9799"
+ x="675.76727"
+ y="670.86316">Ethernet</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="805.06036"
+ y="668.5047"
+ id="text9803"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9805"
+ x="805.06036"
+ y="668.5047">Infiniband</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="659.36188"
+ y="631.9386"
+ id="text9809"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9811"
+ x="659.36188"
+ y="631.9386">Bonding</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="875.24768"
+ y="536.03778"
+ id="text9815"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan9817"
+ x="875.24768"
+ y="536.03778">UDP</tspan><tspan
+ sodipodi:role="line"
+ x="875.24768"
+ y="553.53778"
+ id="tspan4926">multicast</tspan></text>
+ <rect
+ style="opacity:1;fill:#1d00fb;fill-opacity:0.4750958;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3, 3;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect9843"
+ width="0"
+ height="10"
+ x="80"
+ y="124.09448" />
+ <rect
+ style="fill:#1d00fb;fill-opacity:0.4750958;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3, 3;stroke-dashoffset:0;display:inline"
+ id="rect3432"
+ width="120"
+ height="40"
+ x="675.35095"
+ y="98.365234" />
+ <text
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="731.12549"
+ y="124.02045"
+ id="text3434"><tspan
+ sodipodi:role="line"
+ id="tspan3436"
+ x="731.12549"
+ y="124.02045">Fat GUI</tspan></text>
+ <rect
+ style="fill:#1d00fb;fill-opacity:0.4750958;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3, 3;stroke-dashoffset:0;display:inline"
+ id="rect3438"
+ width="120"
+ height="40"
+ x="674.57642"
+ y="152.71002" />
+ <text
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="730.35095"
+ y="178.36523"
+ id="text3440"><tspan
+ sodipodi:role="line"
+ id="tspan3442"
+ x="730.35095"
+ y="178.36523">CRM Shell</tspan></text>
+ <rect
+ style="fill:#1d00fd;fill-opacity:0.4745098;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3, 3;stroke-dashoffset:0;display:inline"
+ id="rect3444"
+ width="120"
+ height="40"
+ x="676.30884"
+ y="45.865906" />
+ <text
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="732.08337"
+ y="71.521118"
+ id="text3446"><tspan
+ sodipodi:role="line"
+ id="tspan3448"
+ x="732.08337"
+ y="71.521118">Web GUI</tspan></text>
+ <rect
+ style="opacity:1;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4936"
+ width="0"
+ height="10"
+ x="-240"
+ y="14.094482" />
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5272"
+ width="90.140778"
+ height="20.532293"
+ x="370.4534"
+ y="223.6217" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="415.17615"
+ y="237.93082"
+ id="text3398"><tspan
+ sodipodi:role="line"
+ id="tspan3400"
+ x="415.17615"
+ y="237.93082">Filesystems</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect3402"
+ width="90.140778"
+ height="20.532293"
+ x="370.4534"
+ y="254.15399" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.17615"
+ y="268.4631"
+ id="text3404"><tspan
+ sodipodi:role="line"
+ id="tspan3406"
+ x="415.17615"
+ y="268.4631">IP address</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect3408"
+ width="90.140778"
+ height="20.532293"
+ x="370.4534"
+ y="283.6217" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.17615"
+ y="297.93079"
+ id="text3410"><tspan
+ sodipodi:role="line"
+ id="tspan3412"
+ x="415.17615"
+ y="297.93079">DRBD</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect3414"
+ width="90.140778"
+ height="20.532293"
+ x="370.31262"
+ y="194.15399" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.03534"
+ y="208.4631"
+ id="text3416"><tspan
+ sodipodi:role="line"
+ id="tspan3418"
+ x="415.03534"
+ y="208.4631">iSCSI</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect3420"
+ width="90.140778"
+ height="20.532293"
+ x="370.31262"
+ y="163.6217" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.03534"
+ y="177.93082"
+ id="text3422"><tspan
+ sodipodi:role="line"
+ id="tspan3424"
+ x="415.03534"
+ y="177.93082">Apache</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect3426"
+ width="90.140778"
+ height="20.532293"
+ x="370.4534"
+ y="44.153984" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.17615"
+ y="58.463097"
+ id="text3428"><tspan
+ sodipodi:role="line"
+ id="tspan3430"
+ x="415.17615"
+ y="58.463097">SAP</tspan></text>
+ <rect
+ style="writing-mode:tb-rl;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.55184007;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect4930"
+ width="31.12739"
+ height="268.20279"
+ x="-501.60672"
+ y="44.179905"
+ transform="scale(-1,1)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="484.99542"
+ y="174.45819"
+ id="text4932"><tspan
+ sodipodi:role="line"
+ id="tspan4934"
+ x="219.26895"
+ y="440.18463">OCF agents</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect4938"
+ width="90.140778"
+ height="20.532293"
+ x="370.31262"
+ y="133.6217" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.03534"
+ y="147.9308"
+ id="text4940"><tspan
+ sodipodi:role="line"
+ id="tspan4942"
+ x="415.03534"
+ y="147.9308">Xen</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect4944"
+ width="90.140778"
+ height="20.532293"
+ x="370.31262"
+ y="103.62169" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.03534"
+ y="117.9308"
+ id="text4946"><tspan
+ sodipodi:role="line"
+ id="tspan4948"
+ x="415.03534"
+ y="117.9308">libvirt</tspan></text>
+ <rect
+ style="font-size:22px;fill:#f9ea00;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.83309597;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect4950"
+ width="90.140778"
+ height="20.532293"
+ x="370.4534"
+ y="73.621689" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="415.17615"
+ y="87.930801"
+ id="text4952"><tspan
+ sodipodi:role="line"
+ id="tspan4954"
+ x="415.17615"
+ y="87.930801">MySQL</tspan></text>
+ <rect
+ style="writing-mode:tb-rl;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.61414051;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5020"
+ width="29.88586"
+ height="151.11201"
+ x="-540.25555"
+ y="44.743347"
+ transform="scale(-1,1)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="525.37073"
+ y="110.02466"
+ id="text5022"><tspan
+ sodipodi:role="line"
+ id="tspan5024"
+ x="154.83542"
+ y="480.55994">LSB</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial"
+ x="528.08606"
+ y="91.580811"
+ id="text5026"><tspan
+ sodipodi:role="line"
+ id="tspan5028"
+ x="147.89059"
+ y="483.27527"> </tspan></text>
+ <rect
+ style="writing-mode:tb-rl;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.60802722;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5057"
+ width="29.891973"
+ height="149.93889"
+ x="-574.25861"
+ y="44.740292"
+ transform="scale(-1,1)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14.30705261px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="571.63904"
+ y="109.98261"
+ id="text5059"
+ transform="scale(0.9785384,1.0219323)"><tspan
+ sodipodi:role="line"
+ id="tspan5061"
+ x="156.69727"
+ y="524.92438">STONITH</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;writing-mode:tb-rl;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="568.08606"
+ y="91.580811"
+ id="text5063"><tspan
+ sodipodi:role="line"
+ id="tspan5065"
+ x="147.89059"
+ y="523.27527"> </tspan></text>
+ <rect
+ style="font-size:22px;fill:#00fab4;fill-opacity:0.31372549;stroke:#000000;stroke-width:0.72598213;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5085"
+ width="60"
+ height="20"
+ x="590.31262"
+ y="114.68628" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="620.38293"
+ y="130.41283"
+ id="text5087"><tspan
+ sodipodi:role="line"
+ id="tspan5089"
+ x="620.38293"
+ y="130.41283">iLO</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00fab4;fill-opacity:0.31372549;stroke:#000000;stroke-width:0.72598213;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5091"
+ width="60"
+ height="20"
+ x="590.31262"
+ y="84.686279" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="620.38293"
+ y="100.41283"
+ id="text5093"><tspan
+ sodipodi:role="line"
+ id="tspan5095"
+ x="620.38293"
+ y="100.41283">DRAC</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00fab4;fill-opacity:0.31372549;stroke:#000000;stroke-width:0.72598213;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5097"
+ width="60"
+ height="20"
+ x="590.31262"
+ y="54.686279" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="620.38293"
+ y="70.412834"
+ id="text5099"><tspan
+ sodipodi:role="line"
+ id="tspan5101"
+ x="620.38293"
+ y="70.412834">...</tspan></text>
+ <rect
+ style="font-size:22px;fill:#00fab4;fill-opacity:0.5098038;stroke:#000000;stroke-width:0.72598213;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
+ id="rect5103"
+ width="60"
+ height="100"
+ x="590.31262"
+ y="174.68628" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-weight:normal;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;display:inline;font-family:Arial"
+ x="920.97113"
+ y="104.14124"
+ id="text5105"><tspan
+ sodipodi:role="line"
+ id="tspan5107"
+ x="920.97113"
+ y="104.14124" /></text>
+ <rect
+ style="opacity:1;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect5111"
+ width="0"
+ height="20"
+ x="780"
+ y="94.094482" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-weight:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Arial"
+ x="620.21008"
+ y="228.22388"
+ id="text5113"><tspan
+ sodipodi:role="line"
+ id="tspan5115"
+ x="620.21008"
+ y="228.22388">Fencing</tspan></text>
+ <rect
+ style="opacity:1;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect5117"
+ width="10"
+ height="0"
+ x="730"
+ y="204.09448" />
+ <rect
+ style="opacity:1;fill:#d1fc25;fill-opacity:0.78927199;fill-rule:evenodd;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect5119"
+ width="0"
+ height="0"
+ x="950"
+ y="104.09448" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
+ x="705.74603"
+ y="452.82455"
+ id="text5124"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan5126"
+ x="705.74603"
+ y="452.82455"
+ style="font-size:28px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans Bold">Linux kernel</tspan></text>
+ <rect
+ y="582.58923"
+ x="297.64398"
+ height="49.757401"
+ width="112.06894"
+ id="rect4873"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4875"
+ width="112.06894"
+ height="49.757401"
+ x="166.74939"
+ y="583.76849" />
+ <rect
+ y="646.2677"
+ x="166.39093"
+ height="49.757401"
+ width="112.06894"
+ id="rect4877"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4879"
+ width="112.06894"
+ height="49.757401"
+ x="232.7863"
+ y="517.73157" />
+ <rect
+ y="446.97772"
+ x="303.54013"
+ height="49.757401"
+ width="112.06894"
+ id="rect4881"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4883"
+ width="112.06894"
+ height="49.757401"
+ x="172.64554"
+ y="446.97772" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4887"
+ width="112.06894"
+ height="49.757401"
+ x="561.12164"
+ y="519.01465" />
+ <rect
+ y="517.83545"
+ x="686.12012"
+ height="49.757401"
+ width="112.06894"
+ id="rect4889"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4892"
+ width="112.06894"
+ height="49.757401"
+ x="812.29779"
+ y="517.83545" />
+ <rect
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans"
+ id="rect4894"
+ width="112.06894"
+ height="49.757401"
+ x="602.39471"
+ y="601.96613" />
+ <rect
+ y="640.05994"
+ x="619.26239"
+ height="49.757401"
+ width="112.06894"
+ id="rect4896"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ <rect
+ y="638.88074"
+ x="747.44012"
+ height="49.757401"
+ width="112.06894"
+ id="rect4902"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:0.11764706;fill-rule:evenodd;stroke:#000000;stroke-width:1.62952805;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.88858379, 4.88858379;stroke-dashoffset:0;font-family:Sans;-inkscape-font-specification:Sans" />
+ </g>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/dot.png b/doc/publican-clusterlabs/en-US/images/dot.png
new file mode 100644
index 0000000000..079add95de
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/dot.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/dot.svg b/doc/publican-clusterlabs/en-US/images/dot.svg
new file mode 100644
index 0000000000..eaeff2ea78
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/dot.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="5"
+ height="6"
+ id="svgdot">
+ <defs
+ id="defsdot" />
+ <text
+ x="2.5"
+ y="3"
+ transform="scale(0.89833804,1.1131667)"
+ id="textdot"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="4.5"
+ y="5"
+ id="tspandot">dot</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/dot2.png b/doc/publican-clusterlabs/en-US/images/dot2.png
new file mode 100644
index 0000000000..8348fcd054
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/dot2.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/dot2.svg b/doc/publican-clusterlabs/en-US/images/dot2.svg
new file mode 100644
index 0000000000..893f689356
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/dot2.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="5"
+ height="6"
+ id="svgdot2">
+ <defs
+ id="defsdot2" />
+ <text
+ x="2.5"
+ y="3"
+ transform="scale(0.89833804,1.1131667)"
+ id="textdot2"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="4.5"
+ y="5"
+ id="tspandot2">dot2</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/h1-bg.png b/doc/publican-clusterlabs/en-US/images/h1-bg.png
new file mode 100644
index 0000000000..ce080e772e
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/h1-bg.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/h1-bg.svg b/doc/publican-clusterlabs/en-US/images/h1-bg.svg
new file mode 100644
index 0000000000..7dcf47b645
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/h1-bg.svg
@@ -0,0 +1,95 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.0"
+ width="5"
+ height="100"
+ id="svgh1-bg"
+ inkscape:version="0.46+devel"
+ sodipodi:docname="h1-bg.svg">
+ <metadata
+ id="metadata8">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="993"
+ inkscape:window-height="852"
+ id="namedview6"
+ showgrid="false"
+ inkscape:zoom="4.72"
+ inkscape:cx="-38.541797"
+ inkscape:cy="45.205581"
+ inkscape:window-x="0"
+ inkscape:window-y="25"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svgh1-bg" />
+ <defs
+ id="defsh1-bg">
+ <linearGradient
+ id="linearGradient3598">
+ <stop
+ style="stop-color:#bd4c4b;stop-opacity:1;"
+ offset="0"
+ id="stop3600" />
+ <stop
+ id="stop3612"
+ offset="1"
+ style="stop-color:#843a39;stop-opacity:1;" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 50 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="5 : 50 : 1"
+ inkscape:persp3d-origin="2.5 : 33.333333 : 1"
+ id="perspective10" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3598"
+ id="linearGradient3604"
+ x1="0"
+ y1="-5.2763639e-08"
+ x2="0"
+ y2="100"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <text
+ x="2.5"
+ y="50"
+ transform="scale(0.89833804,1.1131667)"
+ id="texth1-bg"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="4.5"
+ y="52"
+ id="tspanh1-bg">h1-bg</tspan></text>
+ <rect
+ style="fill:url(#linearGradient3604);fill-opacity:1"
+ id="rect2816"
+ width="5.0847459"
+ height="100"
+ x="0"
+ y="-5.2763639e-08" />
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/image_left.png b/doc/publican-clusterlabs/en-US/images/image_left.png
new file mode 100644
index 0000000000..f781295359
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/image_left.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/image_left.svg b/doc/publican-clusterlabs/en-US/images/image_left.svg
new file mode 100644
index 0000000000..4ce5460a8c
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/image_left.svg
@@ -0,0 +1,237 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.46+devel"
+ width="195"
+ height="50"
+ sodipodi:docname="image_left.svg"
+ style="display:inline;enable-background:new">
+ <metadata
+ id="metadata8">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs6">
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4091">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4093" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4095" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4039">
+ <stop
+ style="stop-color:#bd4c4b;stop-opacity:1;"
+ offset="0"
+ id="stop4041" />
+ <stop
+ style="stop-color:#bd4c4b;stop-opacity:0;"
+ offset="1"
+ id="stop4043" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4031">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4033" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4035" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4013">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4015" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4017" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ id="perspective10" />
+ <inkscape:perspective
+ id="perspective3678"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective3703"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4013"
+ id="linearGradient4019"
+ x1="43.157043"
+ y1="-79.140793"
+ x2="43.157043"
+ y2="-100.66115"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4031"
+ id="linearGradient4037"
+ x1="97.157951"
+ y1="-73.723228"
+ x2="97.157951"
+ y2="-95.479019"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4039"
+ id="linearGradient4045"
+ x1="-0.30023095"
+ y1="65.069283"
+ x2="486.07388"
+ y2="-23.799074"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.5,0,0,0.5,0.24607542,0.11399734)" />
+ <inkscape:perspective
+ id="perspective4071"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4091"
+ id="linearGradient4097"
+ x1="42.10154"
+ y1="-73.627083"
+ x2="42.10154"
+ y2="-92.733322"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(0.64120517,-0.56230747)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4091"
+ id="linearGradient4099"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.50000001,0,0,0.5,31.090491,0.45555381)"
+ x1="42.10154"
+ y1="-73.627083"
+ x2="42.10154"
+ y2="-92.733322" />
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1680"
+ inkscape:window-height="977"
+ id="namedview4"
+ showgrid="false"
+ inkscape:zoom="4.710419"
+ inkscape:cx="82.027581"
+ inkscape:cy="-1.8964956"
+ inkscape:window-x="0"
+ inkscape:window-y="25"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="layer1" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer4"
+ inkscape:label="Blank"
+ style="display:none" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer1"
+ inkscape:label="Background"
+ style="display:inline">
+ <rect
+ style="fill:url(#linearGradient4045);fill-opacity:1"
+ id="rect2821"
+ width="195"
+ height="32.575058"
+ x="0.095963053"
+ y="0.073580861" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="Pacemaker"
+ style="display:inline">
+ <text
+ xml:space="preserve"
+ style="font-size:27.22674942px;font-style:normal;font-weight:normal;line-height:125%;opacity:0.95121951;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+ x="19.977491"
+ y="35.90564"
+ id="text3656"
+ sodipodi:linespacing="125%"
+ transform="scale(0.93645805,1.0678535)"><tspan
+ sodipodi:role="line"
+ x="19.977491"
+ y="35.90564"
+ style="font-size:29.96665764px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:URW Bookman L;-inkscape-font-specification:URW Bookman L"
+ id="tspan3662"><tspan
+ style="font-size:45px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:URW Bookman L;-inkscape-font-specification:URW Bookman L"
+ id="tspan4055">P</tspan>acemaker</tspan></text>
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="Reflection"
+ style="display:inline">
+ <text
+ xml:space="preserve"
+ style="font-size:27.22674942px;font-style:normal;font-weight:normal;line-height:125%;opacity:0.35365852;fill:url(#linearGradient4099);fill-opacity:1;stroke:none;display:inline;font-family:Bitstream Vera Sans"
+ x="50.746254"
+ y="-36.627686"
+ id="text3656-0"
+ sodipodi:linespacing="125%"
+ transform="matrix(0.93645805,0,0.77733052,-1.0678535,0,0)"><tspan
+ sodipodi:role="line"
+ x="50.746254"
+ y="-36.627686"
+ style="font-size:29.96665764px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:url(#linearGradient4099);fill-opacity:1;font-family:URW Bookman L;-inkscape-font-specification:URW Bookman L"
+ id="tspan3662-2"><tspan
+ style="font-size:45px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:url(#linearGradient4099);fill-opacity:1;font-family:URW Bookman L;-inkscape-font-specification:URW Bookman L"
+ id="tspan4055-7">P</tspan>acemaker</tspan></text>
+ </g>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/image_right.png b/doc/publican-clusterlabs/en-US/images/image_right.png
new file mode 100644
index 0000000000..9ec1ac5491
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/image_right.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/image_right.svg b/doc/publican-clusterlabs/en-US/images/image_right.svg
new file mode 100644
index 0000000000..205f021f5a
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/image_right.svg
@@ -0,0 +1,99 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.0"
+ width="88"
+ height="40"
+ id="svgimage_left"
+ style="display:inline"
+ inkscape:version="0.46+devel"
+ sodipodi:docname="image_right.svg">
+ <metadata
+ id="metadata3953">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1107"
+ inkscape:window-height="532"
+ id="namedview3951"
+ showgrid="false"
+ inkscape:zoom="5.2444444"
+ inkscape:cx="44"
+ inkscape:cy="22.5"
+ inkscape:window-x="0"
+ inkscape:window-y="25"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svgimage_left" />
+ <defs
+ id="defsimage_left">
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 22.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="88 : 22.5 : 1"
+ inkscape:persp3d-origin="44 : 15 : 1"
+ id="perspective3955" />
+ </defs>
+ <text
+ x="46.796108"
+ y="20.958361"
+ transform="scale(0.89833804,1.1131667)"
+ id="textimage_left"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#bd4c4b;fill-opacity:1;stroke:none;font-family:Liberation Serif"
+ sodipodi:linespacing="125%"><tspan
+ x="48.796108"
+ y="22.958361"
+ id="tspanimage_left"
+ style="fill:#bd4c4b;fill-opacity:1">cluster</tspan><tspan
+ id="tspan3596"
+ style="fill:#843a39;fill-opacity:1">labs</tspan></text>
+ <g
+ id="layer1"
+ style="display:none">
+ <rect
+ width="84.851692"
+ height="42.521187"
+ x="2.0974576"
+ y="1.1440678"
+ id="rect2817" />
+ </g>
+ <g
+ id="layer2"
+ style="display:none" />
+ <text
+ x="1.1440676"
+ y="35.275425"
+ id="text3765"
+ xml:space="preserve"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+ sodipodi:linespacing="125%"><tspan
+ x="1.1440676"
+ y="35.275425"
+ id="tspan3767"
+ style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;opacity:0.46052635;font-family:Liberation Mono;-inkscape-font-specification:Liberation Mono">documentation</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/important.png b/doc/publican-clusterlabs/en-US/images/important.png
new file mode 100644
index 0000000000..969562b7bc
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/important.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/important.svg b/doc/publican-clusterlabs/en-US/images/important.svg
new file mode 100644
index 0000000000..064c783b53
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/important.svg
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="48"
+ height="48"
+ id="svg2">
+ <defs
+ id="defs5" />
+ <path
+ d="M 255.25,-411.29002 L 261.86798,-400.85887 L 273.83367,-397.7882 L 265.95811,-388.27072 L 266.73534,-375.94179 L 255.25,-380.49082 L 243.76466,-375.94179 L 244.54189,-388.27072 L 236.66633,-397.7882 L 248.63202,-400.85887 L 255.25,-411.29002 z "
+ transform="matrix(1.1071323,0,0,1.1071323,-258.4137,459.98052)"
+ style="fill:#2e3436;fill-opacity:1;stroke:#2e3436;stroke-width:4.25880718;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4450" />
+ <path
+ d="M 255.25,-411.29002 L 261.86798,-400.85887 L 273.83367,-397.7882 L 265.95811,-388.27072 L 266.73534,-375.94179 L 255.25,-380.49082 L 243.76466,-375.94179 L 244.54189,-388.27072 L 236.66633,-397.7882 L 248.63202,-400.85887 L 255.25,-411.29002 z "
+ transform="matrix(1.1071323,0,0,1.1071323,-258.4137,459.98052)"
+ style="fill:#fac521;fill-opacity:1;stroke-width:3.4070456;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4452" />
+ <path
+ d="M 24.175987,4.476098 L 16.980534,16.087712 L 3.9317841,19.443104 L 16.980534,20.076901 L 24.175987,10.383543 L 31.408721,20.076901 L 44.457471,19.443104 L 31.468862,16.027571 L 24.175987,4.476098 z "
+ style="fill:#feeaab;fill-opacity:1;stroke-width:3.4070456;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4531" />
+ <path
+ d="M 12.456856,24.055852 C 11.65845,24.299685 14.436112,29.177769 14.436112,32.041127 C 14.436112,37.343117 13.010825,39.831516 15.971742,37.364645 C 18.711008,35.08244 21.184735,34.873512 24.195894,34.873512 C 27.207053,34.873512 29.646656,35.08244 32.38592,37.364645 C 35.346837,39.831516 33.921551,37.343117 33.92155,32.041127 C 33.92155,28.223316 38.868232,20.827013 33.682674,25.591482 C 31.458295,27.635233 27.413886,29.481744 24.195894,29.481744 C 20.977903,29.481744 16.933493,27.635233 14.709113,25.591482 C 13.412724,24.400365 12.722992,23.974574 12.456856,24.055852 z "
+ style="fill:#fcd867;fill-opacity:1;stroke-width:3.4070456;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path2185" />
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/note.png b/doc/publican-clusterlabs/en-US/images/note.png
new file mode 100644
index 0000000000..d04775d990
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/note.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/note.svg b/doc/publican-clusterlabs/en-US/images/note.svg
new file mode 100644
index 0000000000..abe5a60246
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/note.svg
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="48"
+ height="48"
+ id="svg2">
+ <defs
+ id="defs5" />
+ <path
+ d="M 30.27396,4.1232594 L 18.765811,4.1232594 C 11.476786,4.1232594 5.5574109,10.546411 5.5574109,19.960741 C 5.5574109,24.746615 7.0844878,29.075948 9.5403943,32.177328 C 9.4616811,32.681104 9.414455,33.200619 9.414455,33.720144 C 9.414455,39.308917 13.554865,43.591015 18.891751,44.267966 C 17.506371,42.693663 16.656245,40.914707 16.656245,38.616218 C 16.656245,38.01799 16.719219,37.419752 16.82942,36.837262 C 17.459135,36.963202 18.104599,37.026176 18.750063,37.026176 L 30.258211,37.026176 C 37.547237,37.026176 43.466612,29.39081 43.466612,19.960741 C 43.466612,10.530672 37.578724,4.1232594 30.27396,4.1232594 z "
+ style="fill:#2e3436;fill-opacity:1;stroke:#2e3436;stroke-width:4.7150631;stroke-miterlimit:4;stroke-dasharray:none"
+ id="path4317" />
+ <path
+ d="M 30.27396,4.1232594 L 18.765811,4.1232594 C 11.476786,4.1232594 5.5574109,10.546411 5.5574109,19.960741 C 5.5574109,24.746615 7.0844878,29.075948 9.5403943,32.177328 C 9.4616811,32.681104 9.414455,33.200619 9.414455,33.720144 C 9.414455,39.308917 13.554865,43.591015 18.891751,44.267966 C 17.506371,42.693663 16.656245,40.914707 16.656245,38.616218 C 16.656245,38.01799 16.719219,37.419752 16.82942,36.837262 C 17.459135,36.963202 18.104599,37.026176 18.750063,37.026176 L 30.258211,37.026176 C 37.547237,37.026176 43.466612,29.39081 43.466612,19.960741 C 43.466612,10.530672 37.578724,4.1232594 30.27396,4.1232594 z "
+ style="fill:#bfdce8;fill-opacity:1"
+ id="path142" />
+ <path
+ d="M 19.200879,5.5648899 C 12.490241,5.5648899 7.0622987,11.295775 7.0622987,19.690323 C 7.0622987,22.890926 7.8418023,25.879852 9.1910836,28.332288 C 8.6113289,26.599889 8.2852163,24.667826 8.2852163,22.673336 C 8.2852163,14.629768 13.495502,9.1620492 19.925575,9.1620492 L 30.071259,9.1620492 C 36.515213,9.1620492 41.711609,14.616311 41.711609,22.673336 C 41.864688,21.709218 41.983366,20.710908 41.983366,19.690323 C 41.983366,11.281743 36.524624,5.5648899 29.799492,5.5648899 L 19.200879,5.5648899 z "
+ style="fill:#ffffff"
+ id="path2358" />
+ <path
+ d="M 28.241965,33.725087 L 20.792252,33.725087 C 16.073756,33.725087 12.241894,32.944782 12.241894,26.850486 C 12.241894,25.10387 12.368512,23.572125 15.515722,23.567487 L 33.508301,23.540969 C 36.182481,23.537028 36.782127,24.950794 36.782127,26.850486 C 36.782127,32.95497 32.970649,33.725087 28.241965,33.725087 z "
+ style="fill:#d0ecf9;fill-opacity:1"
+ id="path2173" />
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/pacemaker-logo.png b/doc/publican-clusterlabs/en-US/images/pacemaker-logo.png
new file mode 100644
index 0000000000..f5ea2c68a3
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/pacemaker-logo.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/pacemaker_logo.svg b/doc/publican-clusterlabs/en-US/images/pacemaker_logo.svg
new file mode 100644
index 0000000000..a86bea1b06
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/pacemaker_logo.svg
@@ -0,0 +1,145 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2886"
+ version="1.1"
+ inkscape:version="0.46+devel"
+ width="230"
+ height="145"
+ sodipodi:docname="title_logo.svg"
+ style="display:inline">
+ <metadata
+ id="metadata2892">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs2890">
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient3701">
+ <stop
+ style="stop-color:#6289bd;stop-opacity:1;"
+ offset="0"
+ id="stop3703" />
+ <stop
+ style="stop-color:#6289bd;stop-opacity:0;"
+ offset="1"
+ id="stop3705" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ id="perspective2894" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3701"
+ id="linearGradient3707"
+ x1="451.97369"
+ y1="895.57751"
+ x2="1263.8524"
+ y2="83.698822"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1462"
+ inkscape:window-height="974"
+ id="namedview2888"
+ showgrid="false"
+ inkscape:zoom="3.4736842"
+ inkscape:cx="85.212121"
+ inkscape:cy="78.235112"
+ inkscape:window-x="204"
+ inkscape:window-y="25"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="layer1">
+ <sodipodi:guide
+ position="3.7424242,29.939394"
+ orientation="0,285"
+ id="guide3677" />
+ <sodipodi:guide
+ position="284.42424,175.89394"
+ orientation="-180,0"
+ id="guide3679" />
+ <sodipodi:guide
+ position="285,180"
+ orientation="0,-285"
+ id="guide3681" />
+ <sodipodi:guide
+ position="0,180"
+ orientation="180,0"
+ id="guide3683" />
+ </sodipodi:namedview>
+ <g
+ inkscape:groupmode="layer"
+ id="layer6"
+ inkscape:label="ECG Native"
+ style="display:inline">
+ <path
+ style="fill:none;stroke:#4dae10;stroke-width:3.44250011;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
+ d="m 4.4067411,108.17103 31.5251669,0 c 0,0 14.237168,-12.881264 21.355755,-11.864314 10.575101,0.94188 12.74028,11.501184 14.576153,11.864314 l 24.067596,0.33898 7.796548,14.23717 7.45757,-119.6600386 8.13552,134.9141486 7.79655,-29.83026 29.83025,0 c 0,0 16.61004,-19.660864 23.38965,-19.660864 6.77961,0 15.25411,19.660864 15.25411,19.660864 l 30.50822,0"
+ id="path3779"
+ sodipodi:nodetypes="ccccccccccscc" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer1"
+ inkscape:label="Circle"
+ style="display:inline">
+ <path
+ sodipodi:type="arc"
+ style="fill:url(#linearGradient3707);fill-opacity:1"
+ id="path3699"
+ sodipodi:cx="857.91302"
+ sodipodi:cy="489.63815"
+ sodipodi:rx="405.93933"
+ sodipodi:ry="405.93933"
+ d="m 1263.8524,489.63815 a 405.93933,405.93933 0 1 1 -811.87871,0 405.93933,405.93933 0 1 1 811.87871,0 z"
+ transform="matrix(0.12859795,0,0,0.12859795,3.2325424,27.916571)" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer4"
+ inkscape:label="Pacemaker"
+ style="display:inline">
+ <text
+ xml:space="preserve"
+ style="font-size:8.10000038px;font-style:normal;font-weight:normal;line-height:125%;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+ x="0"
+ y="85.459358"
+ id="text2919"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan2921"
+ x="0"
+ y="85.459358"
+ style="font-size:38.88000107px;font-style:normal;font-variant:normal;font-weight:200;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:#bd4c4b;fill-opacity:1;font-family:Dakota;-inkscape-font-specification:Dakota Thin">Pacemaker</tspan></text>
+ </g>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/shine.png b/doc/publican-clusterlabs/en-US/images/shine.png
new file mode 100644
index 0000000000..a18f7c4612
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/shine.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-back.png b/doc/publican-clusterlabs/en-US/images/stock-go-back.png
new file mode 100644
index 0000000000..00850b21b2
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/stock-go-back.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-back.svg b/doc/publican-clusterlabs/en-US/images/stock-go-back.svg
new file mode 100644
index 0000000000..b3b908143f
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/stock-go-back.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="22"
+ height="22"
+ id="svgstock-go-back">
+ <defs
+ id="defsstock-go-back" />
+ <text
+ x="11"
+ y="11"
+ transform="scale(0.89833804,1.1131667)"
+ id="textstock-go-back"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="13"
+ y="13"
+ id="tspanstock-go-back">stock-go-back</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-forward.png b/doc/publican-clusterlabs/en-US/images/stock-go-forward.png
new file mode 100644
index 0000000000..cc2797a468
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/stock-go-forward.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-forward.svg b/doc/publican-clusterlabs/en-US/images/stock-go-forward.svg
new file mode 100644
index 0000000000..7ee607b48f
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/stock-go-forward.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="22"
+ height="22"
+ id="svgstock-go-forward">
+ <defs
+ id="defsstock-go-forward" />
+ <text
+ x="11"
+ y="11"
+ transform="scale(0.89833804,1.1131667)"
+ id="textstock-go-forward"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="13"
+ y="13"
+ id="tspanstock-go-forward">stock-go-forward</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-up.png b/doc/publican-clusterlabs/en-US/images/stock-go-up.png
new file mode 100644
index 0000000000..1ebf2799c3
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/stock-go-up.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/stock-go-up.svg b/doc/publican-clusterlabs/en-US/images/stock-go-up.svg
new file mode 100644
index 0000000000..a7c2af2f15
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/stock-go-up.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="22"
+ height="22"
+ id="svgstock-go-up">
+ <defs
+ id="defsstock-go-up" />
+ <text
+ x="11"
+ y="11"
+ transform="scale(0.89833804,1.1131667)"
+ id="textstock-go-up"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="13"
+ y="13"
+ id="tspanstock-go-up">stock-go-up</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/stock-home.png b/doc/publican-clusterlabs/en-US/images/stock-home.png
new file mode 100644
index 0000000000..3f0c190634
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/stock-home.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/stock-home.svg b/doc/publican-clusterlabs/en-US/images/stock-home.svg
new file mode 100644
index 0000000000..d06a54480e
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/stock-home.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="22"
+ height="22"
+ id="svgstock-home">
+ <defs
+ id="defsstock-home" />
+ <text
+ x="11"
+ y="11"
+ transform="scale(0.89833804,1.1131667)"
+ id="textstock-home"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="13"
+ y="13"
+ id="tspanstock-home">stock-home</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/title_logo.png b/doc/publican-clusterlabs/en-US/images/title_logo.png
new file mode 100644
index 0000000000..aa7915a690
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/title_logo.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/title_logo.svg b/doc/publican-clusterlabs/en-US/images/title_logo.svg
new file mode 100644
index 0000000000..a86bea1b06
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/title_logo.svg
@@ -0,0 +1,145 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2886"
+ version="1.1"
+ inkscape:version="0.46+devel"
+ width="230"
+ height="145"
+ sodipodi:docname="title_logo.svg"
+ style="display:inline">
+ <metadata
+ id="metadata2892">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs2890">
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient3701">
+ <stop
+ style="stop-color:#6289bd;stop-opacity:1;"
+ offset="0"
+ id="stop3703" />
+ <stop
+ style="stop-color:#6289bd;stop-opacity:0;"
+ offset="1"
+ id="stop3705" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ id="perspective2894" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3701"
+ id="linearGradient3707"
+ x1="451.97369"
+ y1="895.57751"
+ x2="1263.8524"
+ y2="83.698822"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1462"
+ inkscape:window-height="974"
+ id="namedview2888"
+ showgrid="false"
+ inkscape:zoom="3.4736842"
+ inkscape:cx="85.212121"
+ inkscape:cy="78.235112"
+ inkscape:window-x="204"
+ inkscape:window-y="25"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="layer1">
+ <sodipodi:guide
+ position="3.7424242,29.939394"
+ orientation="0,285"
+ id="guide3677" />
+ <sodipodi:guide
+ position="284.42424,175.89394"
+ orientation="-180,0"
+ id="guide3679" />
+ <sodipodi:guide
+ position="285,180"
+ orientation="0,-285"
+ id="guide3681" />
+ <sodipodi:guide
+ position="0,180"
+ orientation="180,0"
+ id="guide3683" />
+ </sodipodi:namedview>
+ <g
+ inkscape:groupmode="layer"
+ id="layer6"
+ inkscape:label="ECG Native"
+ style="display:inline">
+ <path
+ style="fill:none;stroke:#4dae10;stroke-width:3.44250011;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
+ d="m 4.4067411,108.17103 31.5251669,0 c 0,0 14.237168,-12.881264 21.355755,-11.864314 10.575101,0.94188 12.74028,11.501184 14.576153,11.864314 l 24.067596,0.33898 7.796548,14.23717 7.45757,-119.6600386 8.13552,134.9141486 7.79655,-29.83026 29.83025,0 c 0,0 16.61004,-19.660864 23.38965,-19.660864 6.77961,0 15.25411,19.660864 15.25411,19.660864 l 30.50822,0"
+ id="path3779"
+ sodipodi:nodetypes="ccccccccccscc" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer1"
+ inkscape:label="Circle"
+ style="display:inline">
+ <path
+ sodipodi:type="arc"
+ style="fill:url(#linearGradient3707);fill-opacity:1"
+ id="path3699"
+ sodipodi:cx="857.91302"
+ sodipodi:cy="489.63815"
+ sodipodi:rx="405.93933"
+ sodipodi:ry="405.93933"
+ d="m 1263.8524,489.63815 a 405.93933,405.93933 0 1 1 -811.87871,0 405.93933,405.93933 0 1 1 811.87871,0 z"
+ transform="matrix(0.12859795,0,0,0.12859795,3.2325424,27.916571)" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer4"
+ inkscape:label="Pacemaker"
+ style="display:inline">
+ <text
+ xml:space="preserve"
+ style="font-size:8.10000038px;font-style:normal;font-weight:normal;line-height:125%;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+ x="0"
+ y="85.459358"
+ id="text2919"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan2921"
+ x="0"
+ y="85.459358"
+ style="font-size:38.88000107px;font-style:normal;font-variant:normal;font-weight:200;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:#bd4c4b;fill-opacity:1;font-family:Dakota;-inkscape-font-specification:Dakota Thin">Pacemaker</tspan></text>
+ </g>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/warning.png b/doc/publican-clusterlabs/en-US/images/warning.png
new file mode 100644
index 0000000000..94b69d1ff1
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/warning.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/warning.svg b/doc/publican-clusterlabs/en-US/images/warning.svg
new file mode 100644
index 0000000000..4231e5ac04
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/warning.svg
@@ -0,0 +1,130 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.0"
+ width="48"
+ height="48"
+ id="svg5921"
+ sodipodi:version="0.32"
+ inkscape:version="0.46"
+ sodipodi:docname="warning.svg"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape">
+ <metadata
+ id="metadata11">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ inkscape:window-height="975"
+ inkscape:window-width="1680"
+ inkscape:pageshadow="2"
+ inkscape:pageopacity="0.0"
+ guidetolerance="10.0"
+ gridtolerance="10.0"
+ objecttolerance="10.0"
+ borderopacity="1.0"
+ bordercolor="#666666"
+ pagecolor="#ffffff"
+ id="base"
+ showgrid="false"
+ inkscape:zoom="1"
+ inkscape:cx="49.390126"
+ inkscape:cy="6.0062258"
+ inkscape:window-x="0"
+ inkscape:window-y="25"
+ inkscape:current-layer="svg5921" />
+ <defs
+ id="defs5923">
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient2400">
+ <stop
+ style="stop-color:#fac521;stop-opacity:1;"
+ offset="0"
+ id="stop2402" />
+ <stop
+ style="stop-color:#fde7a3;stop-opacity:1"
+ offset="1"
+ id="stop2404" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 20 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="40 : 20 : 1"
+ inkscape:persp3d-origin="20 : 13.333333 : 1"
+ id="perspective13" />
+ <inkscape:perspective
+ id="perspective2396"
+ inkscape:persp3d-origin="24 : 16 : 1"
+ inkscape:vp_z="48 : 24 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 24 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective2394"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2400"
+ id="linearGradient2406"
+ x1="-2684.8242"
+ y1="1639.8413"
+ x2="-2684.8242"
+ y2="1587.1559"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <g
+ transform="matrix(0.4536635,0,0,0.4536635,-5.1836431,-4.6889387)"
+ id="layer1">
+ <g
+ transform="translate(2745.6887,-1555.5977)"
+ id="g8304"
+ style="enable-background:new" />
+ </g>
+ <g
+ id="g3189"
+ transform="matrix(1.2987724,0,0,1.2987724,-1.4964485,-1.8271549)">
+ <path
+ style="opacity:1;fill:#2e3436;fill-opacity:1;stroke:none;stroke-opacity:1;enable-background:new"
+ id="path8034"
+ transform="matrix(0.3735251,4.0822414e-3,-4.0822414e-3,0.3735251,605.96125,-374.33682)"
+ d="M -1603,1054.4387 L -1577.0919,1027.891 L -1540,1027.4387 L -1513.4523,1053.3468 L -1513,1090.4387 L -1538.9081,1116.9864 L -1576,1117.4387 L -1602.5477,1091.5306 L -1603,1054.4387 z" />
+ <path
+ style="opacity:1;fill:url(#linearGradient2406);fill-opacity:1;stroke:none;stroke-width:0.72954363000000000;stroke-opacity:1;enable-background:new"
+ id="path8036"
+ d="M -2723.6739,1596.2775 L -2704.5623,1577.1175 L -2677.5001,1577.0833 L -2658.3401,1596.1949 L -2658.3059,1623.257 L -2677.4175,1642.417 L -2704.4797,1642.4513 L -2723.6396,1623.3396 L -2723.6739,1596.2775 z"
+ transform="matrix(0.4536635,0,0,0.4536635,1240.4351,-710.40684)" />
+ <path
+ transform="translate(6.7837002e-6,-8.8630501e-6)"
+ id="path3178"
+ d="M 13.46875,5.0625 L 4.8125,13.78125 L 4.8125,16.625 L 13.46875,7.9375 L 25.75,7.90625 L 34.4375,16.59375 L 34.4375,13.71875 L 25.75,5.0625 L 13.46875,5.0625 z"
+ style="opacity:1;fill:#fde8a6;fill-opacity:1;stroke:none;stroke-width:0.72954363;stroke-opacity:1;enable-background:new" />
+ <path
+ id="path4412"
+ style="fill:#fef2cb;fill-opacity:1;stroke:#fef2cb;stroke-width:0.9430126;stroke-linecap:round;stroke-linejoin:round;stroke-opacity:1"
+ d="M 23.308501,28.806303 C 23.308501,30.239154 22.087319,31.313792 20.231121,31.313792 L 20.198559,31.313792 C 18.358657,31.313792 17.121188,30.239154 17.121188,28.806303 C 17.121188,27.308327 18.391219,26.282537 20.231121,26.282537 C 22.054757,26.282537 23.27593,27.308327 23.308501,28.806303 z M 22.982851,24.507759 L 24.057489,11.351592 L 16.355915,11.351592 L 17.430553,24.507759 L 22.982851,24.507759 z" />
+ <path
+ id="path4414"
+ style="fill:#2e3436"
+ d="M 22.732962,27.86025 C 22.732962,29.293101 21.51178,30.36774 19.655592,30.36774 L 19.623029,30.36774 C 17.783118,30.36774 16.545659,29.293101 16.545659,27.86025 C 16.545659,26.362275 17.81568,25.336485 19.655592,25.336485 C 21.479218,25.336485 22.7004,26.362275 22.732962,27.86025 z M 22.407312,23.561697 L 23.48195,10.40553 L 15.780385,10.40553 L 16.855023,23.561697 L 22.407312,23.561697 z" />
+ </g>
+</svg>
diff --git a/doc/publican-clusterlabs/en-US/images/watermark-draft.png b/doc/publican-clusterlabs/en-US/images/watermark-draft.png
new file mode 100644
index 0000000000..0ead5af8bb
Binary files /dev/null and b/doc/publican-clusterlabs/en-US/images/watermark-draft.png differ
diff --git a/doc/publican-clusterlabs/en-US/images/watermark-draft.svg b/doc/publican-clusterlabs/en-US/images/watermark-draft.svg
new file mode 100644
index 0000000000..4ecbf37fec
--- /dev/null
+++ b/doc/publican-clusterlabs/en-US/images/watermark-draft.svg
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.0"
+ width="500"
+ height="500"
+ id="svgwatermark-draft">
+ <defs
+ id="defswatermark-draft" />
+ <text
+ x="250"
+ y="250"
+ transform="scale(0.89833804,1.1131667)"
+ id="textwatermark-draft"
+ xml:space="preserve"
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Liberation Serif;"><tspan
+ x="252"
+ y="252"
+ id="tspanwatermark-draft">watermark-draft</tspan></text>
+</svg>
diff --git a/doc/publican-clusterlabs/overrides.cfg b/doc/publican-clusterlabs/overrides.cfg
new file mode 100644
index 0000000000..4505371868
--- /dev/null
+++ b/doc/publican-clusterlabs/overrides.cfg
@@ -0,0 +1,5 @@
+# Config::Simple 4.59
+# Thu Nov 12 09:56:27 2009
+
+strict: 0
+
diff --git a/doc/publican-clusterlabs/publican-clusterlabs.spec b/doc/publican-clusterlabs/publican-clusterlabs.spec
new file mode 100644
index 0000000000..9d0cbe73a4
--- /dev/null
+++ b/doc/publican-clusterlabs/publican-clusterlabs.spec
@@ -0,0 +1,43 @@
+%define brand clusterlabs
+
+Name: publican-clusterlabs
+Summary: Common documentation files for %{brand}
+Version: 0.1
+Release: 0%{?dist}
+License: SETUP: Set This
+Group: Applications/Text
+Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Buildarch: noarch
+Source: https://www.SETUP.set.me.example.com/source/%{name}-%{version}.tgz
+Requires: publican >= 1.0
+BuildRequires: publican >= 1.0
+URL: https://www.SETUP.set.me.example.com
+
+%description
+This package provides common files and templates needed to build documentation
+for %{brand} with publican.
+
+%prep
+%setup -q
+
+%build
+publican build --formats=xml --langs=all --publish
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p -m755 $RPM_BUILD_ROOT%{_datadir}/publican/Common_Content
+publican installbrand --path=$RPM_BUILD_ROOT%{_datadir}/publican/Common_Content
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%doc README
+%doc COPYING
+%{_datadir}/publican/Common_Content/%{brand}
+
+%changelog
+* Thu Nov 12 2009 SETUP:YourName <SETUP:your.email@example.com> 0.1
+- Created Brand
+
diff --git a/doc/publican-clusterlabs/publican.cfg b/doc/publican-clusterlabs/publican.cfg
new file mode 100644
index 0000000000..f48c15064a
--- /dev/null
+++ b/doc/publican-clusterlabs/publican.cfg
@@ -0,0 +1,9 @@
+# Config::Simple 4.59
+# Thu Nov 12 09:56:27 2009
+
+version: 0.1
+xml_lang: en-US
+release: 0
+type: brand
+brand: clusterlabs
+
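A book that wants this brand would reference it from its own publican.cfg; a minimal sketch (not part of this patch, key names follow the Publican conventions used above):

    # book-level publican.cfg (hypothetical)
    xml_lang: en-US
    brand: clusterlabs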
diff --git a/doc/publican-clusterlabs/xsl/common.xsl b/doc/publican-clusterlabs/xsl/common.xsl
new file mode 100644
index 0000000000..7e4daca81f
--- /dev/null
+++ b/doc/publican-clusterlabs/xsl/common.xsl
@@ -0,0 +1,57 @@
+<?xml version='1.0'?>
+
+<!--
+ Copyright 2009 Andrew Beekhof
+ License: GPL
+ Author: Andrew Beekhof <andrew@beekhof.net>
+-->
+
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY lowercase "'abcdefghijklmnopqrstuvwxyz'">
+<!ENTITY uppercase "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'">
+ ]>
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'
+ xmlns="http://www.w3.org/TR/xhtml1/transitional"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ exclude-result-prefixes="#default">
+
+<xsl:param name="title.color">#843A39</xsl:param>
+<!-- http://docbook.sourceforge.net/release/xsl/current/doc/html/generate.toc.html -->
+<xsl:param name="generate.toc">
+appendix toc,title
+article/appendix nop
+article toc,title
+book toc,title,figure,table,example,equation
+chapter toc,title
+part toc,title
+preface toc,title
+qandadiv nop
+qandaset nop
+reference toc,title
+sect1 nop
+sect2 nop
+sect3 nop
+sect4 nop
+sect5 nop
+section nop
+set toc,title
+<!-- publican defaults
+set toc
+book toc,qandadiv
+article toc
+chapter nop
+qandadiv nop
+qandaset nop
+sect1 nop
+sect2 nop
+sect3 nop
+sect4 nop
+sect5 nop
+section nop
+part nop
+-->
+</xsl:param>
+
+</xsl:stylesheet>
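Each line of the generate.toc value above pairs a DocBook element with the pieces to auto-generate: the book entry yields a titled table of contents plus lists of figures, tables, examples and equations, while the sect1 through section "nop" entries suppress per-section contents; the commented block keeps Publican's defaults around for reference.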
diff --git a/doc/publican-clusterlabs/xsl/html-single.xsl b/doc/publican-clusterlabs/xsl/html-single.xsl
new file mode 100644
index 0000000000..c55bf35db5
--- /dev/null
+++ b/doc/publican-clusterlabs/xsl/html-single.xsl
@@ -0,0 +1,24 @@
+<?xml version='1.0'?>
+
+<!--
+ Copyright 2009 Andrew Beekhof
+ License: GPL
+ Author: Andrew Beekhof <andrew@beekhof.net>
+-->
+
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY lowercase "'abcdefghijklmnopqrstuvwxyz'">
+<!ENTITY uppercase "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'">
+ ]>
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'
+ xmlns="http://www.w3.org/TR/xhtml1/transitional"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ exclude-result-prefixes="#default">
+
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/docbook.xsl"/>
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/graphics.xsl"/>
+<xsl:import href="../../../xsl/html-single.xsl"/>
+<xsl:import href="common.xsl"/>
+</xsl:stylesheet>
diff --git a/doc/publican-clusterlabs/xsl/html.xsl b/doc/publican-clusterlabs/xsl/html.xsl
new file mode 100644
index 0000000000..847a595da3
--- /dev/null
+++ b/doc/publican-clusterlabs/xsl/html.xsl
@@ -0,0 +1,30 @@
+<?xml version='1.0'?>
+
+<!--
+ Copyright 2009 Andrew Beekhof
+ License: GPL
+ Author: Andrew Beekhof <andrew@beekhof.net>
+-->
+
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY lowercase "'abcdefghijklmnopqrstuvwxyz'">
+<!ENTITY uppercase "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'">
+ ]>
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'
+ xmlns="http://www.w3.org/TR/xhtml1/transitional"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ exclude-result-prefixes="#default">
+
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/docbook.xsl"/>
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/graphics.xsl"/>
+<xsl:import href="../../../xsl/html.xsl"/>
+<xsl:import href="common.xsl"/>
+
+<xsl:template name="user.head.content">
+ <xsl:variable name="codefile" select="document('header.html',/)"/>
+ <xsl:copy-of select="$codefile/htmlcode/node()"/>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/doc/publican-clusterlabs/xsl/pdf.xsl b/doc/publican-clusterlabs/xsl/pdf.xsl
new file mode 100644
index 0000000000..5d2f25c339
--- /dev/null
+++ b/doc/publican-clusterlabs/xsl/pdf.xsl
@@ -0,0 +1,26 @@
+<?xml version='1.0'?>
+
+<!--
+ Copyright 2009 Andrew Beekhof
+ License: GPL
+ Author: Andrew Beekhof <andrew@beekhof.net>
+-->
+
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY lowercase "'abcdefghijklmnopqrstuvwxyz'">
+<!ENTITY uppercase "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'">
+ ]>
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'
+ xmlns="http://www.w3.org/TR/xhtml1/transitional"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ exclude-result-prefixes="#default">
+
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/docbook.xsl"/>
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/graphics.xsl"/>
+<xsl:import href="../../../xsl/pdf.xsl"/>
+<xsl:import href="common.xsl"/>
+<xsl:param name="admon.graphics.extension" select="'.svg'"/>
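+<!-- Selecting .svg here presumably makes the PDF build pick up the SVG
+     admonition icons (note.svg, important.svg, warning.svg) shipped with this
+     brand rather than the stock PNGs; the related admon.graphics.path
+     parameter could be set the same way if the directory ever needs overriding. -->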
+
+</xsl:stylesheet>
diff --git a/extra/resources/HealthSMART b/extra/resources/HealthSMART
index 6a0801fc42..2c78a87b30 100644
--- a/extra/resources/HealthSMART
+++ b/extra/resources/HealthSMART
@@ -1,278 +1,287 @@
#!/bin/sh
#
#
# HealthSMART OCF RA. Checks the S.M.A.R.T. status of all given
# drives and writes the #health-smart status into the CIB
#
# Copyright (c) 2009 Michael Schwartzkopff
#
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
################################
#
# TODO:
# - All
# - Enable drive parameter with a loop.
# - Error handling if smart does not give temperature.
#
##################################
#######################################################################
# Initialization:
. ${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs
#
SMARTCTL=/usr/sbin/smartctl
#######################################################################
meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="HealthSMART" version="0.1">
<version>0.1</version>
<longdesc lang="en">
System health agent that checks the S.M.A.R.T. status of the given drives and
updates the #health-smart attribute.
</longdesc>
<shortdesc lang="en">SMART health status</shortdesc>
<parameters>
<parameter name="state" unique="1">
<longdesc lang="en">
Location to store the resource state in.
</longdesc>
<shortdesc lang="en">State file</shortdesc>
<content type="string" default="${HA_VARRUN}/health-smart-{OCF_RESOURCE_INSTANCE}.state" />
</parameter>
<parameter name="drives" unique="1">
<longdesc lang="en">
The drives to check as a SPACE separated list. Enter only the part after "/dev/", e.g. "sda".
At the moment /dev/sda is hard coded. Sorry.
</longdesc>
<shortdesc lang="en">Drives to check</shortdesc>
<content type="string" default="sda" />
</parameter>
<parameter name="temp_lower_limit" unique="1">
<longdesc lang="en">
Lower limit of the temperature in deg C of the drive(s). Below this limit the status will be red. The yellow limit is 5 deg C more than this value.
</longdesc>
<shortdesc lang="en">Lower limit for the temperature of the drive(s)</shortdesc>
<content type="string" default="0"/>
</parameter>
<parameter name="temp_upper_limit" unique="1">
<longdesc lang="en">
Upper limit of the temperature in deg C of the drive(s). If the drive reports
a temperature higher than this value the status of #health-smart will be red.
The yellow limit is 5 deg C below this value.
</longdesc>
<shortdesc lang="en">Upper limit for red smart attribute</shortdesc>
<content type="string" default="60"/>
</parameter>
</parameters>
<actions>
<action name="start" timeout="10" />
<action name="stop" timeout="10" />
<action name="monitor" timeout="10" interval="10" start-delay="0" />
<action name="meta-data" timeout="5" />
<action name="validate-all" timeout="10" />
</actions>
</resource-agent>
END
}
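# The metadata above maps directly onto a cluster configuration; a minimal
# sketch with the crm shell (resource names are hypothetical, and the agent is
# assumed to be installed under the ocf:pacemaker provider):
#   crm configure primitive health-smart ocf:pacemaker:HealthSMART \
#     params drives="sda" temp_lower_limit="10" temp_upper_limit="55" \
#     op monitor interval="60s" timeout="10s"
#   crm configure clone cl-health-smart health-smart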
#######################################################################
+init_smart() {
+
+ if [ "x${OCF_RESKEY_temp_lower_limit}" = "x" ] ; then
+ lower_red_limit=0
+ lower_yellow_limit=5
+ else
+ lower_red_limit=${OCF_RESKEY_temp_lower_limit}
+ let lower_yellow_limit=${OCF_RESKEY_temp_lower_limit}+5
+ fi
+
+ if [ "x${OCF_RESKEY_temp_upper_limit}" = "x" ] ; then
+ upper_red_limit=60
+ upper_yellow_limit=55
+ else
+ upper_red_limit=${OCF_RESKEY_temp_upper_limit}
+ let upper_yellow_limit=${OCF_RESKEY_temp_upper_limit}-5
+ fi
+
+ if [ "x${OCF_RESKEY_drives}" = "x" ] ; then
+ DRIVES="sda"
+ else
+ DRIVES=${OCF_RESKEY_drives}
+ fi
+
+ # echo "Drives: "$DRIVES, "Lower limits: "$lower_red_limit, $lower_yellow_limit, "Upper limits: "$upper_red_limit, $upper_yellow_limit
+
+ if [ ! -x $SMARTCTL ] ; then
+ ocf_log err $SMARTCTL" not installed."
+ exit $OCF_ERR_INSTALLED
+ fi
+
+ $SMARTCTL -i $DRIVE | grep -q "SMART support is: Enabled"
+ ret=$?
+ if [ $ret -ne "0" ] ; then
+ ocf_log err "S.M.A.R.T. not enabled for drive /dev/"${DRIVE}
+ exit $OCF_ERR_INSTALLED
+ fi
+
+}
+
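# Worked example of the thresholds derived above: with temp_upper_limit=60 the
# drive is reported "red" above 60 deg C and "yellow" above 55 deg C; with
# temp_lower_limit=10 it is "red" below 10 deg C and "yellow" below 15 deg C.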
# don't exit on TERM, to test that lrmd makes sure that we do exit
trap sigterm_handler TERM
sigterm_handler() {
ocf_log info "They use TERM to bring us down. No such luck."
return
}
dummy_usage() {
cat <<END
usage: $0 {start|stop|monitor|validate-all|meta-data}
Expects to have a fully populated OCF RA-compliant environment set.
END
}
dummy_start() {
dummy_monitor
if [ $? = $OCF_SUCCESS ]; then
return $OCF_SUCCESS
fi
touch ${OCF_RESKEY_state}
}
dummy_stop() {
dummy_monitor
if [ $? = $OCF_SUCCESS ]; then
rm ${OCF_RESKEY_state}
fi
return $OCF_SUCCESS
}
dummy_monitor() {
+
+ init_smart
+
# Monitor _MUST!_ differentiate correctly between running
# (SUCCESS), failed (ERROR) or _cleanly_ stopped (NOT RUNNING).
# That is THREE states, not just yes/no.
if [ -f ${OCF_RESKEY_state} ]; then
# Check overall S.M.A.R.T. status
#
$SMARTCTL -H /dev/sda | grep -q "SMART overall-health self-assessment test result: PASSED"
ret=$?
if [ $ret -ne "0" ]; then
/usr/sbin/attrd_updater -n "#health-smart" -U "red" -d "5s"
return $OCF_SUCCESS
fi
# Check drive temperature
#
TEMP=`$SMARTCTL -A /dev/sda | awk '/^194/ { print $10 }'`
echo "Temp = "$TEMP
if [[ ${TEMP} -lt ${lower_red_limit} ]] ; then
ocf_log info "Drive /dev/sda too cold."
attrd_updater -n "#health-smart" -U "red" -d "5s"
return $OCF_SUCCESS
fi
if [[ $TEMP -gt ${upper_red_limit} ]] ; then
ocf_log info "Drive /dev/sda too hot."
attrd_updater -n "#health-smart" -U "red" -d "5s"
return $OCF_SUCCESS
fi
if [[ $TEMP -lt ${lower_yellow_limit} ]] ; then
ocf_log info "Drive /dev/sda quite cold."
attrd_updater -n "#health-smart" -U "yellow" -d "5s"
return $OCF_SUCCESS
fi
if [[ $TEMP -gt ${upper_yellow_limit} ]] ; then
ocf_log info "Drive /dev/sda quite hot."
attrd_updater -n "#health-smart" -U "yellow" -d "5s"
return $OCF_SUCCESS
fi
attrd_updater -n "#health-smart" -U "green" -d "5s"
return $OCF_SUCCESS
fi
if false ; then
return $OCF_ERR_GENERIC
fi
return $OCF_NOT_RUNNING
}
dummy_validate() {
+
+ init_smart
# Is the state directory writable?
state_dir=`dirname "$OCF_RESKEY_state"`
touch "$state_dir/$$"
if [ $? != 0 ]; then
return $OCF_ERR_ARGS
fi
rm "$state_dir/$$"
return $OCF_SUCCESS
}
: ${OCF_RESKEY_CRM_meta_interval=0}
: ${OCF_RESKEY_CRM_meta_globally_unique:="true"}
if [ "x$OCF_RESKEY_state" = "x" ]; then
if [ ${OCF_RESKEY_CRM_meta_globally_unique} = "false" ]; then
state="${HA_VARRUN}/Dummy-${OCF_RESOURCE_INSTANCE}.state"
# Strip off the trailing clone marker
OCF_RESKEY_state=`echo $state | sed s/:[0-9][0-9]*\.state/.state/`
else
OCF_RESKEY_state="${HA_VARRUN}/Dummy-${OCF_RESOURCE_INSTANCE}.state"
fi
fi
-if [ "x${OCF_RESKEY_temp_lower_limit}" = "x" ] ; then
- lower_red_limit=0
- lower_yellow_limit=5
-else
- lower_red_limit=${OCF_RESKEY_temp_lower_limit}
- let lower_yellow_limit=${OCF_RESKEY_temp_lower_limit}+5
-fi
-
-if [ "x${OCF_RESKEY_temp_upper_limit}" = "x" ] ; then
- upper_red_limit=60
- upper_yellow_limit=55
-else
- upper_red_limit=${OCF_RESKEY_temp_upper_limit}
- let upper_yellow_limit=${OCF_RESKEY_temp_upper_limit}-5
-fi
-
-if [ "x${OCF_RESKEY_drives}" = "x" ] ; then
- DRIVES="sda"
-else
- DRIVES=${OCF_RESKEY_drives}
-fi
-
-echo "Drives: "$DRIVES, "Lower limits: "$lower_red_limit, $lower_yellow_limit, "Upper limits: "$upper_red_limit, $upper_yellow_limit
-
-if [ ! -x $SMARTCTL ] ; then
- ocf_log err $SMARTCTL" not installed."
- exit $OCF_ERR_INSTALLED
-fi
-
-$SMARTCTL -i $DRIVE | grep -q "SMART support is: Enabled"
-ret=$?
-if [ $ret -ne "0" ] ; then
- ocf_log err "S.M.A.R.T. not enabled for drive /dev/"${DRIVE}
- exit $OCF_ERR_INSTALLED
-fi
-
case $__OCF_ACTION in
meta-data) meta_data
exit $OCF_SUCCESS
;;
start) dummy_start;;
stop) dummy_stop;;
monitor) dummy_monitor;;
validate-all) dummy_validate;;
usage|help) dummy_usage
exit $OCF_SUCCESS
;;
*) dummy_usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
rc=$?
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
exit $rc
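The agent can also be exercised outside the cluster with ocf-tester from cluster-glue; a sketch (the installed path is an assumption):

    ocf-tester -n smart-test -o drives="sda" /usr/lib/ocf/resource.d/pacemaker/HealthSMART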
diff --git a/extra/resources/ping b/extra/resources/ping
index 5be18bc3a5..541c279863 100755
--- a/extra/resources/ping
+++ b/extra/resources/ping
@@ -1,301 +1,299 @@
#!/bin/sh
#
#
# Ping OCF RA that utilizes the system ping
#
# Copyright (c) 2009 Andrew Beekhof
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
#######################################################################
# Initialization:
. ${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs
#######################################################################
meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="ping">
<version>1.0</version>
<longdesc lang="en">
Every time the monitor action is run, this resource agent records (in the CIB) the current number of ping nodes the host can connect to.
It is essentially the same as pingd except that it uses the system ping tool to obtain the results.
</longdesc>
<shortdesc lang="en">node connectivity</shortdesc>
<parameters>
<parameter name="pidfile" unique="0">
<longdesc lang="en">PID file</longdesc>
<shortdesc lang="en">PID file</shortdesc>
<content type="string" default="$HA_VARRUN/ping-${OCF_RESOURCE_INSTANCE}" />
</parameter>
<parameter name="dampen" unique="0">
<longdesc lang="en">
The time to wait (dampening) before further changes occur
</longdesc>
<shortdesc lang="en">Dampening interval</shortdesc>
<content type="integer" default="5s"/>
</parameter>
<parameter name="name" unique="0">
<longdesc lang="en">
The name of the attribute to set. This is the name to be used in the constraints.
</longdesc>
<shortdesc lang="en">Attribute name</shortdesc>
<content type="integer" default="${OCF_RESOURCE_INSTANCE}"/>
</parameter>
<parameter name="multiplier" unique="0">
<longdesc lang="en">
The number by which to multiply the number of connected ping nodes
</longdesc>
<shortdesc lang="en">Value multiplier</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="host_list" unique="0">
<longdesc lang="en">
The list of ping nodes to count. Defaults to all configured ping nodes. Rarely needs to be specified.
</longdesc>
<shortdesc lang="en">Host list</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="attempts" unique="0">
<longdesc lang="en">
Number of ping attempts, per host, before declaring it dead
</longdesc>
<shortdesc lang="en">no. of ping attempts</shortdesc>
<content type="integer" default="2"/>
</parameter>
<parameter name="timeout" unique="0">
<longdesc lang="en">
How long, in seconds, to wait before declaring a ping lost
</longdesc>
<shortdesc lang="en">ping timeout in seconds</shortdesc>
<content type="integer" default="2"/>
</parameter>
<parameter name="options" unique="0">
<longdesc lang="en">
A catch-all for any other options that need to be passed to ping.
</longdesc>
<shortdesc lang="en">Extra Options</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="debug" unique="0">
<longdesc lang="en">
Enables verbose attrd_updater logging on every call.
</longdesc>
<shortdesc lang="en">Verbose logging</shortdesc>
<content type="string" default="false"/>
</parameter>
</parameters>
<actions>
-<action name="start" timeout="90" />
-<action name="stop" timeout="100" />
+<action name="start" timeout="60" />
+<action name="stop" timeout="20" />
<action name="reload" timeout="100" />
-<action name="monitor" depth="0" timeout="20" interval="10"/>
+<action name="monitor" depth="0" timeout="60" interval="10"/>
<action name="meta-data" timeout="5" />
<action name="validate-all" timeout="30" />
</actions>
</resource-agent>
END
}
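# A typical deployment (a sketch; resource and constraint names are
# hypothetical) clones the agent and keys a location constraint off the
# attribute it maintains, which defaults to "pingd":
#   crm configure primitive p-ping ocf:pacemaker:ping \
#     params host_list="192.168.1.1 192.168.1.2" multiplier="1000" \
#     op monitor interval="10s"
#   crm configure clone cl-ping p-ping
#   crm configure location l-connected my-resource \
#     rule -inf: not_defined pingd or pingd lte 0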
#######################################################################
# don't exit on TERM, to test that lrmd makes sure that we do exit
trap sigterm_handler TERM
sigterm_handler() {
ocf_log info "They use TERM to bring us down. No such luck."
return
}
ping_usage() {
cat <<END
usage: $0 {start|stop|monitor|migrate_to|migrate_from|validate-all|meta-data}
Expects to have a fully populated OCF RA-compliant environment set.
END
}
ping_start() {
ping_monitor
if [ $? = $OCF_SUCCESS ]; then
return $OCF_SUCCESS
fi
touch ${OCF_RESKEY_pidfile}
ping_update
}
ping_stop() {
- ping_monitor
- if [ $? = $OCF_SUCCESS ]; then
- rm ${OCF_RESKEY_pidfile}
- fi
+
+ rm -f ${OCF_RESKEY_pidfile}
attrd_updater -D -n $OCF_RESKEY_name -d $OCF_RESKEY_dampen $attrd_options
return $OCF_SUCCESS
}
ping_monitor() {
if [ -f ${OCF_RESKEY_pidfile} ]; then
ping_update
return $OCF_SUCCESS
fi
return $OCF_NOT_RUNNING
}
ping_validate() {
# Is the state directory writable?
state_dir=`dirname "$OCF_RESKEY_pidfile"`
touch "$state_dir/$$"
if [ $? != 0 ]; then
ocf_log err "Invalid location for 'state': $state_dir is not writable"
return $OCF_ERR_ARGS
fi
rm "$state_dir/$$"
# Pidfile better be an absolute path
case $OCF_RESKEY_pidfile in
/*) ;;
*) ocf_log warn "You should use an absolute path for the pidfile, not: $OCF_RESKEY_pidfile" ;;
esac
# Check the ping interval
if ocf_is_decimal "$OCF_RESKEY_CRM_meta_interval" && [ $OCF_RESKEY_CRM_meta_interval -gt 0 ]; then
:
else
ocf_log err "Invalid ping interval $OCF_RESKEY_interval. It should be positive integer!"
exit $OCF_ERR_CONFIGURED
fi
# Check the host list
if [ "x" = "x$OCF_RESKEY_host_list" ]; then
ocf_log err "Empty host_list. Please specify some nodes to ping"
exit $OCF_ERR_CONFIGURED
fi
# Check the debug option
if [ "x" = "x${OCF_RESKEY_debug}" ]; then
ocf_log err "Debug option not specified. Please specify 'true' or 'false'"
exit $OCF_ERR_CONFIGURED
elif [ "${OCF_RESKEY_debug}" != "true" -a "${OCF_RESKEY_debug}" != "false" ]; then
ocf_log err "Debug option specified is not correct. Please specify 'true' or 'false'"
exit $OCF_ERR_CONFIGURED
fi
check_binary ping
return $OCF_SUCCESS
}
ping_update() {
active=0
for host in $OCF_RESKEY_host_list; do
p_exe=ping
case `uname` in
Linux) p_args="-n -q -W $OCF_RESKEY_timeout -c $OCF_RESKEY_attempts";;
Darwin) p_args="-n -q -t $OCF_RESKEY_timeout -c $OCF_RESKEY_attempts -o";;
*) ocf_log err "Unknown host type: `uname`"; exit $OCF_ERR_INSTALLED;;
esac
case $host in
*:*) p_exe=ping6
esac
p_out=`$p_exe $p_args $OCF_RESKEY_options $host 2>&1`; rc=$?
case $rc in
0) active=`expr $active + 1`;;
1) ocf_log debug "$host is inactive: $p_out";;
*) ocf_log err "Unexpected result for '$p_exe $p_args $OCF_RESKEY_options $host' $rc: $p_out";;
esac
done
score=`expr $active \* $OCF_RESKEY_multiplier`
attrd_updater -n $OCF_RESKEY_name -v $score -d $OCF_RESKEY_dampen $attrd_options
}
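# Example of the arithmetic above: with three hosts in host_list, two of them
# reachable and multiplier=1000, the attribute is updated to 2 * 1000 = 2000.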
: ${OCF_RESKEY_name:="pingd"}
: ${OCF_RESKEY_dampen:="5s"}
: ${OCF_RESKEY_attempts:="5"}
: ${OCF_RESKEY_multiplier:="1"}
: ${OCF_RESKEY_debug:="false"}
-: ${OCF_RESKEY_CRM_meta_timeout=20}
-: ${OCF_RESKEY_CRM_meta_interval=10}
+: ${OCF_RESKEY_CRM_meta_timeout:="60"}
+: ${OCF_RESKEY_CRM_meta_interval="10"}
: ${OCF_RESKEY_CRM_meta_globally_unique:="true"}
if [ -z ${OCF_RESKEY_timeout} ]; then
OCF_RESKEY_timeout=`expr $OCF_RESKEY_CRM_meta_timeout / $OCF_RESKEY_attempts`
OCF_RESKEY_timeout=`expr $OCF_RESKEY_timeout / 1100` # Convert to seconds and finish 10% early
fi
if [ ${OCF_RESKEY_timeout} -lt 1 ]; then
OCF_RESKEY_timeout=5
elif [ ${OCF_RESKEY_timeout} -gt 1000 ]; then
# ping actually complains if this value is too high, 5 minutes is plenty
OCF_RESKEY_timeout=300
fi
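# Example of the timeout derivation above (assuming the lrmd passes
# OCF_RESKEY_CRM_meta_timeout in milliseconds): a 20000 ms operation timeout
# with 5 attempts gives 20000 / 5 / 1100 = 3 seconds per ping; with the plain
# "60" default set above, integer division yields 0 and the 5 second floor applies.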
if [ ${OCF_RESKEY_CRM_meta_globally_unique} = "false" ]; then
: ${OCF_RESKEY_pidfile:="$HA_VARRUN/ping-${OCF_RESKEY_name}"}
else
: ${OCF_RESKEY_pidfile:="$HA_VARRUN/ping-${OCF_RESOURCE_INSTANCE}"}
fi
attrd_options='-q'
if ocf_is_true ${OCF_RESKEY_debug} ; then
attrd_options=''
fi
case $__OCF_ACTION in
meta-data) meta_data
exit $OCF_SUCCESS
;;
start) ping_start;;
stop) ping_stop;;
monitor) ping_monitor;;
reload) ping_start;;
validate-all) ping_validate;;
usage|help) ping_usage
exit $OCF_SUCCESS
;;
*) ping_usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
exit $?
diff --git a/pacemaker.spec b/pacemaker.spec
index 1b38ba3284..0f0e8ef3aa 100644
--- a/pacemaker.spec
+++ b/pacemaker.spec
@@ -1,1263 +1,1282 @@
%global gname haclient
%global uname hacluster
-%global with_ais_support 1
-%global with_heartbeat_support 1
%global pcmk_docdir %{_docdir}/%{name}
%global specversion 2
#global upstream_version ee19d8e83c2a
%global upstream_prefix pacemaker
# Keep around for when/if required
#global alphatag %{upstream_version}.hg
%global pcmk_release %{?alphatag:0.}%{specversion}%{?alphatag:.%{alphatag}}%{?dist}
+# Compatibility macro wrappers for legacy RPM versions that do not
+# support conditional builds
+%{!?bcond_without: %{expand: %%global bcond_without() %%{expand:%%%%{!?_without_%%{1}:%%%%global with_%%{1} 1}}}}
+%{!?bcond_with: %{expand: %%global bcond_with() %%{expand:%%%%{?_with_%%{1}:%%%%global with_%%{1} 1}}}}
+%{!?with: %{expand: %%global with() %%{expand:%%%%{?with_%%{1}:1}%%%%{!?with_%%{1}:0}}}}
+%{!?without: %{expand: %%global without() %%{expand:%%%%{?with_%%{1}:0}%%%%{!?with_%%{1}:1}}}}
+
+# Conditionals
+# Invoke "rpmbuild --without <feature>" or "rpmbuild --with <feature>"
+# to disable or enable specific features
+%bcond_without ais
+%bcond_without heartbeat
+# ESMTP is not available in RHEL, only in EPEL. Allow people to build
+# the RPM without ESMTP in case they choose not to use EPEL packages
+%bcond_without esmtp
+
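+# For example, to build without Heartbeat or ESMTP support (a sketch; the
+# _sourcedir macro must point at the directory holding pacemaker.tar.bz2):
+#   rpmbuild --without heartbeat --without esmtp -ba pacemaker.spec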
Name: pacemaker
Summary: Scalable High-Availability cluster resource manager
Version: 1.0.7
Release: %{pcmk_release}
License: GPLv2+ and LGPLv2+
Url: http://www.clusterlabs.org
Group: System Environment/Daemons
Source0: pacemaker.tar.bz2
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
AutoReqProv: on
Requires(pre): cluster-glue
Requires: resource-agents python
Conflicts: heartbeat < 2.99
%if 0%{?fedora} || 0%{?centos} > 4 || 0%{?rhel} > 4
Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
BuildRequires: help2man libtool-ltdl-devel
%endif
%if 0%{?suse_version}
# net-snmp-devel on SLES10 does not suck in tcpd-devel automatically
BuildRequires: help2man tcpd-devel
%endif
# Required for core functionality
BuildRequires: automake autoconf libtool pkgconfig
BuildRequires: glib2-devel cluster-glue-libs-devel libxml2-devel libxslt-devel
BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel gnutls-devel pam-devel
# Enables optional functionality
BuildRequires: ncurses-devel net-snmp-devel openssl-devel
-BuildRequires: libesmtp-devel lm_sensors-devel libselinux-devel
+BuildRequires: lm_sensors-devel libselinux-devel
+%if %{with esmtp}
+BuildRequires: libesmtp-devel
+%endif
-%if %with_ais_support
+%if %{with ais}
BuildRequires: corosynclib-devel
Requires: corosync
%endif
-%if %with_heartbeat_support
+%if %{with heartbeat}
BuildRequires: heartbeat-devel heartbeat-libs
Requires: heartbeat >= 3.0.0
%endif
%description
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
Available rpmbuild rebuild options:
--without : heartbeat ais
%package -n pacemaker-libs
License: GPLv2+ and LGPLv2+
Summary: Libraries used by the Pacemaker cluster resource manager and its clients
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
%description -n pacemaker-libs
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
%package -n pacemaker-libs-devel
License: GPLv2+ and LGPLv2+
Summary: Pacemaker development package
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: cluster-glue-libs-devel
Obsoletes: libpacemaker3
-%if %with_ais_support
+%if %{with ais}
Requires: corosynclib-devel
%endif
-%if %with_heartbeat_support
+%if %{with heartbeat}
Requires: heartbeat-devel
%endif
%description -n pacemaker-libs-devel
Headers and shared libraries for developing tools for Pacemaker.
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
%prep
%setup -q -n %{upstream_prefix}%{?upstream_version}
%build
./autogen.sh
# RHEL <= 5 does not support --docdir
export docdir=%{pcmk_docdir}
%{configure} --localstatedir=%{_var} --enable-fatal-warnings=no
make %{_smp_mflags} docdir=%{pcmk_docdir}
%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot} docdir=%{pcmk_docdir}
# Scripts that should be executable
chmod a+x %{buildroot}/%{_libdir}/heartbeat/hb2openais-helper.py
chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py
chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/OCFIPraTest.py
chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/extracttests.py
# These are not actually scripts
find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x
# Don't package static libs or compiled python
find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.pyc' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.pyo' -type f -print0 | xargs -0 rm -f
# Do not package these either
rm %{buildroot}/%{_libdir}/heartbeat/crm_primitive.py
+%if %{with ais}
rm %{buildroot}/%{_libdir}/service_crm.so
+%endif
%clean
rm -rf %{buildroot}
%post -n pacemaker-libs -p /sbin/ldconfig
%postun -n pacemaker-libs -p /sbin/ldconfig
%files
###########################################################
%defattr(-,root,root)
%exclude %{_datadir}/pacemaker/tests
%{_datadir}/pacemaker
%{_datadir}/snmp/mibs/PCMK-MIB.txt
%{_libdir}/heartbeat/*
%{_sbindir}/cibadmin
%{_sbindir}/crm_attribute
%{_sbindir}/crm_diff
%{_sbindir}/crm_failcount
%{_sbindir}/crm_master
%{_sbindir}/crm_mon
%{_sbindir}/crm
%{_sbindir}/crm_resource
%{_sbindir}/crm_standby
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
%{_sbindir}/attrd_updater
%{_sbindir}/ptest
%{_sbindir}/crm_shadow
%{_sbindir}/cibpipe
%{_sbindir}/crm_node
-%if %with_heartbeat_support
+%if %{with heartbeat}
%{_sbindir}/crm_uuid
%else
%exclude %{_sbindir}/crm_uuid
%endif
# Packaged elsewhere
%exclude %{pcmk_docdir}/AUTHORS
%exclude %{pcmk_docdir}/COPYING
%exclude %{pcmk_docdir}/COPYING.LIB
%doc %{pcmk_docdir}/crm_cli.txt
%doc %{pcmk_docdir}/crm_fencing.txt
%doc %{pcmk_docdir}/README.hb2openais
%doc %{_mandir}/man8/*.8*
%doc COPYING
%doc AUTHORS
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine
%dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
/usr/lib/ocf/resource.d/pacemaker
-%if %with_ais_support
+%if %{with ais}
%{_libexecdir}/lcrso/pacemaker.lcrso
%endif
%files -n pacemaker-libs
%defattr(-,root,root)
%{_libdir}/libcib.so.*
%{_libdir}/libcrmcommon.so.*
%{_libdir}/libcrmcluster.so.*
%{_libdir}/libpe_status.so.*
%{_libdir}/libpe_rules.so.*
%{_libdir}/libpengine.so.*
%{_libdir}/libtransitioner.so.*
%{_libdir}/libstonithd.so.*
%doc COPYING.LIB
%doc AUTHORS
%files -n pacemaker-libs-devel
%defattr(-,root,root)
%{_includedir}/pacemaker
%{_includedir}/heartbeat/fencing
%{_libdir}/*.so
%{_datadir}/pacemaker/tests
%doc COPYING.LIB
%doc AUTHORS
%changelog
* Tue Jan 19 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7-2
- Rebuild for corosync 1.2.0
* Mon Jan 18 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7-1
- Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip
- Statistics:
Changesets: 193
Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-)
- Changes since 1.0.5-4
+ High: PE: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups
+ High: PE: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes
+ High: PE: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones
+ High: PE: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe
+ High: PE: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'.
+ High: PE: Bug lf#2225 - Prevent clone peers from stopping while another instance is (potentially) being fenced
+ High: PE: Correctly anti-colocate with a group
+ High: PE: Correctly unpack ordering constraints for resource sets to avoid graph loops
+ High: Tools: crm: load help from crm_cli.txt
+ High: Tools: crm: resource sets (bnc#550923)
+ High: Tools: crm: support for comments (LF 2221)
+ High: Tools: crm: support for description attribute in resources/operations (bnc#548690)
+ High: Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093)
+ High: Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215)
+ High: Tools: hb2openais: refuse to convert pure EVMS volumes
+ High: cib: Ensure the loop for login message terminates
+ High: cib: Finally fix reliability of receiving large messages over remote plaintext connections
+ High: cib: Fix remote notifications
+ High: cib: For remote connections, default to CRM_DAEMON_USER since that's the only one that the cib can validate the password for using PAM
+ High: cib: Remote plaintext - Retry sending parts of the message that did not fit the first time
+ High: crmd: Ensure batch-limit is correctly enforced
+ High: crmd: Ensure we have the latest status after a transition abort
+ High (bnc#547579,547582): Tools: crm: status section editing support
+ High: shell: Add allow-migrate as allowed meta-attribute (bnc#539968)
+ Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break
+ Medium: PE: Bug lf#2206 - rsc_order constraints always use score at the top level
+ Medium: PE: Only complain about target-role=master for non m/s resources
+ Medium: PE: Prevent non-multistate resources from being promoted through target-role
+ Medium: PE: Provide a default action for resource-set ordering
+ Medium: PE: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults
+ Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line
+ Medium: Tools: Bug lf#2307 - Provide a way to determine the nodeid of past cluster members
+ Medium: Tools: crm: add update method to template apply (LF 2289)
+ Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270)
+ Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270)
+ Medium: Tools: crm: do not add score which does not exist
+ Medium: Tools: crm: do not consider warnings as errors (LF 2274)
+ Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304)
+ Medium: Tools: crm: drop empty attributes elements
+ Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 2300)
+ Medium: Tools: crm: fix exit code on single shot commands
+ Medium: Tools: crm: fix node delete (LF 2305)
+ Medium: Tools: crm: implement -F (--force) option
+ Medium: Tools: crm: rename status to cibstatus (LF 2236)
+ Medium: Tools: crm: revisit configure commit
+ Medium: Tools: crm: stay in crm if user specified level only (LF 2286)
+ Medium: Tools: crm: verify changes on exit from the configure level
+ Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf
+ Medium: cib: Clean up logic for receiving remote messages
+ Medium: cib: Create valid notification control messages
+ Medium: cib: Indicate where the remote connection came from
+ Medium: cib: Send password prompt to stderr so that stdout can be redirected
+ Medium: cts: Fix rsh handling when stdout is not required
+ Medium: doc: Fill in the section on removing a node from an AIS-based cluster
+ Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem
+ Medium: doc: Use Publican for docbook based documentation
+ Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell)
+ Medium: fencing: stonithd: ignore case when comparing host names (LF 2292)
+ Medium: tools: Make crm_mon functional with remote connections
+ Medium: xml: Add stopped as a supported role for operations
+ Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs
+ Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6
* Thu Oct 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-4
- Include the fixes from CoroSync integration testing
- Move the resource templates - they are not documentation
- Ensure documentation is placed in a standard location
- Exclude documentation that is included elsewhere in the package
- Update the tarball from upstream to version ee19d8e83c2a
+ High: cib: Correctly clean up when both plaintext and tls remote ports are requested
+ High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisons
+ High: PE: Bug lf#2197 - Allow master instance placement to be influenced by colocation constraints
+ High: PE: Make sure promote/demote pseudo actions are created correctly
+ High: PE: Prevent target-role from promoting more than master-max instances
+ High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage
+ High: ais: Prevent deadlock - don't try to release IPC message if the connection failed
+ High: cib: For validation errors, send back the full CIB so the client can display the errors
+ High: cib: Prevent use-after-free for remote plaintext connections
+ High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat
* Wed Oct 13 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-3
- Update the tarball from upstream to version 38cd629e5c3c
+ High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled
+ High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change
+ High: PE: Bug lf#2170 - stop-all-resources option had no effect
+ High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not
+ High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined
+ High: PE: do not include master score if it would prevent allocation
+ High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms)
+ High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync
+ High: ais: Gracefully handle changes to the AIS nodeid
+ High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE
+ High: crmd: Prevent use-after-free with LOG_DEBUG_3
+ Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672)
+ Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm
+ Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild
+ Medium: PE: Bug lf#2178 - Indicate unmanaged clones
+ Medium: PE: Bug lf#2180 - Include node information for all failed ops
+ Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint
+ Medium: PE: Correctly log resources that would like to start but can not
+ Medium: PE: Stop ptest from logging to syslog
+ Medium: ais: Include version details in plugin name
+ Medium: crmd: Requery the resource metadata after every start operation
* Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 1.0.5-2.1
- rebuilt with new openssl
* Wed Aug 19 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-2
- Add versioned perl dependency as specified by
https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl
- No longer remove RPATH data, it prevents us finding libperl.so and no other
libraries were being hardcoded
- Compile in support for heartbeat
- Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements
depending on which stacks are supported
* Mon Aug 17 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-1
- Add dependency on resource-agents
- Use the version of the configure macro that supplies --prefix, --libdir, etc
- Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final)
+ High: Tools: crm_resource - Advertise --move instead of --migrate
+ Medium: Extra: New node connectivity RA that uses system ping and attrd_updater
+ Medium: crmd: Note that dc-deadtime can be used to mask the brokenness of some switches
* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 1.0.5-0.7.c9120a53a6ae.hg
- Use bzipped upstream tarball.
* Wed Jul 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.6.c9120a53a6ae.hg
- Add back missing build auto* dependencies
- Minor cleanups to the install directive
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.5.c9120a53a6ae.hg
- Add a leading zero to the revision when alphatag is used
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.4.c9120a53a6ae.hg
- Incorporate the feedback from the cluster-glue review
- Realistically, the version is a 1.0.5 pre-release
- Use the global directive instead of define for variables
- Use the haclient/hacluster group/user instead of daemon
- Use the _configure macro
- Fix install dependencies
* Fri Jul 24 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-3
- Initial Fedora checkin
- Include an AUTHORS and license file in each package
- Change the library package name to pacemaker-libs to be more
Fedora compliant
- Remove execute permissions from xml related files
- Reference the new cluster-glue devel package name
- Update the tarball from upstream to version c9120a53a6ae
+ High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node
+ High: PE: Bug 2160 - Dont shuffle clones due to colocation
+ High: PE: New implementation of the resource migration (not stop/start) logic
+ Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options
+ Medium: PE: Prevent use-of-NULL in find_first_action()
* Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-2
- Reference authors from the project AUTHORS file instead of listing in description
- Change Source0 to reference the Mercurial repo
- Cleaned up the summaries and descriptions
- Incorporate the results of Fedora package self-review
* Thu Jun 04 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.4-1
- Update source tarball to revision: 1d87d3e0fc7f (stable-1.0)
- Statistics:
Changesets: 209
Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-)
- Changes since Pacemaker-1.0.3
+ High (bnc#488291): ais: do not rely on byte endianness on ptr cast
+ High (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me)
+ High (bnc#507255): Tools: crm: import properly rsc/op_defaults
+ High (LF 2114): Tools: crm: add support for operation instance attributes
+ High: ais: Bug lf#2126 - Messages replies cannot be routed to transient clients
+ High: ais: Fix compilation for the latest Corosync API (v1719)
+ High: attrd: Do not perform all updates as complete refreshes
+ High: cib: Fix huge memory leak affecting heartbeat-based clusters
+ High: Core: Allow xpath queries to match attributes
+ High: Core: Generate the help text directly from a tool options struct
+ High: Core: Handle differences in 0.6 messaging format
+ High: crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd
+ High: crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors
+ High: crmd: Fix another large memory leak affecting Heartbeat based clusters
+ High: lha: Restore compatibility with older versions
+ High: PE: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions
+ High: PE: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions
+ High: PE: Prevent use-of-NULL when using resource ordering sets
+ High: PE: Provide inter-notification ordering guarantees
+ High: PE: Rewrite the notification code to be understandable and extendable
+ High: Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down
+ High: Tools: crm: regression tests
+ High: Tools: crm_mon - Fix smtp notifications
+ High: Tools: crm_resource - Repair the ability to query meta attributes
+ Low: Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates
+ Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly
+ Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes
+ Medium (LF 2107): Tools: crm: revisit exit codes in configure
+ Medium: cib: Do not bother validating updates that only affect the status section
+ Medium: Core: Include supported stacks in version information
+ Medium: crmd: Record in the CIB, the cluster infrastructure being used
+ Medium: cts: Do not combine crm_standby arguments - the wrapper cannot process them
+ Medium: cts: Fix the CIBAudit class
+ Medium: Extra: Refresh showscores script from Dominik
+ Medium: PE: Build a statically linked version of ptest
+ Medium: PE: Correctly log the actions for resources that are being recovered
+ Medium: PE: Correctly log the occurrence of promotion events
+ Medium: PE: Implement node health based on a patch from Mark Hamzy
+ Medium: Tools: Add examples to help text outputs
+ Medium: Tools: crm: catch syntax errors for configure load
+ Medium: Tools: crm: implement erasing nodes in configure erase
+ Medium: Tools: crm: work with parents only when managing xml objects
+ Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein)
+ Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide
+ Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error
+ Medium: Tools: Include stack information in crm_mon output
+ Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured
* Wed Apr 08 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.3-1
- Update source tarball to revision: b133b3f19797 (stable-1.0) tip
- Statistics:
Changesets: 383
Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-)
- Changes since Pacemaker-1.0.2
+ Added tag SLE11-HAE-GMC for changeset 9196be9830c2
+ High: ais plugin: Fix quorum calculation (bnc#487003)
+ High: ais: Another memory fix leak in error path
+ High: ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading
+ High: ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes
+ High: ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured in the cib
+ High: ais: Correctly handle a return value of zero from openais_dispatch_recv()
+ High: ais: Disable logging to a file
+ High: ais: Fix memory leak in error path
+ High: ais: IPC messages are only in scope until a response is sent
+ High: All signal handlers used with CL_SIGNAL() need to be as minimal as possible
+ High: cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format
+ High: cib: crmd: Revert part of 9782ab035003. Complex shutdown routines need G_main_add_SignalHandler to avoid race conditions
+ High: crm: Avoid infinite loop during crm configure edit (bnc#480327)
+ High: crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically
+ High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly
+ High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified)
+ High: crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election
+ High: crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions
+ High: crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat
+ High: crmd: Erasing the status section should not be forced to the local node
+ High: crmd: Fix memory leak in cib notification processing code
+ High: crmd: Fix memory leak in transition graph processing
+ High: crmd: Fix memory leaks found by valgrind
+ High: crmd: More memory leaks fixes found by valgrind
+ High: fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support
+ High: PE: Bug bnc#466788 - Exclude nodes that can not run resources
+ High: PE: Bug bnc#466788 - Make colocation based on node attributes work
+ High: PE: Bug BNC#478687 - Do not crash when clone-max is 0
+ High: PE: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root
+ High: PE: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated
+ High: PE: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node
+ High: PE: Bug lf#2089 - Meta attributes are not inherited by clone children
+ High: PE: Bug lf#2091 - Correctly restart modified resources that were found active by a probe
+ High: PE: Bug lf#2094 - Fix probe ordering for cloned groups
+ High: PE: Bug LF:2075 - Fix large pingd memory leaks
+ High: PE: Correctly attach orphaned clone children to their parent
+ High: PE: Correctly handle terminate node attributes that are set to the output from time()
+ High: PE: Ensure orphaned clone members are hooked up to the parent when clone-max=0
+ High: PE: Fix memory leak in LogActions
+ High: PE: Fix the determination of whether a group is active
+ High: PE: Look up the correct promotion preference for anonymous masters
+ High: PE: Simplify handling of start failures by changing the default migration-threshold to INFINITY
+ High: PE: The ordered option for clones no longer causes extra start/stop operations
+ High: RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL
+ High: RA: pingd: Set default ping interval to 1 instead of 0 seconds
+ High: Resources: pingd - Correctly tell the ping daemon to shut down
+ High: Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility
+ High: Tools: cli: fix and improve delete command
+ High: Tools: crm: add and implement templates
+ High: Tools: crm: add support for command aliases and some common commands (i.e. cd,exit)
+ High: Tools: crm: create top configuration nodes if they are missing
+ High: Tools: crm: fix parsing attributes for rules (broken by the previous changeset)
+ High: Tools: crm: new ra set of commands
+ High: Tools: crm: resource agents information management
+ High: Tools: crm: rsc/op_defaults
+ High: Tools: crm: support for no value attribute in nvpairs
+ High: Tools: crm: the new configure monitor command
+ High: Tools: crm: the new configure node command
+ High: Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan
+ High: Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf
+ High: Tools: hb2openais: fix a serious recursion bug in xml node processing
+ High: Tools: hb2openais: fix ocfs2 processing
+ High: Tools: pingd - prevent double free of getaddrinfo() output in error path
+ High: Tools: The default re-ping interval for pingd should be 1s not 1ms
+ Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command
+ Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion
+ Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op
+ Medium (bnc#479050): Tools: crm: reimplement cluster properties completion
+ Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff)
+ Medium: ais: Remove the ugly hack for dampening AIS membership changes
+ Medium: cib: Fix memory leaks by using mainloop_add_signal
+ Medium: cib: Move more logging to the debug level (was info)
+ Medium: cib: Overhaul the processing of synchronous replies
+ Medium: Core: Add library functions for instructing the cluster to terminate nodes
+ Medium: crmd: Add new expected-quorum-votes option
+ Medium: crmd: Allow up to 5 retries when an attrd update fails
+ Medium: crmd: Automatically detect and use new values for crm_config options
+ Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations
+ Medium: crmd: Clean up and optimize the DC election algorithm
+ Medium: crmd: Fix memory leak in shutdown
+ Medium: crmd: Fix memory leaks spotted by Valgrind
+ Medium: crmd: Ignore join messages from hosts other than our DC
+ Medium: crmd: Limit the scope of resource updates to the status section
+ Medium: crmd: Prevent the crmd from being respawned if it is told to shut down when it did not ask to be
+ Medium: crmd: Re-check the election status after membership events
+ Medium: crmd: Send resource updates via the local CIB during elections
+ Medium: PE: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly
+ Medium: PE: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started
+ Medium: PE: Clean up the API - removed ->children() and renamed ->find_child() to find_rsc()
+ Medium: PE: Compress the display of healthy anonymous clones
+ Medium: PE: Correctly log the actions for resources that are being recovered
+ Medium: PE: Determine a promotion score for complex resources
+ Medium: PE: Ensure clones always have a value for globally-unique
+ Medium: PE: Prevent orphan clones from being allocated
+ Medium: RA: controld: Return proper exit code for stop op.
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test
+ Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup
+ Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py
+ Medium: Tools: crm: add more user input checks
+ Medium: Tools: crm: do not check resource status if we are working with a shadow
+ Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive)
+ Medium: Tools: crm: ignore comments in the CIB
+ Medium: Tools: crm: multiple column output would not work with small lists
+ Medium: Tools: crm: refuse to delete running resources
+ Medium: Tools: crm: rudimentary if-else for templates
+ Medium: Tools: crm: Start/stop clones via target-role.
+ Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes
+ Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds
+ Medium: Tools: crm_shadow - Support -e, the short form of --create-empty
+ Medium: Tools: Make attrd quieter
+ Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak
+ Medium: Tools: Reduce pingd logging
* Mon Feb 16 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.2-1
- Update source tarball to revision: d232d19daeb9 (stable-1.0) tip
- Statistics:
Changesets: 441
Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-)
- Changes since Pacemaker-1.0.1
+ High (bnc#450815): Tools: crm cli: do not generate id for the operations tag
+ High: ais: Add support for the new AIS IPC layer
+ High: ais: Always set header.error to the correct default: SA_AIS_OK
+ High: ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node
+ High: ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec()
+ High: ais: By default, disable support for the WIP openais IPC patch
+ High: ais: Detect and handle situations where ais and the crm disagree on the node name
+ High: ais: Ensure crm_peer_seq is updated after a membership update
+ High: ais: Make sure all IPC header fields are set to sane defaults
+ High: ais: Repair and streamline service load now that whitetank startup functions correctly
+ High: build: create and install doc files
+ High: cib: Allow clients without mainloop to connect to the cib
+ High: cib: CID:18 - Fix use-of-NULL in cib_perform_op
+ High: cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op
+ High: cib: Ensure diffs contain the correct values of admin_epoch
+ High: cib: Fix four moderately sized memory leaks detected by Valgrind
+ High: Core: CID:10 - Prevent indexing into an array of schemas with a negative value
+ High: Core: CID:13 - Fix memory leak in log_data_element
+ High: Core: CID:15 - Fix memory leak in crm_get_peer
+ High: Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input
+ High: Core: Fix crash in the membership code preventing node shutdown
+ High: Core: Fix more memory leaks found by valgrind
+ High: Core: Prevent unterminated strings after decompression
+ High: crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so
+ High: crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them.
+ High: crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup
+ High: crmd: Correctly handle reconnections to attrd
+ High: crmd: Ensure updates for lost migrate operations indicate which node it tried to migrate to
+ High: crmd: If there are no nodes to finalize, start an election.
+ High: crmd: If there are no nodes to welcome, start an election.
+ High: crmd: Prevent node attribute loss by detecting attrd disconnections immediately
+ High: crmd: Prevent node re-probe loops by ensuring mandatory actions always complete
+ High: PE: Bug 2005 - Fix startup ordering of cloned stonith groups
+ High: PE: Bug 2006 - Correctly reprobe cloned groups
+ High: PE: Bug BNC:465484 - Fix the no-quorum-policy=suicide option
+ High: PE: Bug LF:1996 - Correctly process disabled monitor operations
+ High: PE: CID:19 - Fix use-of-NULL in determine_online_status
+ High: PE: Clones now default to globally-unique=false
+ High: PE: Correctly calculate the number of available nodes for the clone to use
+ High: PE: Only shoot online nodes with no-quorum-policy=suicide
+ High: PE: Prevent on-fail settings being ignored after a resource is successfully stopped
+ High: PE: Prevent use-of-NULL for failed migrate actions in process_rsc_state()
+ High: PE: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitely
+ High: PE: Repair the ability to colocate based on node attributes other than uname
+ High: PE: Start the correct monitor operation for unmanaged masters
+ High: stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers
+ High: stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling
+ High: stonithd: Sending IPC to the cluster is a privileged operation
+ High: stonithd: wrong checks for shmid (0 is a valid id)
+ High: Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB
+ High: Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down
+ High: Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems
+ High: Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline
+ High: Tools: Bug BNC:468066 - Do not use the result of uname() when it is no longer in scope
+ High: Tools: Bug BNC:473265 - crm_resource -L dumps core
+ High: Tools: Bug LF:2001 - Transient node attributes should be set via attrd
+ High: Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources
+ High: Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start
+ High: Tools: Cause the correct clone instance to be failed with crm_resource -F
+ High: Tools: cluster_test - Allow the user to select a stack and fix CTS invocation
+ High: Tools: crm cli: allow rename only if the resource is stopped
+ High: Tools: crm cli: catch system errors on file operations
+ High: Tools: crm cli: completion for ids in configure
+ High: Tools: crm cli: drop '-rsc' from attributes for order constraint
+ High: Tools: crm cli: exit with an appropriate exit code
+ High: Tools: crm cli: fix wrong order of action and resource in order constraint
+ High: Tools: crm cli: fix wrong exit code
+ High: Tools: crm cli: improve handling of cib attributes
+ High: Tools: crm cli: new command: configure rename
+ High: Tools: crm cli: new command: configure upgrade
+ High: Tools: crm cli: new command: node delete
+ High: Tools: crm cli: prevent key errors on missing cib attributes
+ High: Tools: crm cli: print long help for help topics
+ High: Tools: crm cli: return on syntax error when parsing score
+ High: Tools: crm cli: rsc_location can be without nvpairs
+ High: Tools: crm cli: short node preference location constraint
+ High: Tools: crm cli: sometimes, on errors, level would change on single shot use
+ High: Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion)
+ High: Tools: crm cli: verify user input for sanity
+ High: Tools: crm: find expressions within rules (do not always skip xml nodes due to used id)
+ High: Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups
+ High: Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps
+ Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status
+ Medium (LF 2009): stonithd: improve timeouts for remote fencing
+ Medium: ais: Allow dead peers to be removed from membership calculations
+ Medium: ais: Pass node deletion events on to clients
+ Medium: ais: Sanitize ipc usage
+ Medium: ais: Supply the node uname in addition to the id
+ Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g)
+ Medium: Build: Install cluster_test
+ Medium: Build: Use more restrictive CFLAGS and fix the resulting errors
+ Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon
+ Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages
+ Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path
+ Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path
+ Medium: Core: CID:16 - Fix memory leak in date_to_string error path
+ Medium: Core: Try to track down the cause of XML parsing errors
+ Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions
+ Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay
+ Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions.
+ Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers
+ Medium: crmd: Find option values without having to do a config upgrade
+ Medium: crmd: Implement shutdown using a transient node attribute
+ Medium: crmd: Update the crmd options to use dashes instead of underscores
+ Medium: cts: Add 'cluster reattach' to the suite of automated regression tests
+ Medium: cts: cluster_test - Make some usability enhancements
+ Medium: CTS: cluster_test - suggest a valid port number
+ Medium: CTS: Fix python import order
+ Medium: cts: Implement an automated SplitBrain test
+ Medium: CTS: Remove references to deleted classes
+ Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup
+ Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes
+ Medium: PE: CID:17 - Fix memory leak in find_actions_by_task error path
+ Medium: PE: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions
+ Medium: PE: Defer logging the actions performed on a resource until we have processed ordering constraints
+ Medium: PE: Remove the symmetrical attribute of colocation constraints
+ Medium: Resources: pingd - fix the meta defaults
+ Medium: Resources: Stateful - Add missing meta defaults
+ Medium: stonithd: exit if the pid file cannot be locked
+ Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with
+ Medium: Tools: attrd - Allow attribute updates to be performed from a host's peer
+ Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes
+ Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds)
+ Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification
+ Medium: Tools: crm cli: add back symmetrical for order constraints
+ Medium: Tools: crm cli: generate role in location when converting from xml
+ Medium: Tools: crm cli: handle shlex exceptions
+ Medium: Tools: crm cli: keep order of help topics
+ Medium: Tools: crm cli: refine completion for ids in configure
+ Medium: Tools: crm cli: replace inf with INFINITY
+ Medium: Tools: crm cli: streamline cib load and parsing
+ Medium: Tools: crm cli: supply provider only for ocf class primitives
+ Medium: Tools: crm_mon - Add support for sending mail notifications of resource events
+ Medium: Tools: crm_mon - Include the DC version in status summary
+ Medium: Tools: crm_mon - Sanitize startup and option processing
+ Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps
+ Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit
+ Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd
+ Medium: Tools: hb2openais: replace crmadmin with crm_mon
+ Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb
+ Medium: Tools: hb2openais: reuse code
+ Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources
+ Medium: Tools: Make pingd resilient to attrd failures
+ Medium: Tools: pingd - fix the command line switches
+ Medium: Tools: Rename ccm_tool to crm_node
* Tue Nov 18 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.1-1
- Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip
- Statistics:
Changesets: 170
Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-)
- Changes since Pacemaker-1.0.0
+ High: ais: Allow the crmd to get callbacks whenever a node state changes
+ High: ais: Create an option for starting the mgmtd daemon automatically
+ High: ais: Ensure HA_RSCTMP exists for use by resource agents
+ High: ais: Hook up the openais.conf config logging options
+ High: ais: Zero out the PID of disconnecting clients
+ High: cib: Ensure global updates cause a disk write when appropriate
+ High: Core: Add an extra sanity check to getXpathResults() to prevent segfaults
+ High: Core: Do not redefine __FUNCTION__ unnecessarily
+ High: Core: Repair the ability to have comments in the configuration
+ High: crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete
+ High: crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback
+ High: crmd: Requests to the CIB should cause any prior PE calculations to be ignored
+ High: heartbeat: Wait for membership 'up' events before removing stale node status data
+ High: PE: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set
+ High: PE: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks
+ High: PE: Ensure the terminate node attribute is handled correctly
+ High: PE: Fix optional colocation
+ High: PE: Improve the detection of 'new' nodes joining the cluster
+ High: PE: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location
+ High: Tools: crm cli: parser: return False on syntax error and None for comments
+ High: Tools: crm cli: unify template and edit commands
+ High: Tools: crm_shadow - Show more line number information after validation failures
+ High: Tools: hb2openais: add option to upgrade the CIB to v3.0
+ High: Tools: hb2openais: add U option to getopts and update usage
+ High: Tools: hb2openais: backup improved and multiple fixes
+ High: Tools: hb2openais: fix class/provider reversal
+ High: Tools: hb2openais: fix testing
+ High: Tools: hb2openais: move the CIB update to the end
+ High: Tools: hb2openais: update logging and set logfile appropriately
+ High: Tools: LF:1969 - Attrd never sets any properties in the cib
+ High: Tools: Make attrd functional on OpenAIS
+ Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes
+ Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an additional configuration block
+ Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf)
+ Medium: cib: Always store cib contents on disk with num_updates=0
+ Medium: cib: Ensure remote access ports are cleaned up on shutdown
+ Medium: crmd: Detect deleted resource operations automatically
+ Medium: crmd: Erase a node's resource operations and transient attributes after a successful STONITH
+ Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes
+ Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored
+ Medium: crmd: Fix the recording of pending operations in the CIB
+ Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated
+ Medium: crmd: Only the DC should update quorum in an openais cluster
+ Medium: Ensure meta attributes are used consistently
+ Medium: PE: Allow group and clone level resource attributes
+ Medium: PE: Bug N:437719 - Ensure scores from colocated resources count when allocating groups
+ Medium: PE: Prevent lsb scripts from being used in globally unique clones
+ Medium: PE: Make a best-effort guess at a migration threshold for people with 0.6 configs
+ Medium: Resources: controld - ensure we are part of a clone with globally_unique=false
+ Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation
+ Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts
+ Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version
+ Medium: Tools: crm (bnc#441028): check for key error in attributes management
+ Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status
+ Medium: Tools: crm_mon - Fix the display of timing data
+ Medium: Tools: crm_verify - check that we are being asked to validate a complete config
+ Medium: xml: Relax the restriction on the contents of rsc_location.node
* Thu Oct 16 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.0-1
- Update source tarball to revision: 388654dfef8f tip
- Statistics:
Changesets: 261
Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-)
- Changes since f805e1b30103
+ High: add the crm cli program
+ High: ais: Move the service id definition to a common location and make sure it is always used
+ High: build: rename hb2openais.sh to .in and replace paths with vars
+ High: cib: Implement --create for crm_shadow
+ High: cib: Remove dead files
+ High: Core: Allow the expected number of quorum votes to be configurable
+ High: Core: cl_malloc and friends were removed from Heartbeat
+ High: Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ High: hb2openais.sh: improve pingd handling; several bugs fixed
+ High: hb2openais: fix clone creation; replace EVMS strings
+ High: new hb2openais.sh conversion script
+ High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ High: PE: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ High: PE: Bug N:420538 - Anti-colocation caused a positive node preference
+ High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ High: PE: crm_resource - Fix the --migrate command
+ High: PE: Make stonith-enabled default to true and warn if no STONITH resources are found
+ High: PE: Make sure orphaned clone children are created correctly
+ High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ High: stonithd (LF 1951): fix remote stonith operations
+ High: stonithd: fix handling of timeouts
+ High: stonithd: fix logic for stonith resource priorities
+ High: stonithd: implement the fence-timeout instance attribute
+ High: stonithd: initialize value before reading fence-timeout
+ High: stonithd: set timeouts for fencing ops to the timeout of the start op
+ High: stonithd: stonith rsc priorities (new feature)
+ High: Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead
+ High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ High: Tools: Make pingd functional on Linux
+ High: Update version numbers for 1.0 candidates
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: Build: Reliably detect heartbeat libraries during configure
+ Medium: Build: Supply prototypes for libreplace functions when needed
+ Medium: Build: Teach configure how to find corosync
+ Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support
+ Medium: crmd: Avoid calling GHashTable functions with NULL
+ Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB
+ Medium: crmd: Hook up the stonith-timeout option to stonithd
+ Medium: crmd: Prevent potential use-of-NULL in global_timer_callback
+ Medium: crmd: Rationalize the logging of graph aborts
+ Medium: PE: Add a stonith_timeout option and remove new options that are better set in rsc_defaults
+ Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: PE: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: PE: Detect clients that disconnect before receiving their reply
+ Medium: PE: Implement a true maintenance mode
+ Medium: PE: Implement on-fail=standby for NTT. Derived from a patch by Satomi TANIGUCHI
+ Medium: PE: Print the correct message when stonith is disabled
+ Medium: PE: ptest - check the input is valid before proceeding
+ Medium: PE: Revert group stickiness to the 'old way'
+ Medium: PE: Use the correct attribute for action 'requires' (was prereq)
+ Medium: stonithd: Fix compilation without full heartbeat install
+ Medium: stonithd: exit with better code on empty host list
+ Medium: tools: Add a new regression test for CLI tools
+ Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid
+ Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection)
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Mon Sep 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.3-1
- Update source tarball to revision: 33e677ab7764+ tip
- Statistics:
Changesets: 133
Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-)
- Changes since f805e1b30103
+ High: Tools: add the crm cli program
+ High: Core: cl_malloc and friends were removed from Heartbeat
+ High: Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ High: new hb2openais.sh conversion script
+ High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ High: PE: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ High: PE: Bug N:420538 - Anti-colocation caused a positive node preference
+ High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ High: PE: crm_resource - Fix the --migrate command
+ High: PE: Make stonith-enabled default to true and warn if no STONITH resources are found
+ High: PE: Make sure orphaned clone children are created correctly
+ High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ High: stonithd (LF 1951): fix remote stonith operations
+ High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: PE: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: PE: Implement a true maintenance mode
+ Medium: PE: Print the correct message when stonith is disabled
+ Medium: stonithd: exit with better code on empty host list
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Wed Aug 20 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.1-1
- Update source tarball to revision: f805e1b30103+ tip
- Statistics:
Changesets: 184
Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-)
- Changes since 0.7.0-19
+ Fix compilation when GNUTLS isn't found
+ High: admin: Fix use-after-free in crm_mon
+ High: Build: Remove testing code that prevented heartbeat-only builds
+ High: cib: Use single quotes so that the xpath queries for nvpairs will succeed
+ High: crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies
+ High: crmd: Correctly handle a dead PE process
+ High: crmd: Make sure async-failures cause the failcount to be incremented
+ High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ High: PE: Parse resource ordering sets correctly
+ High: PE: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL
+ High: PE: Unpack colocation sets correctly
+ High: Tools: crm_mon - Prevent use-of-NULL for orphaned resources
+ Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid
+ Medium: ais: Allow transient clients to receive membership updates
+ Medium: ais: Avoid double-free in error path
+ Medium: ais: Include in the membership nodes for which we have not determined their hostname
+ Medium: ais: Spawn the PE from the ais plugin instead of the crmd
+ Medium: cib: By default, new configurations use the latest schema
+ Medium: cib: Clean up the CIB if it was already disconnected
+ Medium: cib: Only increment num_updates if something actually changed
+ Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB
+ Medium: Core: Fix memory leak in xpath searches
+ Medium: Core: Get more details regarding parser errors
+ Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values
+ Medium: Core: Switch to the libxml2 parser - it is significantly faster
+ Medium: Core: Use a libxml2 library function for xml -> text conversion
+ Medium: crmd: Asynchronous failure actions have no parameters
+ Medium: crmd: Avoid calling glib functions with NULL
+ Medium: crmd: Do not allow an election to promote a node from S_STARTING
+ Medium: crmd: Do not vote if we have not completed the local startup
+ Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently
+ Medium: crmd: Fix the lrmd xpath expressions to not contain quotes
+ Medium: crmd: If we get a join offer during an election, better restart the election
+ Medium: crmd: No further processing is needed when using the LRMs API call for failing resources
+ Medium: crmd: Only update have-quorum if the value changed
+ Medium: crmd: Repair the input validation logic in do_te_invoke
+ Medium: cts: CIBs can no longer contain comments
+ Medium: cts: Enable a bunch of tests that were incorrectly disabled
+ Medium: cts: The libxml2 parser won't allow v1 resources to use integers as parameter names
+ Medium: Do not use the cluster UID and GID directly. Look them up based on the configured value of HA_CCMUSER
+ Medium: Fix compilation when heartbeat is not supported
+ Medium: PE: Allow groups to be involved in optional ordering constraints
+ Medium: PE: Allow sets of operations to be reused by multiple resources
+ Medium: PE: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones
+ Medium: PE: Determine the correct migration-threshold during resource expansion
+ Medium: PE: Implement no-quorum-policy=suicide (FATE #303619)
+ Medium: pengine: Clean up resources after stopping old copies of the PE
+ Medium: pengine: Teach the PE how to stop old copies of itself
+ Medium: Tools: Backport hb_report updates
+ Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly
+ Medium: Tools: Rename cib_shadow to crm_shadow
* Fri Jul 18 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-19
- Update source tarball to revision: 007c3a1c50f5 (unstable) tip
- Statistics:
Changesets: 108
Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-)
- Changes added since unstable-0.7
+ High: admin: Fix use-after-free in crm_mon
+ High: ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf)
+ High: ais: Log terminated processes as an error
+ High: cib: Performance - Reorganize things to avoid calculating the XML diff twice
+ High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ High: PE: Fix memory leak in action2xml
+ High: PE: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one
+ High: PE: Properly handle clones that are not installed on all nodes
+ Medium: admin: cibadmin - Show any validation errors if the upgrade failed
+ Medium: admin: cib_shadow - Implement --locate to display the underlying filename
+ Medium: admin: cib_shadow - Implement a --diff option
+ Medium: admin: cib_shadow - Implement a --switch option
+ Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated)
+ Medium: ais: Approximate born_on for OpenAIS based clusters
+ Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema
+ Medium: cib: Skip construction of pre-notify messages if no-one wants one
+ Medium: Core: Attempt to streamline some key functions to increase performance
+ Medium: Core: Clean up XML parser after validation
+ Medium: crmd: Detect and optimize the CRMs behavior when processing diffs of an LRM refresh
+ Medium: Fix memory leaks when resetting the name of an XML object
+ Medium: PE: Prefer the current location if it is one of a group of nodes with the same (highest) score
* Wed Jun 25 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-1
- Update source tarball to revision: bde0c7db74fb tip
- Statistics:
Changesets: 439
Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-)
- Changes added since stable-0.6
+ High: A new tool for setting up and invoking CTS
+ High: Admin: All tools now use --node (-N) for specifying node unames
+ High: Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs
+ High: cib: Cleanup the API - remove redundant input fields
+ High: cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster
+ High: cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications
+ High: Core: Add a facility for automatically upgrading old configurations
+ High: Core: Adopt libxml2 as the XML processing library - all external clients need to be recompiled
+ High: Core: Allow sending TLS messages larger than the MTU
+ High: Core: Fix parsing of time-only ISO dates
+ High: Core: Smarter handling of XML values containing quotes
+ High: Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself
+ High: Core: The xml ID type does not allow UUIDs that start with a number
+ High: Core: Implement XPath based versions of query/delete/replace/modify
+ High: Core: Remove some HA2.0.(3,4) compatibility code
+ High: crmd: Overhaul the detection of nodes that are starting vs. failed
+ High: PE: Bug LF:1459 - Allow failures to expire
+ High: PE: Have the PE do non-persistent configuration upgrades before performing calculations
+ High: PE: Replace failure-stickiness with a simple 'migration-threshold'
+ High: TE: Simplify the design by folding the tengine process into the crmd
+ Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource
+ Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute
+ Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history
+ Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data
+ Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes
+ Medium: Admin: crm_mon - include timing data for failed actions
+ Medium: ais: Read options from the environment since objdb is not completely usable yet
+ Medium: cib: Add sections for op_defaults and rsc_defaults
+ Medium: cib: Better matching notification callbacks (for detecting duplicates and removal)
+ Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects
+ Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s
+ Medium: cib: Detect updates that decrease the version tuple
+ Medium: cib: Implement a client-side operation timeout - Requires LHA update
+ Medium: cib: Implement callbacks and async notifications for remote connections
+ Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin)
+ Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated
+ Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet
+ Medium: cib: Reimplement get|set|delete attributes using XPath
+ Medium: cib: Remove some useless parts of the API
+ Medium: cib: Remove the 'attributes' scaffolding from the new format
+ Medium: cib: Implement the ability for clients to connect to remote servers
+ Medium: Core: Add support for validating xml against RelaxNG schemas
+ Medium: Core: Allow more than one item to be modified/deleted in XPath based operations
+ Medium: Core: Fix the sort_pairs function for creating sorted xml objects
+ Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time
+ Medium: Core: Reduce the amount of xml copying occurring
+ Medium: Core: Support value='value+=N' XML updates (in addition to value='value++')
+ Medium: crmd: Add support for lrm_ops->fail_rsc if its available
+ Medium: crmd: HB - watch link status for node leaving events
+ Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns
+ Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. Confirm them immediately
+ Medium: PE: Bug LF:1328 - Do not fence nodes in clusters without managed resources
+ Medium: PE: Bug LF:1461 - Give transient node attributes (in <status/>) preference over persistent ones (in <nodes/>)
+ Medium: PE: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints
+ Medium: PE: Bug LF:1886 - Create a resource and operation 'defaults' config section
+ Medium: PE: Bug LF:1892 - Allow recurring actions to be triggered at known times
+ Medium: PE: Bug LF:1926 - Probes should complete before stop actions are invoked
+ Medium: PE: Fix the standby when it is set as a transient attribute
+ Medium: PE: Implement a global 'stop-all-resources' option
+ Medium: PE: Implement cibpipe, a tool for performing/simulating config changes "offline"
+ Medium: PE: We do not allow colocation with specific clone instances
+ Medium: Tools: pingd - Implement a stack-independent version of pingd
+ Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7
* Thu Jun 19 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.5-1
- Update source tarball to revision: b9fe723d1ac5 tip
- Statistics:
Changesets: 48
Diff: 37 files changed, 1204 insertions(+), 234 deletions(-)
- Changes since Pacemaker-0.6.4
+ High: Admin: Repair the ability to delete failcounts
+ High: ais: Audit IPC handling between the AIS plugin and CRM processes
+ High: ais: Have the plugin create needed /var/lib directories
+ High: ais: Make sure the sync and async connections are assigned correctly (not swapped)
+ High: cib: Correctly detect configuration changes - num_updates does not count
+ High: PE: Apply stickiness values to the whole group, not the individual resources
+ High: PE: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node
+ High: PE: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints
+ High: PE: Correctly recover master instances found active on more than one node
+ High: PE: Fix memory leaks reported by Valgrind
+ Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi
+ Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters
+ Medium: crmd: Ensure joins are completed promptly when a node taking part dies
+ Medium: PE: Avoid clone instance shuffling in more cases
+ Medium: PE: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave erratically
+ Medium: PE: Make use of target_rc data to correctly process resource operations
+ Medium: PE: Prevent a possible use of NULL in sort_clone_instance()
+ Medium: TE: Include target rc in the transition key - used to correctly determine operation failure
* Thu May 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.4-1
- Update source tarball to revision: 226d8e356924 tip
- Statistics:
Changesets: 55
Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-)
- Changes since Pacemaker-0.6.3
+ High: crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion
+ High: crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB
+ High: PE: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling
+ High: PE: Ensure 'master' monitor actions are cancelled _before_ we demote the resource
+ High: PE: Fix assert failure leading to core dump - make sure variable is properly initialized
+ High: PE: Make sure 'slave' monitoring happens after the resource has been demoted
+ High: PE: Prevent failure stickiness underflows (where too many failures become a _positive_ preference)
+ Medium: Admin: crm_mon - Only complain if the output file could not be opened
+ Medium: Common: filter_action_parameters - enable legacy handling only for older versions
+ Medium: PE: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY
+ Medium: PE: Implement master and clone colocation by excluding nodes rather than setting one's score to INFINITY (similar to cs: 756afc42dc51)
+ Medium: TE: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster
* Wed Apr 23 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.3-1
- Update source tarball to revision: fd8904c9bc67 tip
- Statistics:
Changesets: 117
Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-)
- Changes since Pacemaker-0.6.2
+ High: Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order
+ High: Build: SNMP has been moved to the management/pygui project
+ High: crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down
+ High: crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI)
+ High: PE: Allow the cluster to make progress by not retrying failed demote actions
+ High: PE: Anti-colocation with slave should not prevent master colocation
+ High: PE: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources
+ High: PE: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources
+ High: PE: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances
+ High: PE: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios
+ High: PE: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started
+ High: PE: Bug N-347004 - Ensure notification ordering is correct for interleaved clones
+ High: PE: Bug PM-11 - Directly link probe_complete to starting clone instances
+ High: PE: Bug PM1 - Fix setting failcounts when applied to complex resources
+ High: PE: Bug PM12, LF1648 - Extensive revision of group ordering
+ High: PE: Bug PM7 - Ensure masters are always demoted before they are stopped
+ High: PE: Create probes after allocation to allow smarter handling of anonymous clones
+ High: PE: Do not prioritize clone instances that must be moved
+ High: PE: Fix error in previous commit that allowed more than the required number of masters to be promoted
+ High: PE: Group start ordering fixes
+ High: PE: Implement promote/demote ordering for cloned groups
+ High: TE: Repair failcount updates
+ High: TE: Use the correct offset when updating failcount
+ Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes
+ Medium: Build: Make configure fail if bz2 or libxml2 are not present
+ Medium: Build: Re-instate a better default for LCRSODIR
+ Medium: CIB: Bug LF-1861 - Filter irrelevant error status from synchronous CIB clients
+ Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date
+ Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters
+ Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops)
+ Medium: crmd: Save the current CIB contents if we detect the PE crashed
+ Medium: PE: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations
+ Medium: PE: Bug LF:1866 - Restore the ability to have start failures not be fatal
+ Medium: PE: Bug PM1 - Failcount applies to all instances of non-unique clone
+ Medium: PE: Correctly set the state of partially active master/slave groups
+ Medium: PE: Do not claim to be stopping an already stopped orphan
+ Medium: PE: Ensure implies_left ordering constraints are always effective
+ Medium: PE: Indicate each resource's 'promotion' score
+ Medium: PE: Prevent a possible use-of-NULL
+ Medium: PE: Reprocess the current action if it changed (so that any prior dependencies are updated)
+ Medium: TE: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition
+ Medium: TE: Bug LF:1859 - Do not abort graphs due to our own failcount updates
+ Medium: TE: Bug LF:1859 - Prevent the TE from interrupting itself
* Thu Feb 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.2-1
- Update source tarball to revision: 28b1a8c1868b tip
- Statistics:
Changesets: 11
Diff: 7 files changed, 58 insertions(+), 18 deletions(-)
- Changes since Pacemaker-0.6.1
+ haresources2cib.py: set default-action-timeout to the default (20s)
+ haresources2cib.py: update ra parameters lists
+ Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki)
+ Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded
* Tue Feb 12 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.1-1
- Update source tarball to revision: e7152d1be933 tip
- Statistics:
Changesets: 25
Diff: 37 files changed, 1323 insertions(+), 227 deletions(-)
- Changes since Pacemaker-0.6.0
+ High: CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write
+ High: CIB: Ensure the archived file hits the disk before returning
+ High: CIB: Repair the ability to do 'atomic increment' updates (value="value++")
+ High: crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL
+ Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know
+ Medium: crmd: Delay starting the IPC server until we are fully functional
+ Medium: CTS: Fix the startup patterns
+ Medium: PE: Bug 1820 - Allow the first resource in a group to be migrated
+ Medium: PE: Bug 1820 - Check the colocation dependencies of resources to be migrated
* Mon Jan 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.0-2
- This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat.
- For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in
the new pacemaker-pygui project. Build dependencies prevent them from being
included in Heartbeat (since the built-in CRM is no longer supported) and,
being non-core components, are not included with Pacemaker.
- Update source tarball to revision: c94b92d550cf
- Statistics:
Changesets: 347
Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-)
- Test hardware:
+ 6-node vmware cluster (sles10-sp1/256Mb/vmware stonith) on a single host (opensuse10.3/2Gb/2.66Ghz Quad Core2)
+ 7-node EMC Centera cluster (sles10/512Mb/2Ghz Xeon/ssh stonith)
- Notes: Heartbeat Stack
+ All testing was performed with STONITH enabled
+ The CRM was enabled using the "crm respawn" directive
- Notes: OpenAIS Stack
+ This release contains a preview of support for the OpenAIS cluster stack
+ The current release of the OpenAIS project is missing two important
patches that we require. OpenAIS packages containing these patches are
available for most major distributions at:
http://download.opensuse.org/repositories/server:/ha-clustering
+ The OpenAIS stack is not currently recommended for use in clusters that
have shared data as STONITH support is not yet implemented
+ pingd is not yet available for use with the OpenAIS stack
+ 3 significant OpenAIS issues were found during testing of 4 and 6 node
clusters. We are actively working together with the OpenAIS project to
get these resolved.
- Pending bugs encountered during testing:
+ OpenAIS #1736 - Openais membership took 20s to stabilize
+ Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match
+ OpenAIS #1793 - Assertion failure in memb_state_gather_enter()
+ OpenAIS #1796 - Cluster message corruption
- Changes since Heartbeat-2.1.2-24
+ High: Add OpenAIS support
+ High: Admin: crm_uuid - Look in the right place for Heartbeat UUID files
+ High: admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query
+ High: cib: Fix CIB_OP_UPDATE calls that modify the whole CIB
+ High: cib: Fix compilation when supporting the heartbeat stack
+ High: cib: Fix memory leaks caused by the switch to get_message_xml()
+ High: cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true
+ High: cib: Use get_message_xml() in preference to cl_get_struct()
+ High: cib: Use the return value from call to write() in cib_send_plaintext()
+ High: Core: ccm nodes can legitimately have a node id of 0
+ High: Core: Fix peer-process tracking for the Heartbeat stack
+ High: Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead
+ High: CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME
+ High: crm: Adopt a more flexible approach to enabling Valgrind
+ High: crm: Fix compilation when bzip2 is not installed
+ High: CRM: Future-proof get_message_xml()
+ High: crmd: Filter election responses based on time not FSA state
+ High: crmd: Handle all possible peer states in crmd_ha_status_callback()
+ High: crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules
+ High: crmd: Relax an assertion regarding ccm membership instances
+ High: crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations
+ High: crmd: Heartbeat: Accurately record peer client status
+ High: PE: Bug 1777 - Allow colocation with a resource in the Stopped state
+ High: PE: Bug 1822 - Prevent use-of-NULL in PromoteRsc()
+ High: PE: Implement three recovery policies based on op_status and op_rc
+ High: PE: Parse fail-count correctly (it may be set to INFINITY)
+ High: PE: Prevent graph-loop when stonith agents need to be moved around before a STONITH op
+ High: PE: Prevent graph-loops when two operations have the same name+interval
+ High: te: Cancel active timers when destroying graphs
+ High: TE: Ensure failcount is set correctly for failed stops/starts
+ High: TE: Update failcount for operations that time out
+ Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA
+ Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin
+ Medium: cib: Tweak the shutdown code
+ Medium: Common: Only count peer processes of active nodes
+ Medium: Core: Create generic cluster sign-in method
+ Medium: core: Fix compilation when Heartbeat support is disabled
+ Medium: Core: General cleanup for supporting two stacks
+ Medium: Core: iso8601 - Support parsing of time-only strings
+ Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled
+ Medium: crm: Improved logging of errors in the XML parser
+ Medium: crmd: Fix potential use-of-NULL in string comparison
+ Medium: crmd: Reimplement synchronizing of CIB queries and updates when invoking the PE
+ Medium: crm_mon: Indicate when a node is both in standby mode and offline
+ Medium: PE: Bug 1822 - Do not try to promote groups if not all of the group is active
+ Medium: PE: on_fail=nothing is an alias for 'ignore' not 'restart'
+ Medium: PE: Prevent a potential use-of-NULL in cron_range_satisfied()
+ snmp subagent: fix a problem on displaying an unmanaged group
+ snmp subagent: use the syslog setting
+ snmp: v2 support (thanks to Keisuke MORI)
+ snmp_subagent - made it not complain about some things if shutting down
* Mon Dec 10 2007 Andrew Beekhof <abeekhof@suse.de> - 0.6.0-1
- Initial opensuse package check-in
diff --git a/tools/shell/Makefile.am b/shell/Makefile.am
similarity index 100%
rename from tools/shell/Makefile.am
rename to shell/Makefile.am
diff --git a/tools/shell/regression/001.exp.xml b/shell/regression/001.exp.xml
similarity index 100%
rename from tools/shell/regression/001.exp.xml
rename to shell/regression/001.exp.xml
diff --git a/tools/shell/regression/001.input b/shell/regression/001.input
similarity index 100%
rename from tools/shell/regression/001.input
rename to shell/regression/001.input
diff --git a/tools/shell/regression/002.exp.xml b/shell/regression/002.exp.xml
similarity index 100%
rename from tools/shell/regression/002.exp.xml
rename to shell/regression/002.exp.xml
diff --git a/tools/shell/regression/002.input b/shell/regression/002.input
similarity index 100%
rename from tools/shell/regression/002.input
rename to shell/regression/002.input
diff --git a/tools/shell/regression/003.exp.xml b/shell/regression/003.exp.xml
similarity index 100%
rename from tools/shell/regression/003.exp.xml
rename to shell/regression/003.exp.xml
diff --git a/tools/shell/regression/003.input b/shell/regression/003.input
similarity index 100%
rename from tools/shell/regression/003.input
rename to shell/regression/003.input
diff --git a/tools/shell/regression/004.exp.xml b/shell/regression/004.exp.xml
similarity index 100%
rename from tools/shell/regression/004.exp.xml
rename to shell/regression/004.exp.xml
diff --git a/tools/shell/regression/004.input b/shell/regression/004.input
similarity index 100%
rename from tools/shell/regression/004.input
rename to shell/regression/004.input
diff --git a/tools/shell/regression/Makefile.am b/shell/regression/Makefile.am
similarity index 100%
rename from tools/shell/regression/Makefile.am
rename to shell/regression/Makefile.am
diff --git a/tools/shell/regression/README.regression b/shell/regression/README.regression
similarity index 100%
rename from tools/shell/regression/README.regression
rename to shell/regression/README.regression
diff --git a/tools/shell/regression/crm-interface b/shell/regression/crm-interface
similarity index 100%
rename from tools/shell/regression/crm-interface
rename to shell/regression/crm-interface
diff --git a/tools/shell/regression/crm_regression.sh b/shell/regression/crm_regression.sh
similarity index 100%
rename from tools/shell/regression/crm_regression.sh
rename to shell/regression/crm_regression.sh
diff --git a/tools/shell/regression/defaults b/shell/regression/defaults
similarity index 100%
rename from tools/shell/regression/defaults
rename to shell/regression/defaults
diff --git a/tools/shell/regression/descriptions b/shell/regression/descriptions
similarity index 100%
rename from tools/shell/regression/descriptions
rename to shell/regression/descriptions
diff --git a/tools/shell/regression/empty.xml b/shell/regression/empty.xml
similarity index 100%
rename from tools/shell/regression/empty.xml
rename to shell/regression/empty.xml
diff --git a/tools/shell/regression/evaltest.sh b/shell/regression/evaltest.sh
similarity index 100%
rename from tools/shell/regression/evaltest.sh
rename to shell/regression/evaltest.sh
diff --git a/tools/shell/regression/lrmregtest-lsb.in b/shell/regression/lrmregtest-lsb.in
similarity index 100%
rename from tools/shell/regression/lrmregtest-lsb.in
rename to shell/regression/lrmregtest-lsb.in
diff --git a/tools/shell/regression/regression.sh.in b/shell/regression/regression.sh.in
similarity index 100%
rename from tools/shell/regression/regression.sh.in
rename to shell/regression/regression.sh.in
diff --git a/tools/shell/regression/shadow.base b/shell/regression/shadow.base
similarity index 100%
rename from tools/shell/regression/shadow.base
rename to shell/regression/shadow.base
diff --git a/tools/shell/regression/testcases/BSC b/shell/regression/testcases/BSC
similarity index 100%
rename from tools/shell/regression/testcases/BSC
rename to shell/regression/testcases/BSC
diff --git a/tools/shell/regression/testcases/Makefile.am b/shell/regression/testcases/Makefile.am
similarity index 100%
rename from tools/shell/regression/testcases/Makefile.am
rename to shell/regression/testcases/Makefile.am
diff --git a/tools/shell/regression/testcases/basicset b/shell/regression/testcases/basicset
similarity index 100%
rename from tools/shell/regression/testcases/basicset
rename to shell/regression/testcases/basicset
diff --git a/tools/shell/regression/testcases/common.excl b/shell/regression/testcases/common.excl
similarity index 100%
rename from tools/shell/regression/testcases/common.excl
rename to shell/regression/testcases/common.excl
diff --git a/tools/shell/regression/testcases/confbasic b/shell/regression/testcases/confbasic
similarity index 100%
rename from tools/shell/regression/testcases/confbasic
rename to shell/regression/testcases/confbasic
diff --git a/tools/shell/regression/testcases/confbasic-xml b/shell/regression/testcases/confbasic-xml
similarity index 100%
rename from tools/shell/regression/testcases/confbasic-xml
rename to shell/regression/testcases/confbasic-xml
diff --git a/tools/shell/regression/testcases/confbasic-xml.exp b/shell/regression/testcases/confbasic-xml.exp
similarity index 100%
rename from tools/shell/regression/testcases/confbasic-xml.exp
rename to shell/regression/testcases/confbasic-xml.exp
diff --git a/tools/shell/regression/testcases/confbasic.exp b/shell/regression/testcases/confbasic.exp
similarity index 100%
rename from tools/shell/regression/testcases/confbasic.exp
rename to shell/regression/testcases/confbasic.exp
diff --git a/tools/shell/regression/testcases/delete b/shell/regression/testcases/delete
similarity index 100%
rename from tools/shell/regression/testcases/delete
rename to shell/regression/testcases/delete
diff --git a/tools/shell/regression/testcases/delete.exp b/shell/regression/testcases/delete.exp
similarity index 100%
rename from tools/shell/regression/testcases/delete.exp
rename to shell/regression/testcases/delete.exp
diff --git a/tools/shell/regression/testcases/file b/shell/regression/testcases/file
similarity index 100%
rename from tools/shell/regression/testcases/file
rename to shell/regression/testcases/file
diff --git a/tools/shell/regression/testcases/file.exp b/shell/regression/testcases/file.exp
similarity index 100%
rename from tools/shell/regression/testcases/file.exp
rename to shell/regression/testcases/file.exp
diff --git a/tools/shell/regression/testcases/node b/shell/regression/testcases/node
similarity index 100%
rename from tools/shell/regression/testcases/node
rename to shell/regression/testcases/node
diff --git a/tools/shell/regression/testcases/node.exp b/shell/regression/testcases/node.exp
similarity index 100%
rename from tools/shell/regression/testcases/node.exp
rename to shell/regression/testcases/node.exp
diff --git a/tools/shell/regression/testcases/ra b/shell/regression/testcases/ra
similarity index 100%
rename from tools/shell/regression/testcases/ra
rename to shell/regression/testcases/ra
diff --git a/tools/shell/regression/testcases/ra.exp b/shell/regression/testcases/ra.exp
similarity index 100%
rename from tools/shell/regression/testcases/ra.exp
rename to shell/regression/testcases/ra.exp
diff --git a/tools/shell/regression/testcases/resource b/shell/regression/testcases/resource
similarity index 100%
rename from tools/shell/regression/testcases/resource
rename to shell/regression/testcases/resource
diff --git a/tools/shell/regression/testcases/resource.exp b/shell/regression/testcases/resource.exp
similarity index 100%
rename from tools/shell/regression/testcases/resource.exp
rename to shell/regression/testcases/resource.exp
diff --git a/tools/shell/regression/testcases/shadow b/shell/regression/testcases/shadow
similarity index 100%
rename from tools/shell/regression/testcases/shadow
rename to shell/regression/testcases/shadow
diff --git a/tools/shell/regression/testcases/shadow.exp b/shell/regression/testcases/shadow.exp
similarity index 100%
rename from tools/shell/regression/testcases/shadow.exp
rename to shell/regression/testcases/shadow.exp
diff --git a/tools/shell/regression/testcases/xmlonly.sh b/shell/regression/testcases/xmlonly.sh
similarity index 100%
rename from tools/shell/regression/testcases/xmlonly.sh
rename to shell/regression/testcases/xmlonly.sh
diff --git a/tools/shell/templates/Makefile.am b/shell/templates/Makefile.am
similarity index 100%
rename from tools/shell/templates/Makefile.am
rename to shell/templates/Makefile.am
diff --git a/tools/shell/templates/apache b/shell/templates/apache
similarity index 100%
rename from tools/shell/templates/apache
rename to shell/templates/apache
diff --git a/tools/shell/templates/clvm b/shell/templates/clvm
similarity index 100%
rename from tools/shell/templates/clvm
rename to shell/templates/clvm
diff --git a/tools/shell/templates/filesystem b/shell/templates/filesystem
similarity index 100%
rename from tools/shell/templates/filesystem
rename to shell/templates/filesystem
diff --git a/tools/shell/templates/gfs2 b/shell/templates/gfs2
similarity index 100%
rename from tools/shell/templates/gfs2
rename to shell/templates/gfs2
diff --git a/tools/shell/templates/gfs2-base b/shell/templates/gfs2-base
similarity index 100%
rename from tools/shell/templates/gfs2-base
rename to shell/templates/gfs2-base
diff --git a/tools/shell/templates/ocfs2 b/shell/templates/ocfs2
similarity index 100%
rename from tools/shell/templates/ocfs2
rename to shell/templates/ocfs2
diff --git a/tools/shell/templates/virtual-ip b/shell/templates/virtual-ip
similarity index 100%
rename from tools/shell/templates/virtual-ip
rename to shell/templates/virtual-ip
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 64f1273f21..638248377d 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -1,148 +1,146 @@
#
# Copyright (C) 2004-2009 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
-SUBDIRS = shell
-
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
COMMONLIBS = \
$(top_builddir)/lib/common/libcrmcommon.la \
$(top_builddir)/lib/cib/libcib.la \
$(CURSESLIBS) $(CLUSTERLIBS)
headerdir = $(pkgincludedir)/crm
header_HEADERS = attrd.h
EXTRA_DIST = $(sbin_SCRIPTS)
halibdir = $(CRM_DAEMON_DIR)
halib_SCRIPTS = haresources2cib.py hb2openais.sh
halib_PROGRAMS = attrd pingd
halib_PYTHON = crm_primitive.py hb2openais-helper.py
sbin_PROGRAMS = crm_simulate crmadmin cibadmin crm_node crm_attribute crm_resource crm_verify \
crm_uuid crm_shadow attrd_updater crm_diff crm_mon iso8601
testdir = $(datadir)/$(PACKAGE)/tests/cli
test_SCRIPTS = regression.sh
test_DATA = regression.exp
if BUILD_SERVICELOG
sbin_PROGRAMS += notifyServicelogEvent
endif
if BUILD_OPENIPMI_SERICELOG
sbin_PROGRAMS += ipmiservicelogd
endif
if BUILD_HELP
man8_MANS = $(sbin_PROGRAMS:%=%.8)
endif
sbin_SCRIPTS = crm crm_standby crm_master crm_failcount
## SOURCES
#noinst_HEADERS = config.h control.h crmd.h
noinst_HEADERS =
crmadmin_SOURCES = crmadmin.c
crmadmin_LDADD = $(COMMONLIBS) $(CLUSTERLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la
crm_uuid_SOURCES = crm_uuid.c
crm_uuid_LDADD = $(top_builddir)/lib/common/libcrmcluster.la
cibadmin_SOURCES = cibadmin.c
cibadmin_LDADD = $(COMMONLIBS)
crm_shadow_SOURCES = cib_shadow.c
crm_shadow_LDADD = $(COMMONLIBS)
crm_node_SOURCES = ccm_epoche.c
crm_node_LDADD = $(COMMONLIBS) $(CLUSTERLIBS) \
$(top_builddir)/lib/common/libcrmcluster.la
crm_simulate_SOURCES = crm_inject.c
crm_simulate_CFLAGS = -I$(top_srcdir)/pengine
crm_simulate_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la \
$(top_builddir)/lib/cib/libcib.la \
$(top_builddir)/lib/transition/libtransitioner.la
crm_diff_SOURCES = xml_diff.c
crm_diff_LDADD = $(COMMONLIBS)
crm_mon_SOURCES = crm_mon.c
crm_mon_LDADD = $(COMMONLIBS) $(SNMPLIBS) $(ESMTPLIBS) -llrm \
$(top_builddir)/lib/pengine/libpe_status.la
# Arguments could be made that this should live in crm/pengine
crm_verify_SOURCES = crm_verify.c
crm_verify_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la
crm_attribute_SOURCES = crm_attribute.c
crm_attribute_LDADD = $(COMMONLIBS)
crm_resource_SOURCES = crm_resource.c
crm_resource_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_rules.la \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la
iso8601_SOURCES = test.iso8601.c
iso8601_LDADD = $(COMMONLIBS)
attrd_SOURCES = attrd.c
attrd_LDADD = $(COMMONLIBS) $(top_builddir)/lib/common/libcrmcluster.la
pingd_SOURCES = pingd.c
pingd_LDADD = $(COMMONLIBS)
attrd_updater_SOURCES = attrd_updater.c
attrd_updater_LDADD = $(COMMONLIBS)
if BUILD_SERVICELOG
notifyServicelogEvent_SOURCES = notifyServicelogEvent.c
notifyServicelogEvent_CFLAGS = `pkg-config --cflags servicelog-1`
notifyServicelogEvent_LDFLAGS = `pkg-config --libs servicelog-1` $(top_builddir)/lib/common/libcrmcommon.la
endif
if BUILD_OPENIPMI_SERICELOG
ipmiservicelogd_SOURCES = ipmiservicelogd.c
ipmiservicelogd_CFLAGS = `pkg-config --cflags OpenIPMI OpenIPMIposix servicelog-1`
ipmiservicelogd_LDFLAGS = `pkg-config --libs OpenIPMI OpenIPMIposix servicelog-1` $(top_builddir)/lib/common/libcrmcommon.la
endif
%.8: %
echo Creating $@
help2man --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/tools/$<
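# Illustrative expansion of the rule above (not part of the build): for the
# crm_mon target it would run, roughly,
#   help2man --output crm_mon.8 --no-info --section 8 \
#     --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/tools/crm_mon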
clean-generic:
rm -f *.log *.debug *.xml *~
install-exec-local:
uninstall-local:
.PHONY: install-exec-hook
diff --git a/tools/ccm_epoche.c b/tools/ccm_epoche.c
index 827bbb5d18..1c604e5ea8 100644
--- a/tools/ccm_epoche.c
+++ b/tools/ccm_epoche.c
@@ -1,446 +1,446 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h> /* for basename() */
#include <crm/crm.h>
#include <crm/ais.h>
#include <crm/common/cluster.h>
#include <crm/cib.h>
int command = 0;
int ccm_fd = 0;
int try_hb = 1;
int try_ais = 1;
gboolean do_quiet = FALSE;
char *target_uuid = NULL;
char *target_uname = NULL;
const char *standby_value = NULL;
const char *standby_scope = NULL;
void ais_membership_destroy(gpointer user_data);
gboolean ais_membership_dispatch(AIS_Message *wrapper, char *data, int sender);
#include <../lib/common/stack.h>
#if SUPPORT_HEARTBEAT
# include <ocf/oc_event.h>
# include <ocf/oc_membership.h>
# include <clplumbing/cl_uuid.h>
# define UUID_LEN 16
oc_ev_t *ccm_token = NULL;
void oc_ev_special(const oc_ev_t *, oc_ev_class_t , int );
void ccm_age_callback(
oc_ed_t event, void *cookie, size_t size, const void *data);
gboolean ccm_age_connect(int *ccm_fd);
static int read_local_hb_uuid(void)
{
int rc = 0;
cl_uuid_t uuid;
char *buffer = NULL;
long start = 0, read_len = 0;
FILE *input = fopen(UUID_FILE, "r");
if(input == NULL) {
cl_perror("Could not open UUID file %s\n", UUID_FILE);
return 1;
}
/* see how big the file is */
start = ftell(input);
fseek(input, 0L, SEEK_END);
if(UUID_LEN != ftell(input)) {
fprintf(stderr, "%s must contain exactly %d bytes\n", UUID_FILE, UUID_LEN);
abort();
}
fseek(input, 0L, start);
if(start != ftell(input)) {
fprintf(stderr, "fseek not behaving: %ld vs. %ld\n", start, ftell(input));
rc = 2;
goto bail;
}
buffer = malloc(50);
read_len = fread(uuid.uuid, 1, UUID_LEN, input);
if(read_len != UUID_LEN) {
fprintf(stderr, "Expected and read bytes differ: %d vs. %ld\n",
UUID_LEN, read_len);
rc = 3;
goto bail;
} else if(buffer != NULL) {
cl_uuid_unparse(&uuid, buffer);
fprintf(stdout, "%s\n", buffer);
} else {
fprintf(stderr, "No buffer to unparse\n");
rc = 4;
}
bail:
free(buffer);
fclose(input);
return rc;
}
#endif
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"quiet", 0, 0, 'Q', "\tEssential output only"},
{"-spacer-", 1, 0, '-', "\nStack:", SUPPORT_HEARTBEAT},
{"openais", 0, 0, 'A', "\tOnly try connecting to an OpenAIS-based cluster", SUPPORT_HEARTBEAT},
{"heartbeat", 0, 0, 'H', "Only try connecting to a Heartbeat-based cluster", SUPPORT_HEARTBEAT},
{"-spacer-", 1, 0, '-', "\nCommands:"},
{"epoch", 0, 0, 'e', "\tDisplay the epoch during which this node joined the cluster"},
{"quorum", 0, 0, 'q', "\tDisplay a 1 if our partition has quorum, 0 if not"},
{"list", 0, 0, 'l', "(AIS-Only) Display all known members (past and present) of this cluster"},
{"partition", 0, 0, 'p', "Display the members of this partition"},
{"cluster-id", 0, 0, 'i', "Display this node's cluster id"},
{"remove", 1, 0, 'R', "(Advanced, AIS-Only) Remove the (stopped) node with the specified nodeid from the cluster"},
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"force", 0, 0, 'f'},
{0, 0, 0, 0}
};
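/* Usage sketch, matching the option table above:
 *   crm_node -e            display the epoch in which this node joined the cluster
 *   crm_node -q            display 1 if this partition has quorum, 0 if not
 *   crm_node -p            display the members of this partition
 *   crm_node -R <id> -f    (advanced, AIS only) remove a stopped node from the cluster
 */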
int local_id = 0;
int
main(int argc, char ** argv)
{
int flag = 0;
int argerr = 0;
gboolean force_flag = FALSE;
gboolean dangerous_cmd = FALSE;
int option_index = 0;
crm_peer_init();
- crm_log_init(basename(argv[0]), LOG_WARNING, FALSE, FALSE, 0, NULL);
+ crm_log_init(basename(argv[0]), LOG_WARNING, FALSE, FALSE, argc, argv);
crm_set_options("?V$qepHR:ifl", "command [options]", long_options,
"Tool for displaying low-level node information");
while (flag >= 0) {
flag = crm_get_option(argc, argv, &option_index);
switch(flag) {
case -1:
break;
case 'V':
cl_log_enable_stderr(TRUE);
alter_debug(DEBUG_INC);
break;
case '$':
case '?':
crm_help(flag, LSB_EXIT_OK);
break;
case 'Q':
do_quiet = TRUE;
break;
case 'H':
try_ais = 0;
break;
case 'A':
try_hb = 0;
break;
case 'f':
force_flag = TRUE;
break;
case 'R':
dangerous_cmd = TRUE;
command = flag;
target_uname = optarg;
break;
case 'p':
case 'e':
case 'q':
case 'i':
case 'l':
command = flag;
break;
default:
++argerr;
break;
}
}
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', LSB_EXIT_GENERIC);
}
if(dangerous_cmd && force_flag == FALSE) {
fprintf(stderr, "The supplied command is considered dangerous."
" To prevent accidental destruction of the cluster,"
" the --force flag is required in order to proceed.\n");
fflush(stderr);
exit(LSB_EXIT_GENERIC);
}
#if SUPPORT_AIS
if(try_ais && init_ais_connection(
ais_membership_dispatch, ais_membership_destroy, NULL, NULL, &local_id)) {
GMainLoop* amainloop = NULL;
switch(command) {
case 'R':
send_ais_text(crm_class_rmpeer, target_uname, TRUE, NULL, crm_msg_ais);
return 0;
case 'e':
/* Age makes no sense (yet) in an AIS cluster */
fprintf(stdout, "1\n");
return 0;
case 'q':
send_ais_text(crm_class_quorum, NULL, TRUE, NULL, crm_msg_ais);
break;
case 'l':
case 'p':
crm_info("Requesting the list of configured nodes");
send_ais_text(crm_class_members, __FUNCTION__, TRUE, NULL, crm_msg_ais);
break;
case 'i':
printf("%d\n", local_id);
default:
fprintf(stderr, "Unknown option '%c'\n", command);
crm_help('?', LSB_EXIT_GENERIC);
}
amainloop = g_main_new(FALSE);
g_main_run(amainloop);
}
#endif
#if SUPPORT_HEARTBEAT
if(try_hb && command == 'i') {
return read_local_hb_uuid();
} else if(try_hb && ccm_age_connect(&ccm_fd)) {
int rc = 0;
fd_set rset;
oc_ev_t *ccm_token = NULL;
while (1) {
sleep(1);
FD_ZERO(&rset);
FD_SET(ccm_fd, &rset);
errno = 0;
rc = select(ccm_fd + 1, &rset, NULL,NULL,NULL);
if(rc > 0 && oc_ev_handle_event(ccm_token) != 0) {
crm_err("oc_ev_handle_event failed");
return 1;
} else if(rc < 0 && errno != EINTR) {
crm_perror(LOG_ERR, "select failed");
return 1;
}
}
}
#endif
return(1);
}
#if SUPPORT_HEARTBEAT
gboolean
ccm_age_connect(int *ccm_fd)
{
gboolean did_fail = FALSE;
int ret = 0;
crm_debug("Registering with CCM");
ret = oc_ev_register(&ccm_token);
if (ret != 0) {
crm_warn("CCM registration failed");
did_fail = TRUE;
}
if(did_fail == FALSE) {
crm_debug("Setting up CCM callbacks");
ret = oc_ev_set_callback(ccm_token, OC_EV_MEMB_CLASS,
ccm_age_callback, NULL);
if (ret != 0) {
crm_warn("CCM callback not set");
did_fail = TRUE;
}
}
if(did_fail == FALSE) {
oc_ev_special(ccm_token, OC_EV_MEMB_CLASS, 0/*don't care*/);
crm_debug("Activating CCM token");
ret = oc_ev_activate(ccm_token, ccm_fd);
if (ret != 0){
crm_warn("CCM Activation failed");
did_fail = TRUE;
}
}
return !did_fail;
}
void
ccm_age_callback(oc_ed_t event, void *cookie, size_t size, const void *data)
{
int lpc;
int node_list_size;
const oc_ev_membership_t *oc = (const oc_ev_membership_t *)data;
node_list_size = oc->m_n_member;
if(command == 'q') {
crm_debug("Processing \"%s\" event.",
event==OC_EV_MS_NEW_MEMBERSHIP?"NEW MEMBERSHIP":
event==OC_EV_MS_NOT_PRIMARY?"NOT PRIMARY":
event==OC_EV_MS_PRIMARY_RESTORED?"PRIMARY RESTORED":
event==OC_EV_MS_EVICTED?"EVICTED":
"NO QUORUM MEMBERSHIP");
if(ccm_have_quorum(event)) {
fprintf(stdout, "1\n");
} else {
fprintf(stdout, "0\n");
}
} else if(command == 'e') {
crm_debug("Searching %d members for our birth", oc->m_n_member);
}
for(lpc=0; lpc<node_list_size; lpc++) {
if(command == 'p') {
fprintf(stdout, "%s ",
oc->m_array[oc->m_memb_idx+lpc].node_uname);
} else if(command == 'e') {
if(oc_ev_is_my_nodeid(ccm_token, &(oc->m_array[lpc]))){
crm_debug("MATCH: nodeid=%d, uname=%s, born=%d",
oc->m_array[oc->m_memb_idx+lpc].node_id,
oc->m_array[oc->m_memb_idx+lpc].node_uname,
oc->m_array[oc->m_memb_idx+lpc].node_born_on);
fprintf(stdout, "%d\n",
oc->m_array[oc->m_memb_idx+lpc].node_born_on);
}
}
}
oc_ev_callback_done(cookie);
if(command == 'p') {
fprintf(stdout, "\n");
}
fflush(stdout);
exit(0);
}
#endif
#if SUPPORT_AIS
void
ais_membership_destroy(gpointer user_data)
{
crm_err("AIS connection terminated");
ais_fd_sync = -1;
exit(1);
}
#endif
static gint member_sort(gconstpointer a, gconstpointer b)
{
const crm_node_t *node_a = a;
const crm_node_t *node_b = b;
return strcmp(node_a->uname, node_b->uname);
}
static void crm_add_member(
gpointer key, gpointer value, gpointer user_data)
{
GList **list = user_data;
crm_node_t *node = value;
if(node->uname != NULL) {
*list = g_list_insert_sorted(*list, node, member_sort);
}
}
gboolean
ais_membership_dispatch(AIS_Message *wrapper, char *data, int sender)
{
switch(wrapper->header.id) {
case crm_class_members:
case crm_class_notify:
case crm_class_quorum:
break;
default:
return TRUE;
break;
}
if(command == 'q') {
if(crm_have_quorum) {
fprintf(stdout, "1\n");
} else {
fprintf(stdout, "0\n");
}
} else if(command == 'l') {
GList *nodes = NULL;
g_hash_table_foreach(crm_peer_cache, crm_add_member, &nodes);
slist_iter(node, crm_node_t, nodes, lpc,
fprintf(stdout, "%u %s %s\n", node->id, node->uname, node->state);
);
fprintf(stdout, "\n");
} else if(command == 'p') {
GList *nodes = NULL;
g_hash_table_foreach(crm_peer_cache, crm_add_member, &nodes);
slist_iter(node, crm_node_t, nodes, lpc,
if(node->uname && crm_is_member_active(node)) {
fprintf(stdout, "%s ", node->uname);
}
);
fprintf(stdout, "\n");
}
exit(0);
return TRUE;
}
diff --git a/tools/crm.in b/tools/crm.in
index 4f937788c1..6b2f77cf34 100644
--- a/tools/crm.in
+++ b/tools/crm.in
@@ -1,7618 +1,7629 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Dejan Muhamedagic <dmuhamedagic@suse.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import shlex
import os
from tempfile import mkstemp
import subprocess
import sys
import time
import readline
import copy
import xml.dom.minidom
import signal
import re
import glob
def is_program(prog):
return subprocess.call("which %s >/dev/null 2>&1"%prog, shell=True) == 0
def prereqs():
proglist = "which cibadmin crm_resource crm_attribute crm_mon crm_standby crm_failcount"
for prog in proglist.split():
if not is_program(prog):
print >> sys.stderr, "%s not available, check your installation"%prog
sys.exit(1)
prereqs()
lineno = -1
regression_tests = False
class ErrorBuffer(object):
'''
Show error messages either immediately or buffered.
'''
def __init__(self):
self.msg_list = []
self.mode = "immediate"
def buffer(self):
self.mode = "keep"
def release(self):
if self.msg_list:
print >> sys.stderr, '\n'.join(self.msg_list)
if not batch:
try:
raw_input("Press enter to continue... ")
except EOFError:
pass
self.msg_list = []
self.mode = "immediate"
def writemsg(self,msg):
if self.mode == "immediate":
if regression_tests:
print msg
else:
print >> sys.stderr, msg
else:
self.msg_list.append(msg)
def error(self,s):
self.writemsg("ERROR: %s" % add_lineno(s))
def warning(self,s):
self.writemsg("WARNING: %s" % add_lineno(s))
def info(self,s):
self.writemsg("INFO: %s" % add_lineno(s))
def debug(self,s):
if user_prefs.get_debug():
self.writemsg("DEBUG: %s" % add_lineno(s))
err_buf = ErrorBuffer()
def add_lineno(s):
if lineno > 0:
return "%d: %s" % (lineno,s)
else: return s
def common_err(s):
err_buf.error(s)
def common_warn(s):
err_buf.warning(s)
def common_info(s):
err_buf.info(s)
def common_debug(s):
err_buf.debug(s)
def no_prog_err(name):
err_buf.error("%s not available, check your installation"%name)
def missing_prog_warn(name):
err_buf.warning("could not find any %s on the system"%name)
def no_attribute_err(attr,obj_type):
err_buf.error("required attribute %s not found in %s"%(attr,obj_type))
def bad_def_err(what,msg):
err_buf.error("bad %s definition: %s"%(what,msg))
def unsupported_err(name):
err_buf.error("%s is not supported"%name)
def no_such_obj_err(name):
err_buf.error("%s object is not supported"%name)
def obj_cli_err(name):
err_buf.error("object %s cannot be represented in the CLI notation"%name)
def missing_obj_err(node):
err_buf.error("object %s:%s missing (shouldn't have happened)"% \
(node.tagName,node.getAttribute("id")))
def constraint_norefobj_err(constraint_id,obj_id):
err_buf.error("constraint %s references a resource %s which doesn't exist"% \
(constraint_id,obj_id))
def obj_exists_err(name):
err_buf.error("object %s already exists"%name)
def no_object_err(name):
err_buf.error("object %s does not exist"%name)
def invalid_id_err(obj_id):
err_buf.error("%s: invalid object id"%obj_id)
def id_used_err(node_id):
err_buf.error("%s: id is already in use"%node_id)
def skill_err(s):
err_buf.error("%s: this command is not allowed at this skill level"%' '.join(s))
def syntax_err(s,token = '',context = ''):
pfx = "syntax"
if context:
pfx = "%s in %s" %(pfx,context)
if type(s) == type(''):
err_buf.error("%s near <%s>"%(pfx,s))
elif token:
err_buf.error("%s near <%s>: %s"%(pfx,token,' '.join(s)))
else:
err_buf.error("%s: %s"%(pfx,' '.join(s)))
def bad_usage(cmd,args):
err_buf.error("bad usage: %s %s"%(cmd,args))
def empty_cib_err():
err_buf.error("No CIB!")
def cib_parse_err(msg):
err_buf.error("%s"%msg)
def cib_no_elem_err(el_name):
err_buf.error("CIB contains no '%s' element!"%el_name)
def cib_ver_unsupported_err(validator,rel):
err_buf.error("CIB not supported: validator '%s', release '%s'"% (validator,rel))
err_buf.error("You may try the upgrade command")
def update_err(obj_id,cibadm_opt,xml):
if cibadm_opt == '-U':
task = "update"
elif cibadm_opt == '-D':
task = "delete"
else:
task = "replace"
err_buf.error("could not %s %s"%(task,obj_id))
err_buf.info("offending xml: %s" % xml)
def not_impl_info(s):
err_buf.info("%s is not implemented yet" % s)
def ask(msg):
print_msg = True
while True:
try:
ans = raw_input(msg + ' ')
except EOFError:
ans = 'n'
if not ans or ans[0].lower() not in ('n','y'):
if print_msg:
print "Please answer with y[es] or n[o]"
print_msg = False
else:
return ans[0].lower() == 'y'
def keyword_cmp(string1, string2):
return string1.lower() == string2.lower()
from UserDict import DictMixin
class odict(DictMixin):
def __init__(self, data=None, **kwdata):
self._keys = []
self._data = {}
def __setitem__(self, key, value):
if key not in self._data:
self._keys.append(key)
self._data[key] = value
def __getitem__(self, key):
if key not in self._data:
return self._data[key.lower()]
return self._data[key]
def __delitem__(self, key):
del self._data[key]
self._keys.remove(key)
def keys(self):
return list(self._keys)
def copy(self):
copyDict = odict()
copyDict._data = self._data.copy()
copyDict._keys = self._keys[:]
return copyDict
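# Illustrative behaviour of the ordered dict above: keys are returned in
# insertion order rather than hash order, e.g.
#   d = odict(); d["stop"] = 1; d["start"] = 2
#   d.keys()  # -> ["stop", "start"]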
class olist(list):
def __init__(self, keys):
#print "Init %s" % (repr(keys))
super(olist, self).__init__()
for key in keys:
self.append(key)
self.append(key.upper())
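# olist stores both spellings of every keyword, so membership tests succeed
# for the lower-case and the upper-case form, e.g. (illustrative):
#   olist(["params", "meta"])  # behaves like ["params", "PARAMS", "meta", "META"]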
def help_short(s):
r = re.search("help_[^,]+,(.*)\]\]", s)
return r and r.group(1) or ''
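# A minimal example of the short-help extraction above, assuming a
# hypothetical manual entry:
#   help_short("[[cmdhelp_ra_info,show meta data for a RA]]")
#   # -> "show meta data for a RA"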
class HelpSystem(object):
'''
The help system. All help is in the following form in the
manual:
[[cmdhelp_<level>_<cmd>,<short help text>]]
=== ...
Long help text.
...
[[cmdhelp_<level>_<cmd>,<short help text>]]
Help for the level itself is like this:
[[cmdhelp_<level>,<short help text>]]
'''
- help_text_file = "@docdir@/crm_cli.txt"
+ help_text_file = "@datadir@/@PACKAGE@/crm_cli.txt"
index_file = "%s/%s" % (os.getenv("HOME"),".crm_help_index")
def __init__(self):
self.key_pos = {}
self.key_list = []
self.no_help_file = False # don't print repeatedly messages
self.bad_index = False # don't print repeatedly warnings for bad index
def open_file(self,name,mode):
try:
f = open(name,mode)
return f
except IOError,msg:
common_err("%s open: %s"%(name,msg))
common_err("extensive help system is not available")
self.no_help_file = True
return None
def drop_index(self):
common_info("removing index")
os.unlink(self.index_file)
self.key_pos = {}
self.key_list = []
self.bad_index = True
def mk_index(self):
'''
Prepare an index file, sorted by topic, with seek positions
Do we need a hash on content?
'''
if self.no_help_file:
return False
+ crm_help_v = os.getenv("CRM_HELP_FILE")
+ if crm_help_v:
+ self.help_text_file = crm_help_v
help_f = self.open_file(self.help_text_file,"r")
if not help_f:
return False
idx_f = self.open_file(self.index_file,"w")
if not idx_f:
return False
common_info("building help index")
key_pos = {}
while 1:
pos = help_f.tell()
s = help_f.readline()
if not s:
break
if s.startswith("[["):
r = re.search(r'..([^,]+),', s)
if r:
key_pos[r.group(1)] = pos
help_f.close()
l = key_pos.keys()
l.sort()
for key in l:
print >>idx_f, '%s %d' % (key,key_pos[key])
idx_f.close()
return True
def is_index_old(self):
try:
t_idx = os.path.getmtime(self.index_file)
except:
return True
try:
t_help = os.path.getmtime(self.help_text_file)
except:
return True
return t_help > t_idx
def load_index(self):
if self.is_index_old():
self.mk_index()
self.key_pos = {}
idx_f = self.open_file(self.index_file,"r")
if not idx_f:
return False
for s in idx_f:
a = s.split()
if len(a) != 2:
if not self.bad_index:
common_err("index file corrupt")
idx_f.close()
self.drop_index()
return self.load_index() # this runs only once
return False
self.key_pos[a[0]] = long(a[1])
idx_f.close()
self.key_list = self.key_pos.keys()
self.key_list.sort()
return True
def __filter(self,s):
if '<<' in s:
return re.sub(r'<<[^,]+,(.+)>>', r'\1', s)
else:
return s
def __find_key(self,key):
low = 0
high = len(self.key_list)-1
while low <= high:
mid = (low + high)/2
if self.key_list[mid] > key:
high = mid - 1
elif self.key_list[mid] < key:
low = mid + 1
else:
return mid
return -1
def __load_help_one(self,key,skip = 2):
longhelp = ''
self.help_f.seek(self.key_pos[key])
shorthelp = help_short(self.help_f.readline())
for i in range(skip-1):
self.help_f.readline()
l = []
for s in self.help_f:
if s.startswith("[[") or s.startswith("="):
break
l.append(self.__filter(s))
if l and l[-1] == '\n': # drop the trailing empty line
l.pop()
if l:
longhelp = ''.join(l)
if not shorthelp or not longhelp:
if not self.bad_index:
common_warn("help topic %s not found" % key)
self.drop_index()
return shorthelp,longhelp
def cmdhelp(self,s):
if not self.key_pos and not self.load_index():
return None,None
if not s in self.key_pos:
if not self.bad_index:
common_warn("help topic %s not found" % s)
self.drop_index()
return None,None
return self.__load_help_one(s)
def __load_level(self,lvl):
'''
For the given level, create a help table.
'''
if wcache.is_cached("lvl_help_tab_%s" % lvl):
return wcache.retrieve("lvl_help_tab_%s" % lvl)
if not self.key_pos and not self.load_index():
return None
self.help_f = self.open_file(self.help_text_file,"r")
if not self.help_f:
return None
lvl_s = "cmdhelp_%s" % lvl
if not lvl_s in self.key_pos:
if not self.bad_index:
common_warn("help table for level %s not found" % lvl)
self.drop_index()
return None
common_debug("loading help table for level %s" % lvl)
help_tab = odict()
help_tab["."] = self.__load_help_one(lvl_s)
lvl_idx = self.__find_key(lvl_s)
lvl_idx += 1
while lvl_idx < len(self.key_list):
key = self.key_list[lvl_idx]
if not key.startswith(lvl_s):
break
cmd = key[len(lvl_s)+1:]
help_tab[cmd] = self.__load_help_one(key)
lvl_idx += 1
self.help_f.close()
help_tab["quit"] = ("exit the program", "")
help_tab["help"] = ("show help", "")
help_tab["end"] = ("go back one level", "")
return help_tab
def load_level(self,lvl):
help_tab = self.__load_level(lvl)
if self.bad_index: # try again
help_tab = self.__load_level(lvl)
return wcache.store("lvl_help_tab_%s" % lvl, help_tab)
# from: http://code.activestate.com/recipes/475116/
class TerminalController(object):
"""
A class that can be used to portably generate formatted output to
a terminal.
`TerminalController` defines a set of instance variables whose
values are initialized to the control sequence necessary to
perform a given action. These can be simply included in normal
output to the terminal:
>>> term = TerminalController()
>>> print 'This is '+term.GREEN+'green'+term.NORMAL
Alternatively, the `render()` method can used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
>>> print term.render('This is ${GREEN}green${NORMAL}')
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
result, the above code will still work on terminals that do not
support color, except that their output will not be colored.
Also, this means that you can test whether the terminal supports a
given action by simply testing the truth value of the
corresponding instance variable:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
... print 'This terminal supports clearing the screen.'
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
"""
# Cursor movement:
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
# Deletion:
CLEAR_SCREEN = '' #: Clear the screen and move to home position
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_BOL = '' #: Clear to the beginning of the line.
CLEAR_EOS = '' #: Clear to the end of the screen
# Output modes:
BOLD = '' #: Turn on bold mode
BLINK = '' #: Turn on blink mode
DIM = '' #: Turn on half-bright mode
REVERSE = '' #: Turn on reverse-video mode
NORMAL = '' #: Turn off all modes
# Cursor display:
HIDE_CURSOR = '' #: Make the cursor invisible
SHOW_CURSOR = '' #: Make the cursor visible
# Terminal size:
COLS = None #: Width of the terminal (None for unknown)
LINES = None #: Height of the terminal (None for unknown)
# Foreground colors:
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
# Background colors:
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
def __init__(self, term_stream=sys.stdout):
"""
Create a `TerminalController` and initialize its attributes
with appropriate values for the current terminal.
`term_stream` is the stream that will be used for terminal
output; if this stream is not a tty, then the terminal is
assumed to be a dumb terminal (i.e., have no capabilities).
"""
# Curses isn't available on all platforms
try: import curses
except:
common_info("no curses support: you won't see colors")
return
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
self.LINES = curses.tigetnum('lines')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, curses.tparm(set_fg, i) or '')
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
set_bg = self._tigetstr('setb')
if set_bg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name) or ''
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
Replace each $-substitutions in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
s = match.group()
if s == '$$': return s
else: return getattr(self, s[2:-1])
def is_color(self, s):
try:
attr = getattr(self, s.upper())
return attr != None
except: return False
class CliDisplay(object):
"""
Display output for various syntax elements.
"""
def __init__(self):
self.no_pretty = False
def set_no_pretty(self):
self.no_pretty = True
def reset_no_pretty(self):
self.no_pretty = False
def colorstring(self, clrnum, s):
if self.no_pretty:
return s
else:
return termctrl.render("${%s}%s${NORMAL}" % \
(user_prefs.colorscheme[clrnum].upper(), s))
def keyword(self, kw):
s = kw
if "uppercase" in user_prefs.output:
s = s.upper()
if "color" in user_prefs.output:
s = self.colorstring(0, s)
return s
def otherword(self, n, s):
if "color" in user_prefs.output:
return self.colorstring(n, s)
else:
return s
def id(self, s):
return self.otherword(1, s)
def attr_name(self, s):
return self.otherword(2, s)
def attr_value(self, s):
return self.otherword(3, s)
def rscref(self, s):
return self.otherword(4, s)
def score(self, s):
return self.otherword(5, s)
global_aliases = {
"quit": ("bye","exit"),
"end": ("cd","up"),
}
def setup_aliases(obj):
for cmd in obj.cmd_aliases.keys():
for alias in obj.cmd_aliases[cmd]:
if obj.help_table:
obj.help_table[alias] = obj.help_table[cmd]
obj.cmd_table[alias] = obj.cmd_table[cmd]
#
# Resource Agents interface (meta-data, parameters, etc)
#
ocf_root = os.getenv("OCF_ROOT")
if not ocf_root:
ocf_root = "@OCF_ROOT_DIR@"
if not ocf_root:
ocf_root = "/usr/lib/ocf"
os.putenv("OCF_ROOT",ocf_root)
class RaLrmd(object):
'''
Getting information from the resource agents.
'''
lrmadmin_prog = "lrmadmin"
def __init__(self):
self.good = self.is_lrmd_accessible()
def lrmadmin(self, opts, xml = False):
'''
Get information directly from lrmd using lrmadmin.
'''
l = stdout2list("%s %s" % (self.lrmadmin_prog,opts))
if l and not xml:
l = l[1:] # skip the first line
return l
def is_lrmd_accessible(self):
if not (is_program(self.lrmadmin_prog) and is_process("lrmd")):
return False
return subprocess.call(\
add_sudo(">/dev/null 2>&1 %s -C" % self.lrmadmin_prog), \
shell=True) == 0
def meta(self, ra_class,ra_type,ra_provider):
return self.lrmadmin("-M %s %s %s"%(ra_class,ra_type,ra_provider),True)
def providers(self, ra_type,ra_class = "ocf"):
'List of providers for a class:type.'
return self.lrmadmin("-P %s %s" % (ra_class,ra_type),True)
def classes(self):
'List of classes.'
return self.lrmadmin("-C")
def types(self, ra_class = "ocf", ra_provider = ""):
'List of types for a class.'
return self.lrmadmin("-T %s" % ra_class)
def os_types_list(path):
l = []
for f in glob.glob(path):
if os.access(f,os.X_OK) and os.path.isfile(f):
a = f.split("/")
l.append(a[-1])
return l
class RaOS(object):
'''
Getting information from the resource agents (direct).
'''
def __init__(self):
self.good = True
def meta(self, ra_class,ra_type,ra_provider):
l = []
if ra_class == "ocf":
l = stdout2list("%s/resource.d/%s/%s meta-data" % \
(ocf_root,ra_provider,ra_type))
elif ra_class == "stonith":
l = stdout2list("stonith -m -t %s" % ra_type)
return l
def providers(self, ra_type,ra_class = "ocf"):
'List of providers for a class:type.'
l = []
if ra_class == "ocf":
for s in glob.glob("%s/resource.d/*/%s" % (ocf_root,ra_type)):
a = s.split("/")
if len(a) == 7:
l.append(a[5])
return l
def classes(self):
'List of classes.'
return "heartbeat lsb ocf stonith".split()
def types(self, ra_class = "ocf", ra_provider = ""):
'List of types for a class.'
l = []
prov = ra_provider and ra_provider or "*"
if ra_class == "ocf":
l = os_types_list("%s/resource.d/%s/*" % (ocf_root,prov))
elif ra_class == "lsb":
l = os_types_list("/etc/init.d/*")
elif ra_class == "stonith":
l = stdout2list("stonith -L")
l = list(set(l))
l.sort()
return l
def ra_classes():
'''
List of RA classes.
'''
if wcache.is_cached("ra_classes"):
return wcache.retrieve("ra_classes")
l = ra_if.classes()
l.sort()
return wcache.store("ra_classes",l)
def ra_providers(ra_type,ra_class = "ocf"):
'List of providers for a class:type.'
id = "ra_providers-%s-%s" % (ra_class,ra_type)
if wcache.is_cached(id):
return wcache.retrieve(id)
l = ra_if.providers(ra_type,ra_class)
l.sort()
return wcache.store(id,l)
def ra_providers_all(ra_class = "ocf"):
'''
List of providers for a class.
'''
id = "ra_providers_all-%s" % ra_class
if wcache.is_cached(id):
return wcache.retrieve(id)
dir = ocf_root + "/resource.d"
l = []
for s in os.listdir(dir):
if os.path.isdir("%s/%s" % (dir,s)):
l.append(s)
l.sort()
return wcache.store(id,l)
def ra_types(ra_class = "ocf", ra_provider = ""):
'''
List of RA type for a class.
'''
if not ra_class:
ra_class = "ocf"
id = "ra_types-%s-%s" % (ra_class,ra_provider)
if wcache.is_cached(id):
return wcache.retrieve(id)
if ra_provider:
list = []
for ra in ra_if.types(ra_class):
if ra_provider in ra_providers(ra,ra_class):
list.append(ra)
else:
list = ra_if.types(ra_class)
list.sort()
return wcache.store(id,list)
def prog_meta(s):
'''
Do external program metadata.
'''
prog = "@CRM_DAEMON_DIR@/%s" % s
l = []
if is_program(prog):
l = stdout2list("%s metadata" % prog)
return l
def get_nodes_text(n,tag):
try:
node = n.getElementsByTagName(tag)[0]
for c in node.childNodes:
if c.nodeType == c.TEXT_NODE:
return c.data.strip()
except: return ''
def mk_monitor_name(role,depth):
depth = depth == "0" and "" or ("_%s" % depth)
return role and role != "Started" and \
"monitor_%s%s" % (role,depth) or \
"monitor%s" % depth
def monitor_name_node(node):
depth = node.getAttribute("depth") or '0'
role = node.getAttribute("role")
return mk_monitor_name(role,depth)
def monitor_name_pl(pl):
depth = find_value(pl, "depth") or '0'
role = find_value(pl, "role")
return mk_monitor_name(role,depth)
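# Illustrative values for the monitor-name helpers above:
#   mk_monitor_name("", "0")         # -> "monitor"
#   mk_monitor_name("Master", "0")   # -> "monitor_Master"
#   mk_monitor_name("Master", "10")  # -> "monitor_Master_10"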
def crm_msec(t):
'''
See lib/common/utils.c:crm_get_msec().
'''
convtab = {
'ms': (1,1),
'msec': (1,1),
'us': (1,1000),
'usec': (1,1000),
'': (1000,1),
's': (1000,1),
'sec': (1000,1),
'm': (60*1000,1),
'min': (60*1000,1),
'h': (60*60*1000,1),
'hr': (60*60*1000,1),
}
if not t:
return -1
r = re.match("\s*(\d+)\s*([a-zA-Z]+)?", t)
if not r:
return -1
if not r.group(2):
q = ''
else:
q = r.group(2).lower()
try:
mult,div = convtab[q]
except:
return -1
return (int(r.group(1))*mult)/div
def crm_time_cmp(a, b):
return crm_msec(a) - crm_msec(b)
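# Worked examples for the time helpers above (values follow convtab):
#   crm_msec("30s")   # -> 30000
#   crm_msec("2min")  # -> 120000
#   crm_msec("500")   # -> 500000 (no unit defaults to seconds)
#   crm_time_cmp("90s", "1min")  # -> 30000, positive means the first is larger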
class RAInfo(object):
'''
A resource agent and whatever's useful about it.
'''
ra_tab = " " # four horses
required_ops = ("start", "stop")
skip_ops = ("meta-data", "validate-all")
skip_op_attr = ("name", "depth", "role")
def __init__(self,ra_class,ra_type,ra_provider = "heartbeat"):
self.ra_class = ra_class
self.ra_type = ra_type
self.ra_provider = ra_provider
if not self.ra_provider:
self.ra_provider = "heartbeat"
self.ra_node = None
def ra_string(self):
return self.ra_class == "ocf" and \
"%s:%s:%s" % (self.ra_class, self.ra_provider, self.ra_type) or \
"%s:%s" % (self.ra_class, self.ra_type)
def error(self, s):
common_err("%s: %s" % (self.ra_string(), s))
def warn(self, s):
common_warn("%s: %s" % (self.ra_string(), s))
def add_extra_stonith_params(self):
if not stonithd_metadata.mk_ra_node():
return
try:
params_node = self.doc.getElementsByTagName("parameters")[0]
except:
params_node = self.doc.createElement("parameters")
self.ra_node.appendChild(params_node)
for n in stonithd_metadata.ra_node.getElementsByTagName("parameter"):
params_node.appendChild(self.doc.importNode(n,1))
def mk_ra_node(self):
'''
Return the resource_agent node.
'''
if self.ra_node:
return self.ra_node
meta = self.meta()
try:
self.doc = xml.dom.minidom.parseString('\n'.join(meta))
except:
#common_err("could not parse meta-data for (%s,%s,%s)" \
# % (self.ra_class,self.ra_type,self.ra_provider))
self.ra_node = None
return None
try:
self.ra_node = self.doc.getElementsByTagName("resource-agent")[0]
except:
self.error("meta-data contains no resource-agent element")
self.ra_node = None
return None
if self.ra_class == "stonith":
self.add_extra_stonith_params()
return self.ra_node
def param_type_default(self,n):
try:
content = n.getElementsByTagName("content")[0]
type = content.getAttribute("type")
default = content.getAttribute("default")
return type,default
except:
return None,None
def params(self):
'''
Construct a dict of dicts: parameters are keys and
dictionary of attributes/values are values. Cached too.
'''
id = "ra_params-%s" % self.ra_string()
if wcache.is_cached(id):
return wcache.retrieve(id)
if not self.mk_ra_node():
return None
d = {}
for pset in self.ra_node.getElementsByTagName("parameters"):
for c in pset.getElementsByTagName("parameter"):
name = c.getAttribute("name")
if not name:
continue
required = c.getAttribute("required")
unique = c.getAttribute("unique")
type,default = self.param_type_default(c)
d[name] = {
"required": required,
"unique": unique,
"type": type,
"default": default,
}
return wcache.store(id,d)
def actions(self):
'''
Construct a dict of dicts: actions are keys and
dictionary of attributes/values are values. Cached too.
'''
id = "ra_actions-%s" % self.ra_string()
if wcache.is_cached(id):
return wcache.retrieve(id)
if not self.mk_ra_node():
return None
d = {}
for pset in self.ra_node.getElementsByTagName("actions"):
for c in pset.getElementsByTagName("action"):
name = c.getAttribute("name")
if not name or name in self.skip_ops:
continue
if name == "monitor":
name = monitor_name_node(c)
d[name] = {}
for a in c.attributes.keys():
if a in self.skip_op_attr:
continue
v = c.getAttribute(a)
if v:
d[name][a] = v
# add monitor ops without role, if they don't already
# exist
d2 = {}
for op in d.keys():
if re.match("monitor_[^0-9]", op):
norole_op = re.sub(r'monitor_[^0-9_]+_(.*)', r'monitor_\1', op)
if not norole_op in d:
d2[norole_op] = d[op]
d.update(d2)
return wcache.store(id,d)
def reqd_params_list(self):
'''
List of required parameters.
'''
d = self.params()
if not d: return []
return [x for x in d if d[x]["required"] == '1']
def param_default(self,pname):
'''
Parameter's default.
'''
d = self.params()
if not d: return None
return d[pname]["default"]
def sanity_check_params(self, id, pl):
'''
pl is a list of (attribute,value) pairs.
- are all required parameters defined
- do all parameters exist
'''
rc = 0
d = {}
for p,v in pl:
d[p] = v
for p in self.reqd_params_list():
if p not in d:
common_err("%s: required parameter %s not defined" % (id,p))
rc |= user_prefs.get_check_rc()
for p in d:
if p not in self.params():
common_err("%s: parameter %s does not exist" % (id,p))
rc |= user_prefs.get_check_rc()
return rc
def sanity_check_ops(self, id, ops):
'''
ops is a dict, operation names are keys and values are
lists of (attribute,value) pairs.
- do all operations exist
- are timeouts sensible
'''
rc = 0
n_ops = {}
for op in ops:
n_op = op == "monitor" and monitor_name_pl(ops[op]) or op
n_ops[n_op] = {}
for p,v in ops[op]:
if p in self.skip_op_attr:
continue
n_ops[n_op][p] = v
default_timeout = get_default("default-action-timeout")
for req_op in self.required_ops:
if req_op not in n_ops:
n_ops[req_op] = {}
for op in n_ops:
if op not in self.actions():
common_warn("%s: action %s not advertised in meta-data, it may not be supported by the RA" % (id,op))
rc |= 1
continue
try:
adv_timeout = self.actions()[op]["timeout"]
except:
continue
for a in n_ops[op]:
v = n_ops[op][a]
if a == "timeout":
if crm_msec(v) < 0:
continue
if crm_time_cmp(adv_timeout,v) > 0:
common_warn("%s: timeout %s for %s is smaller than the advised %s" % \
(id,v,op,adv_timeout))
rc |= 1
return rc
def meta(self):
'''
RA meta-data as raw xml.
'''
id = "ra_meta-%s" % self.ra_string()
if wcache.is_cached(id):
return wcache.retrieve(id)
if self.ra_class in ("pengine","stonithd"):
l = prog_meta(self.ra_class)
else:
l = ra_if.meta(self.ra_class,self.ra_type,self.ra_provider)
return wcache.store(id, l)
def meta_pretty(self):
'''
Print the RA meta-data in a human readable form.
'''
if not self.mk_ra_node():
return ''
l = []
title = self.meta_title()
l.append(title)
longdesc = get_nodes_text(self.ra_node,"longdesc")
if longdesc:
l.append(longdesc)
if self.ra_class != "heartbeat":
params = self.meta_parameters()
if params:
l.append(params.rstrip())
actions = self.meta_actions()
if actions:
l.append(actions)
return '\n\n'.join(l)
def get_shortdesc(self,n):
name = n.getAttribute("name")
shortdesc = get_nodes_text(n,"shortdesc")
longdesc = get_nodes_text(n,"longdesc")
if shortdesc and shortdesc not in (name,longdesc,self.ra_type):
return shortdesc
return ''
def meta_title(self):
s = self.ra_string()
shortdesc = self.get_shortdesc(self.ra_node)
if shortdesc:
s = "%s (%s)" % (shortdesc,s)
return s
def meta_param_head(self,n):
name = n.getAttribute("name")
if not name:
return None
s = name
if n.getAttribute("required") == "1":
s = s + "*"
type,default = self.param_type_default(n)
if type and default:
s = "%s (%s, [%s])" % (s,type,default)
elif type:
s = "%s (%s)" % (s,type)
shortdesc = self.get_shortdesc(n)
s = "%s: %s" % (s,shortdesc)
return s
def format_parameter(self,n):
l = []
head = self.meta_param_head(n)
if not head:
self.error("no name attribute for parameter")
return ""
l.append(head)
longdesc = get_nodes_text(n,"longdesc")
if longdesc:
longdesc = self.ra_tab + longdesc.replace("\n","\n"+self.ra_tab) + '\n'
l.append(longdesc)
return '\n'.join(l)
def meta_parameter(self,param):
if not self.mk_ra_node():
return ''
l = []
for pset in self.ra_node.getElementsByTagName("parameters"):
for c in pset.getElementsByTagName("parameter"):
if c.getAttribute("name") == param:
return self.format_parameter(c)
def meta_parameters(self):
if not self.mk_ra_node():
return ''
l = []
for pset in self.ra_node.getElementsByTagName("parameters"):
for c in pset.getElementsByTagName("parameter"):
s = self.format_parameter(c)
if s:
l.append(s)
if l:
return "Parameters (* denotes required, [] the default):\n\n" + '\n'.join(l)
def meta_action_head(self,n):
name = n.getAttribute("name")
if not name:
return ''
if name in self.skip_ops:
return ''
if name == "monitor":
name = monitor_name_node(n)
s = "%-13s" % name
for a in n.attributes.keys():
if a in self.skip_op_attr:
continue
v = n.getAttribute(a)
if v:
s = "%s %s=%s" % (s,a,v)
return s
def meta_actions(self):
l = []
for aset in self.ra_node.getElementsByTagName("actions"):
for c in aset.getElementsByTagName("action"):
s = self.meta_action_head(c)
if s:
l.append(self.ra_tab + s)
if l:
return "Operations' defaults (advisory minimum):\n\n" + '\n'.join(l)
def cmd_end(cmd,dir = ".."):
"Go up one level."
levels.droplevel()
def cmd_exit(cmd):
"Exit the crm program"
cmd_end(cmd)
if interactive:
print "bye"
try:
readline.write_history_file(hist_file)
except:
pass
for f in tmpfiles:
os.unlink(f)
sys.exit()
#
# help or make users feel less lonely
#
def add_shorthelp(topic,shorthelp,topic_help):
'''
Join topics ("%s,%s") if they share the same short
description.
'''
for i in range(len(topic_help)):
if topic_help[i][1] == shorthelp:
topic_help[i][0] = "%s,%s" % (topic_help[i][0], topic)
return
topic_help.append([topic, shorthelp])
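# Sketch of how add_shorthelp() folds aliases together: if "quit", "bye" and
# "exit" all carry the short description "exit the program", topic_help ends
# up with a single entry ["quit,bye,exit", "exit the program"].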
def dump_short_help(help_tab):
topic_help = []
for topic in help_tab:
if topic == '.':
continue
# with odict, for whatever reason, python parses differently:
# help_tab["..."] = ("...","...") and
# help_tab["..."] = ("...","""
# ...""")
# a parser bug?
if type(help_tab[topic][0]) == type(()):
shorthelp = help_tab[topic][0][0]
else:
shorthelp = help_tab[topic][0]
add_shorthelp(topic,shorthelp,topic_help)
for t,d in topic_help:
print "\t%-16s %s" % (t,d)
def overview(help_tab):
print ""
print help_tab['.'][1]
print ""
print "Available commands:"
print ""
dump_short_help(help_tab)
print ""
def topic_help(help_tab,topic):
if topic not in help_tab:
print "There is no help for topic %s" % topic
return
if type(help_tab[topic][0]) == type(()):
shorthelp = help_tab[topic][0][0]
longhelp = help_tab[topic][0][1]
else:
shorthelp = help_tab[topic][0]
longhelp = help_tab[topic][1]
if longhelp:
page_string(longhelp)
else:
print shorthelp
def cmd_help(help_tab,topic = ''):
"help!"
# help_tab is an odict (ordered dictionary):
# help_tab[topic] = (short_help,long_help)
# topic '.' is a special entry for the top level
if not help_tab:
common_info("sorry, help not available")
return
if not topic:
overview(help_tab)
else:
topic_help(help_tab,topic)
class UserInterface(object):
'''
Stuff common to all user interface classes.
'''
def __init__(self):
self.cmd_table = odict()
self.cmd_table["help"] = (self.help,(0,1),0)
self.cmd_table["quit"] = (self.exit,(0,0),0)
self.cmd_table["end"] = (self.end,(0,1),0)
self.cmd_aliases = global_aliases.copy()
def end_game(self, no_questions_asked = False):
pass
def help(self,cmd,topic = ''):
"usage: help [<topic>]"
cmd_help(self.help_table,topic)
def end(self,cmd,dir = ".."):
"usage: end"
self.end_game()
cmd_end(cmd,dir)
def exit(self,cmd):
"usage: exit"
self.end_game()
cmd_exit(cmd)
def add_sudo(cmd):
if user_prefs.crm_user:
return "sudo -E -u %s %s"%(user_prefs.crm_user,cmd)
return cmd
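# Example, assuming the crm user preference is set to a hypothetical
# "hacluster" account:
#   add_sudo("cibadmin -Q")  # -> "sudo -E -u hacluster cibadmin -Q"
# With no crm user configured the command is returned unchanged.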
def pipe_string(cmd,s):
rc = -1 # command failed
cmd = add_sudo(cmd)
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
p.communicate(s)
p.wait()
rc = p.returncode
except IOError, msg:
common_err(msg)
return rc
def cibdump2doc(section = None):
doc = None
if section:
cmd = "%s -o %s" % (cib_dump,section)
else:
cmd = cib_dump
cmd = add_sudo(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
try:
doc = xmlparse(p.stdout)
p.wait()
except IOError, msg:
common_err(msg)
return None
return doc
def file2doc(s):
try: f = open(s,'r')
except IOError, msg:
common_err(msg)
return None
doc = xmlparse(f)
f.close()
return doc
def shadow2doc(name):
return file2doc(shadowfile(name))
def str2tmp(s):
'''
Write the given string to a temporary file. Return the name
of the file.
'''
fd,tmp = mkstemp()
try: f = os.fdopen(fd,"w")
except IOError, msg:
common_err(msg)
return
f.write(s)
f.close()
return tmp
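# Minimal illustration of str2tmp(): the returned path names a file whose
# contents are exactly the given string, e.g.
#   tmp = str2tmp("<cib/>\n")
#   open(tmp).read()  # -> "<cib/>\n"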
def is_filename_sane(name):
if re.search("['`/#*?$\[\]]",name):
common_err("%s: bad name"%name)
return False
return True
def is_name_sane(name):
if re.search("[']",name):
common_err("%s: bad name"%name)
return False
return True
def is_value_sane(name):
if re.search("[']",name):
common_err("%s: bad name"%name)
return False
return True
def ext_cmd(cmd):
if regression_tests:
print ".EXT", cmd
return subprocess.call(add_sudo(cmd), shell=True)
def get_stdout(cmd, stderr_on = True):
'''
Run a cmd, return its stdout output.
stderr_on controls whether to show output which comes on stderr.
'''
if stderr_on:
stderr = None
else:
stderr = subprocess.PIPE
proc = subprocess.Popen(cmd, shell = True, \
stdout = subprocess.PIPE, stderr = stderr)
outp = proc.communicate()[0]
proc.wait()
outp = outp.strip()
return outp
def stdout2list(cmd, stderr_on = True):
'''
Run a cmd, fetch output, return it as a list of lines.
stderr_on controls whether to show output which comes on stderr.
'''
s = get_stdout(add_sudo(cmd), stderr_on)
return s.split('\n')
def find_program(envvar,*args):
if envvar and os.getenv(envvar):
return os.getenv(envvar)
for prog in args:
if is_program(prog):
return prog
def is_id_valid(id):
"""
Verify that the id follows the definition:
http://www.w3.org/TR/1999/REC-xml-names-19990114/#ns-qualnames
"""
if not id:
return False
id_re = "^[A-Za-z_][\w._-]*$"
return re.match(id_re,id)
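# Examples of the id check above (per the QName-style regex):
#   is_id_valid("web-server_1")  # matches
#   is_id_valid("1st-resource")  # None, ids may not start with a digit
#   is_id_valid("")              # False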
def check_filename(fname):
"""
Verify that the string is a filename.
"""
fname_re = "^[^/]+$"
return re.match(fname_re,fname)
class UserPrefs(object):
'''
Keep user preferences here.
'''
dflt_colorscheme = "yellow,normal,cyan,red,green,magenta".split(',')
skill_levels = {"operator":0, "administrator":1, "expert":2}
output_types = ("plain", "color", "uppercase")
check_frequencies = ("always", "on-verify", "never")
check_modes = ("strict", "relaxed")
def __init__(self):
self.skill_level = 2 #TODO: set back to 0?
self.editor = find_program("EDITOR","vim","vi","emacs","nano")
self.pager = find_program("PAGER","less","more","pg")
self.dotty = find_program("","dotty")
if not self.editor:
missing_prog_warn("editor")
if not self.pager:
missing_prog_warn("pager")
self.crm_user = ""
self.xmlindent = " " # two spaces
# keywords,ids,attribute names,values
self.colorscheme = self.dflt_colorscheme
# plain or color
self.output = ['color',]
# the semantic checks preferences
self.check_frequency = "always"
self.check_mode = "strict"
self.debug = False
self.force = False
def check_skill_level(self,n):
return self.skill_level >= n
def set_skill_level(self,skill_level):
if skill_level in self.skill_levels:
self.skill_level = self.skill_levels[skill_level]
else:
common_err("no %s skill level"%skill_level)
return False
def get_skill_level(self):
for s in self.skill_levels:
if self.skill_level == self.skill_levels[s]:
return s
def set_editor(self,prog):
if is_program(prog):
self.editor = prog
else:
common_err("program %s does not exist"% prog)
return False
def set_pager(self,prog):
if is_program(prog):
self.pager = prog
else:
common_err("program %s does not exist"% prog)
return False
def set_crm_user(self,user = ''):
self.crm_user = user
def set_output(self,otypes):
l = otypes.split(',')
for otype in l:
if not otype in self.output_types:
common_err("no %s output type" % otype)
return False
self.output = l
def set_colors(self,scheme):
colors = scheme.split(',')
if len(colors) != 6:
common_err("bad color scheme: %s"%scheme)
colors = UserPrefs.dflt_colorscheme
rc = True
for c in colors:
if not termctrl.is_color(c):
common_err("%s is not a recognized color" % c)
rc = False
if rc:
self.colorscheme = colors
else:
self.output.remove("color")
return rc
def is_check_always(self):
'''
Even though the frequency may be set to always, it doesn't
make sense to do that with non-interactive sessions.
'''
return interactive and self.check_frequency == "always"
def get_check_rc(self):
'''
If the check mode is set to strict, then on errors we
return 2 which is the code for error. Otherwise, we
pretend that errors are warnings.
'''
return self.check_mode == "strict" and 2 or 1
def set_check_freq(self,frequency):
if frequency not in self.check_frequencies:
common_err("no %s check frequency"%frequency)
return False
self.check_frequency = frequency
def set_check_mode(self,mode):
if mode not in self.check_modes:
common_err("no %s check mode"%mode)
return False
self.check_mode = mode
def set_debug(self):
self.debug = True
def get_debug(self):
return self.debug
def set_force(self):
self.force = True
def get_force(self):
return self.force
def write_rc(self,f):
print >>f, '%s "%s"' % ("editor",self.editor)
print >>f, '%s "%s"' % ("pager",self.pager)
print >>f, '%s "%s"' % ("user",self.crm_user)
print >>f, '%s "%s"' % ("skill-level",self.get_skill_level())
print >>f, '%s "%s"' % ("output", ','.join(self.output))
print >>f, '%s "%s"' % ("colorscheme", ','.join(self.colorscheme))
print >>f, '%s "%s"' % ("check-frequency",self.check_frequency)
print >>f, '%s "%s"' % ("check-mode",self.check_mode)
def save_options(self):
try: f = open(rc_file,"w")
- except os.error,msg:
+ except IOError,msg:
common_err("open: %s"%msg)
return
print >>f, 'options'
self.write_rc(f)
print >>f, 'end'
f.close()
class CliOptions(UserInterface):
'''
Manage user preferences
'''
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("options")
self.cmd_table["skill-level"] = (self.set_skill_level,(1,1),0,(skills_list,))
self.cmd_table["editor"] = (self.set_editor,(1,1),0)
self.cmd_table["pager"] = (self.set_pager,(1,1),0)
self.cmd_table["user"] = (self.set_crm_user,(0,1),0)
self.cmd_table["output"] = (self.set_output,(1,1),0)
self.cmd_table["colorscheme"] = (self.set_colors,(1,1),0)
self.cmd_table["check-frequency"] = (self.set_check_frequency,(1,1),0)
self.cmd_table["check-mode"] = (self.set_check_mode,(1,1),0)
self.cmd_table["save"] = (self.save_options,(0,0),0)
self.cmd_table["show"] = (self.show_options,(0,0),0)
setup_aliases(self)
def set_skill_level(self,cmd,skill_level):
"""usage: skill-level <level>
level: operator | administrator | expert"""
return user_prefs.set_skill_level(skill_level)
def set_editor(self,cmd,prog):
"usage: editor <program>"
return user_prefs.set_editor(prog)
def set_pager(self,cmd,prog):
"usage: pager <program>"
return user_prefs.set_pager(prog)
def set_crm_user(self,cmd,user = ''):
"usage: user [<crm_user>]"
return user_prefs.set_crm_user(user)
def set_output(self,cmd,otypes):
"usage: output <type>"
return user_prefs.set_output(otypes)
def set_colors(self,cmd,scheme):
"usage: colorscheme <colors>"
return user_prefs.set_colors(scheme)
def set_check_frequency(self,cmd,freq):
"usage: check-frequence <freq>"
return user_prefs.set_check_freq(freq)
def set_check_mode(self,cmd,mode):
"usage: check-mode <mode>"
return user_prefs.set_check_mode(mode)
def show_options(self,cmd):
"usage: show"
return user_prefs.write_rc(sys.stdout)
def save_options(self,cmd):
"usage: save"
return user_prefs.save_options()
def end_game(self, no_questions_asked = False):
if no_questions_asked and not interactive:
self.save_options("save")
cib_dump = "cibadmin -Ql"
cib_piped = "cibadmin -p"
cib_upgrade = "cibadmin --upgrade --force"
cib_verify = "crm_verify -V -p"
class WCache(object):
"Cache stuff. A naive implementation."
def __init__(self):
self.lists = {}
self.stamp = time.time()
self.max_cache_age = 600 # seconds
def is_cached(self,name):
if time.time() - self.stamp > self.max_cache_age:
self.stamp = time.time()
self.clear()
return name in self.lists
def store(self,name,lst):
self.lists[name] = lst
return lst
def retrieve(self,name):
if self.is_cached(name):
return self.lists[name]
else:
return None
def clear(self):
self.lists = {}
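# Usage sketch (illustrative only): WCache memoizes expensive listings, such as
# shadow CIB names or the resources section, for up to max_cache_age seconds.
#   if wcache.is_cached("rsc_xml"):
#       doc = wcache.retrieve("rsc_xml")
#   else:
#       doc = wcache.store("rsc_xml", cibdump2doc("resources"))
# This mirrors how resources_xml() further below uses the cache.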
def listshadows():
return stdout2list("ls @CRM_CONFIG_DIR@ | fgrep shadow. | sed 's/^shadow\.//'")
def shadowfile(name):
return "@CRM_CONFIG_DIR@/shadow.%s" % name
class CibShadow(UserInterface):
'''
CIB shadow management class
'''
envvar = "CIB_shadow"
extcmd = ">/dev/null </dev/null crm_shadow"
extcmd_stdout = "</dev/null crm_shadow"
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("cib")
self.cmd_table["new"] = (self.new,(1,3),1)
self.cmd_table["delete"] = (self.delete,(1,1),1,(shadows_list,))
self.cmd_table["reset"] = (self.reset,(1,1),1,(shadows_list,))
self.cmd_table["commit"] = (self.commit,(1,1),1,(shadows_list,))
self.cmd_table["use"] = (self.use,(0,2),1,(shadows_live_list,))
self.cmd_table["diff"] = (self.diff,(0,0),1)
self.cmd_table["list"] = (self.list,(0,0),1)
self.cmd_table["cibstatus"] = StatusMgmt
self.chkcmd()
setup_aliases(self)
def chkcmd(self):
try:
ext_cmd("%s 2>&1" % self.extcmd)
except os.error:
no_prog_err(self.extcmd)
return False
return True
def new(self,cmd,name,*args):
"usage: new <shadow_cib> [withstatus] [force]"
if not is_filename_sane(name):
return False
new_cmd = "%s -c '%s'" % (self.extcmd,name)
for par in args:
if not par in ("force","--force","withstatus"):
syntax_err((cmd,name,par), context = 'new')
return False
if user_prefs.get_force() or "force" in args or "--force" in args:
new_cmd = "%s --force" % new_cmd
if ext_cmd(new_cmd) == 0:
common_info("%s shadow CIB created"%name)
self.use("use",name)
if "withstatus" in args:
cib_status.load("shadow:%s" % name)
def delete(self,cmd,name):
"usage: delete <shadow_cib>"
if not is_filename_sane(name):
return False
if cib_in_use == name:
common_err("%s shadow CIB is in use"%name)
return False
if ext_cmd("%s -D '%s' --force" % (self.extcmd,name)) == 0:
common_info("%s shadow CIB deleted"%name)
else:
common_err("failed to delete %s shadow CIB"%name)
return False
def reset(self,cmd,name):
"usage: reset <shadow_cib>"
if not is_filename_sane(name):
return False
if ext_cmd("%s -r '%s'" % (self.extcmd,name)) == 0:
common_info("copied live CIB to %s"%name)
else:
common_err("failed to copy live CIB to %s"%name)
return False
def commit(self,cmd,name):
"usage: commit <shadow_cib>"
if not is_filename_sane(name):
return False
if ext_cmd("%s -C '%s' --force" % (self.extcmd,name)) == 0:
common_info("commited '%s' shadow CIB to the cluster"%name)
wcache.clear()
else:
common_err("failed to commit the %s shadow CIB"%name)
return False
def diff(self,cmd):
"usage: diff"
s = get_stdout(add_sudo("%s -d" % self.extcmd_stdout))
page_string(s)
def list(self,cmd):
"usage: list"
if regression_tests:
for t in listshadows():
print t
else:
multicolumn(listshadows())
def _use(self,name,withstatus):
# Choose a shadow cib for further changes. If the name
# provided is empty, then choose the live (cluster) cib.
# Don't allow ' in shadow names
global cib_in_use
if not name or name == "live":
os.unsetenv(self.envvar)
cib_in_use = ""
if withstatus:
cib_status.load("live")
else:
os.putenv(self.envvar,name)
cib_in_use = name
if withstatus:
cib_status.load("shadow:%s" % name)
def use(self,cmd,name = '', withstatus = ''):
"usage: use [<shadow_cib>] [withstatus]"
# check the name argument
if name and not is_filename_sane(name):
return False
if name and name != "live":
if not os.access(shadowfile(name),os.F_OK):
common_err("%s: no such shadow CIB"%name)
return False
if withstatus and withstatus != "withstatus":
syntax_err((cmd,withstatus), context = 'use')
return False
# If invoked from configure
# take special precautions
try:
prev_level = levels.previous().myname()
except:
prev_level = ''
if prev_level != "cibconfig":
self._use(name,withstatus)
return True
if not cib_factory.has_cib_changed():
self._use(name,withstatus)
# new CIB: refresh the CIB factory
cib_factory.refresh()
return True
saved_cib = cib_in_use
self._use(name,'') # don't load the status yet
if not cib_factory.is_current_cib_equal(silent = True):
# user made changes and now wants to switch to a
# different and unequal CIB; we refuse to cooperate
common_err("the requested CIB is different from the current one")
if user_prefs.get_force():
common_info("CIB overwrite forced")
elif not ask("All changes will be dropped. Do you want to proceed?"):
self._use(saved_cib,'') # revert to the previous CIB
return False
self._use(name,withstatus) # now load the status too
return True
def get_var(l,key):
for s in l:
a = s.split()
if len(a) == 2 and a[0] == key:
return a[1]
return ''
def chk_var(l,key):
for s in l:
a = s.split()
if len(a) == 2 and a[0] == key and a[1]:
return True
return False
def chk_key(l,key):
for s in l:
a = s.split()
if len(a) >= 1 and a[0] == key:
return True
return False
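# Illustrative use on template lines (not part of the original source):
#   get_var(l, '%name')      -> the word following '%name', e.g. "virtual-ip"
#   chk_var(l, '%name')      -> True only if '%name' carries a non-empty value
#   chk_key(l, '%generate')  -> True if some line starts with '%generate'
# validate_template() below relies on exactly these checks.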
def validate_template(l):
'Test for required stuff in a template.'
if not chk_var(l,'%name'):
common_err("invalid template: missing '%name'")
return False
if not chk_key(l,'%generate'):
common_err("invalid template: missing '%generate'")
return False
g = l.index('%generate')
if not (chk_key(l[0:g],'%required') or chk_key(l[0:g],'%optional')):
common_err("invalid template: missing '%required' or '%optional'")
return False
return True
def fix_tmpl_refs(l,id,pfx):
for i in range(len(l)):
l[i] = l[i].replace(id,pfx)
def fix_tmpl_refs_re(l,regex,repl):
for i in range(len(l)):
l[i] = re.sub(regex,repl,l[i])
class LoadTemplate(object):
'''
Load a template and its dependencies, generate a
configuration file which should be relatively easy and
straightforward to parse.
'''
edit_instructions = '''# Edit instructions:
#
# Add content only at the end of lines starting with '%%'.
# Only add content, don't remove or replace anything.
# The parameters following '%required' are not optional,
# unlike those following '%optional'.
# You may also add comments for future reference.'''
no_more_edit = '''# Don't edit anything below this line.'''
def __init__(self,name):
self.name = name
self.all_pre_gen = []
self.all_post_gen = []
self.all_pfx = []
def new_pfx(self,name):
i = 1
pfx = name
while pfx in self.all_pfx:
pfx = "%s_%d" % (name,i)
i += 1
self.all_pfx.append(pfx)
return pfx
def generate(self):
return '\n'.join([ \
"# Configuration: %s" % self.name, \
'', \
self.edit_instructions, \
'', \
'\n'.join(self.all_pre_gen), \
self.no_more_edit, \
'', \
'%generate', \
'\n'.join(self.all_post_gen)])
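# The generated configuration file therefore looks roughly like this
# (illustrative layout, assembled from the pieces joined above):
#   # Configuration: <name>
#   <edit instructions>
#   <pre-generate sections of all loaded templates>  (%pfx, %required, %optional, %% lines)
#   # Don't edit anything below this line.
#   %generate
#   <post-generate sections of all loaded templates>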
def write_config(self,name):
try:
f = open("%s/%s" % (Template.conf_dir, name),"w")
- except os.error,msg:
+ except IOError,msg:
common_err("open: %s"%msg)
return False
print >>f, self.generate()
f.close()
return True
def load_template(self,tmpl):
try:
f = open("%s/%s" % (Template.tmpl_dir, tmpl))
- except os.error,msg:
+ except IOError,msg:
common_err("open: %s"%msg)
return ''
l = (''.join(f)).split('\n')
if not validate_template(l):
return ''
common_info("pulling in template %s" % tmpl)
g = l.index('%generate')
pre_gen = l[0:g]
post_gen = l[g+1:]
name = get_var(pre_gen,'%name')
for s in l[0:g]:
if s.startswith('%depends_on'):
a = s.split()
if len(a) != 2:
common_warn("%s: wrong usage" % s)
continue
tmpl_id = a[1]
tmpl_pfx = self.load_template(a[1])
if tmpl_pfx:
fix_tmpl_refs(post_gen,'%'+tmpl_id,'%'+tmpl_pfx)
pfx = self.new_pfx(name)
fix_tmpl_refs(post_gen, '%_:', '%'+pfx+':')
# replace remaining %_, it may be useful at times
fix_tmpl_refs(post_gen, '%_', pfx)
v_idx = pre_gen.index('%required') or pre_gen.index('%optional')
pre_gen.insert(v_idx,'%pfx ' + pfx)
self.all_pre_gen += pre_gen
self.all_post_gen += post_gen
return pfx
def post_process(self, params):
pfx_re = '(%s)' % '|'.join(self.all_pfx)
for n in params:
fix_tmpl_refs(self.all_pre_gen, '%% '+n, "%% "+n+" "+params[n])
fix_tmpl_refs_re(self.all_post_gen, \
'%'+pfx_re+'([^:]|$)', r'\1\2')
# process %if ... [%else] ... %fi
rmidx_l = []
if_seq = False
for i in range(len(self.all_post_gen)):
s = self.all_post_gen[i]
if if_seq:
a = s.split()
if len(a) >= 1 and a[0] == '%fi':
if_seq = False
rmidx_l.append(i)
elif len(a) >= 1 and a[0] == '%else':
outcome = not outcome
rmidx_l.append(i)
else:
if not outcome:
rmidx_l.append(i)
continue
if not s:
continue
a = s.split()
if len(a) == 2 and a[0] == '%if':
outcome = not a[1].startswith('%') # not replaced -> false
if_seq = True
rmidx_l.append(i)
rmidx_l.reverse()
for i in rmidx_l:
del self.all_post_gen[i]
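# Illustrative example of the %if handling above (not from the original source):
# after parameter substitution a post-generate fragment such as
#   %if %some_param
#   ... lines kept only when the parameter was replaced ...
#   %else
#   ... lines kept otherwise ...
#   %fi
# is reduced to exactly one of the two branches; the %if/%else/%fi marker lines
# themselves are collected in rmidx_l and deleted afterwards. A parameter that
# still starts with '%' was never replaced, so its branch evaluates to false.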
def listtemplates():
l = []
for f in os.listdir(Template.tmpl_dir):
if os.path.isfile("%s/%s" % (Template.tmpl_dir,f)):
l.append(f)
return l
def listconfigs():
l = []
for f in os.listdir(Template.conf_dir):
if os.path.isfile("%s/%s" % (Template.conf_dir,f)):
l.append(f)
return l
def check_transition(inp,state,possible_l):
if not state in possible_l:
common_err("input (%s) in wrong state %s" % (inp,state))
return False
return True
class Template(UserInterface):
'''
Configuration templates.
'''
conf_dir = "%s/%s" % (os.getenv("HOME"),".crmconf")
tmpl_dir = "@datadir@/@PACKAGE@/templates"
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("template")
self.cmd_table["new"] = (self.new,(2,),1,(null_list,templates_list,loop))
self.cmd_table["load"] = (self.load,(0,1),1,(config_list,))
self.cmd_table["edit"] = (self.edit,(0,1),1,(config_list,))
self.cmd_table["delete"] = (self.delete,(1,2),1,(config_list,))
self.cmd_table["show"] = (self.show,(0,1),0,(config_list,))
self.cmd_table["apply"] = (self.apply,(0,2),1,(config_list_method,config_list))
self.cmd_table["list"] = (self.list,(0,1),0)
setup_aliases(self)
self.init_dir()
self.curr_conf = ''
def init_dir(self):
'''Create the conf directory, link to templates'''
if not os.path.isdir(self.conf_dir):
try:
os.makedirs(self.conf_dir)
except os.error,msg:
common_err("makedirs: %s"%msg)
return
def get_depends(self,tmpl):
'''return a list of required templates'''
# Not used. May need it later.
try:
tf = open("%s/%s" % (self.tmpl_dir, tmpl),"r")
- except os.error,msg:
+ except IOError,msg:
common_err("open: %s"%msg)
return
l = []
for s in tf:
a = s.split()
if len(a) >= 2 and a[0] == '%depends_on':
l += a[1:]
tf.close()
return l
def replace_params(self,s,user_data):
change = False
for i in range(len(s)):
word = s[i]
for p in user_data:
# is parameter in the word?
pos = word.find('%' + p)
if pos < 0:
continue
endpos = pos + len('%' + p)
# and it isn't part of another word?
if re.match("[A-Za-z0-9]", word[endpos:endpos+1]):
continue
# if the value contains a space or
# it is a value of an attribute
# put quotes around it
if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=':
v = '"' + user_data[p] + '"'
else:
v = user_data[p]
word = word.replace('%' + p, v)
change = True # we did replace something
if change:
s[i] = word
if 'opt' in s:
if not change:
s = []
else:
s.remove('opt')
return s
def generate(self,l,user_data):
'''replace parameters (user_data) and generate output
'''
l2 = []
for piece in l:
piece2 = []
for s in piece:
s = self.replace_params(s,user_data)
if s:
piece2.append(' '.join(s))
if piece2:
l2.append(' \\\n\t'.join(piece2))
return '\n'.join(l2)
def process(self,config = ''):
'''Create a cli configuration from the current config'''
try:
f = open("%s/%s" % (self.conf_dir, config or self.curr_conf),'r')
- except os.error,msg:
+ except IOError,msg:
common_err("open: %s"%msg)
return ''
l = []
piece = []
user_data = {}
# states
START = 0; PFX = 1; DATA = 2; GENERATE = 3
state = START
global lineno
save_lineno = lineno
lineno = 0
rc = True
for inp in f:
lineno += 1
if inp.startswith('#'):
continue
if type(inp) == type(u''):
inp = inp.encode('ascii')
inp = inp.strip()
try:
s = shlex.split(inp)
except ValueError, msg:
common_err(msg)
continue
while '\n' in s:
s.remove('\n')
if not s:
if state == GENERATE and piece:
l.append(piece)
piece = []
elif s[0] in ("%name","%depends_on","%suggests"):
continue
elif s[0] == "%pfx":
if check_transition(inp,state,(START,DATA)) and len(s) == 2:
pfx = s[1]
state = PFX
elif s[0] == "%required":
if check_transition(inp,state,(PFX,)):
state = DATA
data_reqd = True
elif s[0] == "%optional":
if check_transition(inp,state,(PFX,DATA)):
state = DATA
data_reqd = False
elif s[0] == "%%":
if state != DATA:
common_warn("user data in wrong state %s" % state)
if len(s) < 2:
common_warn("parameter name missing")
elif len(s) == 2:
if data_reqd:
common_err("required parameter %s not set" % s[1])
rc = False
elif len(s) == 3:
user_data["%s:%s" % (pfx,s[1])] = s[2]
else:
common_err("%s: syntax error" % inp)
elif s[0] == "%generate":
if check_transition(inp,state,(DATA,)):
state = GENERATE
piece = []
elif state == GENERATE:
if s:
piece.append(s)
else:
common_err("<%s> unexpected" % inp)
if piece:
l.append(piece)
lineno = save_lineno
f.close()
if not rc:
return ''
return self.generate(l,user_data)
def new(self,cmd,name,*args):
"usage: new <config> <template> [<template> ...] [params name=value ...]"
if not is_filename_sane(name):
return False
if os.path.isfile("%s/%s" % (self.conf_dir, name)):
common_err("config %s exists; delete it first" % name)
return False
lt = LoadTemplate(name)
rc = True
mode = 0
params = {}
for s in args:
if mode == 0 and s == "params":
params["id"] = name
mode = 1
elif mode == 1:
a = s.split('=')
if len(a) != 2:
syntax_err(args, context = 'new')
rc = False
else:
params[a[0]] = a[1]
elif not lt.load_template(s):
rc = False
if rc:
lt.post_process(params)
if not rc or not lt.write_config(name):
return False
self.curr_conf = name
def config_exists(self,name):
if not is_filename_sane(name):
return False
if not os.path.isfile("%s/%s" % (self.conf_dir, name)):
common_err("%s: no such config" % name)
return False
return True
def delete(self,cmd,name,force = ''):
"usage: delete <config> [force]"
if force:
if force != "force" and force != "--force":
syntax_err((cmd,force), context = 'delete')
return False
if not self.config_exists(name):
return False
if name == self.curr_conf:
if not force and not user_prefs.get_force() and \
not ask("Do you really want to remove config %s which is in use?" % self.curr_conf):
return False
else:
self.curr_conf = ''
os.remove("%s/%s" % (self.conf_dir, name))
def load(self,cmd,name = ''):
"usage: load [<config>]"
if not name:
self.curr_conf = ''
return True
if not self.config_exists(name):
return False
self.curr_conf = name
def edit(self,cmd,name = ''):
"usage: edit [<config>]"
if not name and not self.curr_conf:
common_err("please load a config first")
return False
if name:
if not self.config_exists(name):
return False
edit_file("%s/%s" % (self.conf_dir, name))
else:
edit_file("%s/%s" % (self.conf_dir, self.curr_conf))
def show(self,cmd,name = ''):
"usage: show [<config>]"
if not name and not self.curr_conf:
common_err("please load a config first")
return False
if name:
if not self.config_exists(name):
return False
print self.process(name)
else:
print self.process()
def apply(self,cmd,*args):
"usage: apply [<method>] [<config>]"
method = "replace"
name = ''
if len(args) > 0:
i = 0
if args[0] in ("replace","update"):
method = args[0]
i += 1
if len(args) > i:
name = args[i]
if not name and not self.curr_conf:
common_err("please load a config first")
return False
if name:
if not self.config_exists(name):
return False
s = self.process(name)
else:
s = self.process()
if not s:
return False
tmp = str2tmp(s)
if not tmp:
return False
set_obj = mkset_obj("NOOBJ")
- set_obj.import_file(method,tmp)
+ rc = set_obj.import_file(method,tmp)
try: os.unlink(tmp)
except: pass
+ return rc
def list(self,cmd,templates = ''):
"usage: list [templates]"
if templates == "templates":
multicolumn(listtemplates())
else:
multicolumn(listconfigs())
def manage_attr(cmd,attr_ext_commands,*args):
if len(args) < 3:
bad_usage(cmd,' '.join(args))
return False
attr_cmd = None
try:
attr_cmd = attr_ext_commands[args[1]]
except KeyError:
bad_usage(cmd,' '.join(args))
return False
if not attr_cmd:
bad_usage(cmd,' '.join(args))
return False
if args[1] == 'set':
if len(args) == 4:
if not is_name_sane(args[0]) \
or not is_name_sane(args[2]) \
or not is_value_sane(args[3]):
return False
return ext_cmd(attr_cmd%(args[0],args[2],args[3])) == 0
else:
bad_usage(cmd,' '.join(args))
return False
elif args[1] in ('delete','show'):
if len(args) == 3:
if not is_name_sane(args[0]) \
or not is_name_sane(args[2]):
return False
return ext_cmd(attr_cmd%(args[0],args[2])) == 0
else:
bad_usage(cmd,' '.join(args))
return False
else:
bad_usage(cmd,' '.join(args))
return False
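# Illustrative dispatch (not part of the original source): a call such as
#   manage_attr("param", RscMgmt.rsc_param, "r0", "set", "ip", "10.0.0.1")
# ends up running
#   crm_resource -r 'r0' -p 'ip' -v '10.0.0.1'
# while "delete" and "show" take only the resource and the attribute name.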
def resources_xml():
if wcache.is_cached("rsc_xml"):
return wcache.retrieve("rsc_xml")
doc = cibdump2doc("resources")
if not doc:
return []
return wcache.store("rsc_xml",doc)
def rsc2node(id):
if wcache.is_cached("rsc_%s_node" % id):
return wcache.retrieve("rsc_%s_node" % id)
doc = resources_xml()
if not doc:
return []
nodes = get_interesting_nodes(doc,[])
for n in nodes:
if is_resource(n) and n.getAttribute("id") == id:
return wcache.store("rsc_%s_node" % id, n)
def get_meta_param(id,param):
return get_stdout(RscMgmt.rsc_meta['show'] % (id,param), stderr_on = False)
def is_live_cib():
'''Are we working with the live cluster?'''
return not cib_in_use and not os.getenv("CIB_file")
def is_rsc_running(id):
if not is_live_cib():
return False
rsc_node = rsc2node(id)
if not rsc_node:
return False
if not is_resource(rsc_node):
return False
test_id = rsc_clone(id) or id
outp = get_stdout(RscMgmt.rsc_status % test_id, stderr_on = False)
return outp.find("running") > 0 and outp.find("NOT") == -1
def is_rsc_clone(rsc_id):
rsc_node = rsc2node(rsc_id)
return is_clone(rsc_node)
def is_rsc_ms(rsc_id):
rsc_node = rsc2node(rsc_id)
return is_ms(rsc_node)
def rsc_clone(rsc_id):
'''Get a clone of a resource.'''
rsc_node = rsc2node(rsc_id)
if not rsc_node or not rsc_node.parentNode:
return None
pnode = rsc_node.parentNode
if is_group(pnode):
pnode = pnode.parentNode
if is_clonems(pnode):
return pnode.getAttribute("id")
def is_process(s):
proc = subprocess.Popen("ps -e -o pid,command | grep -qs '%s'" % s, \
shell=True, stdout=subprocess.PIPE)
proc.wait()
return proc.returncode == 0
def cluster_stack():
if is_process("heartbeat:.[m]aster"):
return "heartbeat"
elif is_process("[a]isexec"):
return "openais"
return ""
def get_cloned_rsc(rsc_id):
rsc_node = rsc2node(rsc_id)
if not rsc_node:
return ""
for c in rsc_node.childNodes:
if is_child_rsc(c):
return c.getAttribute("id")
return ""
def get_max_clone(id):
v = get_meta_param(id,"clone-max")
try:
cnt = int(v)
except:
cnt = len(listnodes())
return cnt
def cleanup_resource(rsc,node):
if not is_name_sane(rsc) or not is_name_sane(node):
return False
if is_rsc_clone(rsc) or is_rsc_ms(rsc):
base = get_cloned_rsc(rsc)
if not base:
return False
clone_max = get_max_clone(rsc)
rc = True
for n in range(clone_max):
if ext_cmd(RscMgmt.rsc_cleanup % ("%s:%d" % (base,n), node)) != 0:
rc = False
else:
rc = ext_cmd(RscMgmt.rsc_cleanup%(rsc,node)) == 0
return rc
class RscMgmt(UserInterface):
'''
Resources management class
'''
rsc_status_all = "crm_resource -L"
rsc_status = "crm_resource -W -r '%s'"
rsc_showxml = "crm_resource -q -r '%s'"
rsc_setrole = "crm_resource --meta -r '%s' -p target-role -v '%s'"
rsc_manage = "crm_resource --meta -r '%s' -p is-managed -v '%s'"
rsc_migrate = "crm_resource -M -r '%s'"
rsc_migrateto = "crm_resource -M -r '%s' -H '%s'"
rsc_unmigrate = "crm_resource -U -r '%s'"
rsc_cleanup = "crm_resource -C -r '%s' -H '%s'"
rsc_param = {
'set': "crm_resource -r '%s' -p '%s' -v '%s'",
'delete': "crm_resource -r '%s' -d '%s'",
'show': "crm_resource -r '%s' -g '%s'",
}
rsc_meta = {
'set': "crm_resource --meta -r '%s' -p '%s' -v '%s'",
'delete': "crm_resource --meta -r '%s' -d '%s'",
'show': "crm_resource --meta -r '%s' -g '%s'",
}
rsc_failcount = {
'set': "crm_failcount -r '%s' -N '%s' -v '%s'",
'delete': "crm_failcount -r '%s' -N '%s' -D",
'show': "crm_failcount -r '%s' -N '%s' -G",
}
rsc_refresh = "crm_resource -R"
rsc_refresh_node = "crm_resource -R -H '%s'"
rsc_reprobe = "crm_resource -P"
rsc_reprobe_node = "crm_resource -P -H '%s'"
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("resource")
self.cmd_table["status"] = (self.status,(0,1),0,(rsc_list,))
self.cmd_table["start"] = (self.start,(1,1),0,(rsc_list,))
self.cmd_table["stop"] = (self.stop,(1,1),0,(rsc_list,))
self.cmd_table["restart"] = (self.restart,(1,1),0,(rsc_list,))
self.cmd_table["promote"] = (self.promote,(1,1),0,(rsc_list,))
self.cmd_table["demote"] = (self.demote,(1,1),0,(rsc_list,))
self.cmd_table["manage"] = (self.manage,(1,1),0,(rsc_list,))
self.cmd_table["unmanage"] = (self.unmanage,(1,1),0,(rsc_list,))
self.cmd_table["migrate"] = (self.migrate,(1,2),0,(rsc_list,nodes_list))
self.cmd_table["unmigrate"] = (self.unmigrate,(1,1),0,(rsc_list,))
self.cmd_table["param"] = (self.param,(3,4),1,(rsc_list,attr_cmds))
self.cmd_table["meta"] = (self.meta,(3,4),1,(rsc_list,attr_cmds))
self.cmd_table["failcount"] = (self.failcount,(3,4),0,(rsc_list,attr_cmds,nodes_list))
self.cmd_table["cleanup"] = (self.cleanup,(1,2),1,(rsc_list,nodes_list))
self.cmd_table["refresh"] = (self.refresh,(0,1),0,(nodes_list,))
self.cmd_table["reprobe"] = (self.reprobe,(0,1),0,(nodes_list,))
self.cmd_aliases.update({
"status": ("show","list",),
"migrate": ("move",),
"unmigrate": ("unmove",),
})
setup_aliases(self)
def status(self,cmd,rsc = None):
"usage: status [<rsc>]"
if rsc:
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_status % rsc) == 0
else:
return ext_cmd(self.rsc_status_all) == 0
def start(self,cmd,rsc):
"usage: start <rsc>"
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_setrole%(rsc,"Started")) == 0
def restart(self,cmd,rsc):
"usage: restart <rsc>"
if not is_name_sane(rsc):
return False
if not self.stop("stop",rsc):
return False
return self.start("start",rsc)
def stop(self,cmd,rsc):
"usage: stop <rsc>"
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_setrole%(rsc,"Stopped")) == 0
def promote(self,cmd,rsc):
"usage: promote <rsc>"
if not is_name_sane(rsc):
return False
if not is_rsc_ms(rsc):
common_err("%s is not a master-slave resource" % rsc)
return False
return ext_cmd(self.rsc_setrole%(rsc,"Master")) == 0
def demote(self,cmd,rsc):
"usage: demote <rsc>"
if not is_name_sane(rsc):
return False
if not is_rsc_ms(rsc):
common_err("%s is not a master-slave resource" % rsc)
return False
return ext_cmd(self.rsc_setrole%(rsc,"Slave")) == 0
def manage(self,cmd,rsc):
"usage: manage <rsc>"
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_manage%(rsc,"true")) == 0
def unmanage(self,cmd,rsc):
"usage: unmanage <rsc>"
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_manage%(rsc,"false")) == 0
def migrate(self,cmd,*args):
"""usage: migrate <rsc> [<node>]"""
if not is_name_sane(args[0]):
return False
if len(args) == 1:
return ext_cmd(self.rsc_migrate%args[0]) == 0
else:
if not is_name_sane(args[1]):
return False
return ext_cmd(self.rsc_migrateto%(args[0],args[1])) == 0
def unmigrate(self,cmd,rsc):
"usage: unmigrate <rsc>"
if not is_name_sane(rsc):
return False
return ext_cmd(self.rsc_unmigrate%rsc) == 0
def cleanup(self,cmd,*args):
"usage: cleanup <rsc> [<node>]"
# Clean up a resource on a node. Omit the node to clean it up on
# all live nodes.
if len(args) == 2: # remove
return cleanup_resource(args[0],args[1])
else:
rv = True
for n in listnodes():
if not cleanup_resource(args[0],n):
rv = False
return rv
def failcount(self,cmd,*args):
"""usage:
failcount <rsc> set <node> <value>
failcount <rsc> delete <node>
failcount <rsc> show <node>"""
d = lambda: manage_attr(cmd,self.rsc_failcount,*args)
return d()
def param(self,cmd,*args):
"""usage:
param <rsc> set <param> <value>
param <rsc> delete <param>
param <rsc> show <param>"""
d = lambda: manage_attr(cmd,self.rsc_param,*args)
return d()
def meta(self,cmd,*args):
"""usage:
meta <rsc> set <attr> <value>
meta <rsc> delete <attr>
meta <rsc> show <attr>"""
d = lambda: manage_attr(cmd,self.rsc_meta,*args)
return d()
def refresh(self,cmd,*args):
'usage: refresh [<node>]'
if len(args) == 1:
if not is_name_sane(args[0]):
return False
return ext_cmd(self.rsc_refresh_node%args[0]) == 0
else:
return ext_cmd(self.rsc_refresh) == 0
def reprobe(self,cmd,*args):
'usage: reprobe [<node>]'
if len(args) == 1:
if not is_name_sane(args[0]):
return False
return ext_cmd(self.rsc_reprobe_node%args[0]) == 0
else:
return ext_cmd(self.rsc_reprobe) == 0
def print_node(uname,id,node_type,other,inst_attr,offline):
"""
Try to pretty print a node from the cib. Something like:
uname(id): node_type
attr1: v1
attr2: v2
"""
s_offline = offline and "(offline)" or ""
if uname == id:
print "%s: %s%s" % (uname,node_type,s_offline)
else:
print "%s(%s): %s%s" % (uname,id,node_type,s_offline)
for a in other:
print "\t%s: %s" % (a,other[a])
for a,v in inst_attr:
print "\t%s: %s" % (a,v)
class NodeMgmt(UserInterface):
'''
Nodes management class
'''
node_standby = "crm_standby -N '%s' -v '%s'"
node_delete = "cibadmin -D -o nodes -X '<node uname=\"%s\"/>'"
node_delete_status = "cibadmin -D -o status -X '<node_state uname=\"%s\"/>'"
hb_delnode = "@libdir@/heartbeat/hb_delnode '%s'"
crm_node = "crm_node"
node_fence = "crm_attribute -t status -U '%s' -n terminate -v true"
dc = "crmadmin -D"
node_attr = {
'set': "crm_attribute -t nodes -U '%s' -n '%s' -v '%s'",
'delete': "crm_attribute -D -t nodes -U '%s' -n '%s'",
'show': "crm_attribute -G -t nodes -U '%s' -n '%s'",
}
node_status = {
'set': "crm_attribute -t status -U '%s' -n '%s' -v '%s'",
'delete': "crm_attribute -D -t status -U '%s' -n '%s'",
'show': "crm_attribute -G -t status -U '%s' -n '%s'",
}
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("node")
self.cmd_table["status"] = (self.status,(0,1),0,(nodes_list,))
self.cmd_table["show"] = (self.show,(0,1),0,(nodes_list,))
self.cmd_table["standby"] = (self.standby,(0,1),0,(nodes_list,))
self.cmd_table["online"] = (self.online,(0,1),0,(nodes_list,))
self.cmd_table["fence"] = (self.fence,(1,1),0,(nodes_list,))
self.cmd_table["delete"] = (self.delete,(1,1),0,(nodes_list,))
self.cmd_table["attribute"] = (self.attribute,(3,4),0,(nodes_list,attr_cmds))
self.cmd_table["status-attr"] = (self.status_attr,(3,4),0,(nodes_list,attr_cmds))
self.cmd_aliases.update({
"show": ("list",),
})
setup_aliases(self)
def status(self,cmd,node = None):
'usage: status [<node>]'
return ext_cmd("%s -o nodes"%cib_dump) == 0
def show(self,cmd,node = None):
'usage: show [<node>]'
doc = cibdump2doc()
if not doc:
return False
nodes_node = get_conf_elem(doc, "nodes")
status = get_conf_elem(doc, "status")
if not nodes_node:
return False
for c in nodes_node.childNodes:
if not is_element(c) or c.tagName != "node":
continue
if node and c.getAttribute("uname") != node:
continue
type = uname = id = ""
inst_attr = []
other = {}
for attr in c.attributes.keys():
v = c.getAttribute(attr)
if attr == "type":
type = v
elif attr == "uname":
uname = v
elif attr == "id":
id = v
else:
other[attr] = v
for c2 in c.childNodes:
if not is_element(c2):
continue
if c2.tagName == "instance_attributes":
inst_attr += nvpairs2list(c2)
offline = False
for c2 in status.getElementsByTagName("node_state"):
if uname != c2.getAttribute("uname"):
continue
offline = c2.getAttribute("crmd") == "offline"
print_node(uname,id,type,other,inst_attr,offline)
def standby(self,cmd,node = None):
'usage: standby [<node>]'
if not node:
node = this_node
if not is_name_sane(node):
return False
return ext_cmd(self.node_standby%(node,"on")) == 0
def online(self,cmd,node = None):
'usage: online [<node>]'
if not node:
node = this_node
if not is_name_sane(node):
return False
return ext_cmd(self.node_standby%(node,"off")) == 0
def fence(self,cmd,node):
'usage: fence <node>'
if not node:
node = this_node
if not is_name_sane(node):
return False
return ext_cmd(self.node_fence%(node)) == 0
def delete(self,cmd,node):
'usage: delete <node>'
if not is_name_sane(node):
return False
rc = True
if cluster_stack() == "heartbeat":
rc = ext_cmd(self.hb_delnode%node) == 0
else:
node_states = {}
for s in stdout2list("%s -l" % self.crm_node):
a = s.split()
if len(a) != 3:
common_warn("%s bad format: %s" % (self.crm_node,s))
continue
# fmt: id uname status
# remove only those in state "lost"
if a[1] == node:
node_states[a[2]] = 1
if a[2] == "lost":
if ext_cmd("%s --force -R %s" % (self.crm_node,a[0])) != 0:
rc = False
if not "lost" in node_states:
common_err('node %s/state "lost" not found in the id list' % node)
if "member" in node_states:
common_info("node %s appears to be still active" % node)
common_info("check output of %s -l" % self.crm_node)
rc = False
if rc:
if ext_cmd(self.node_delete%node) != 0 or \
ext_cmd(self.node_delete_status%node) != 0:
rc = False
return rc
def attribute(self,cmd,*args):
"""usage:
attribute <node> set <rsc> <value>
attribute <node> delete <rsc>
attribute <node> show <rsc>"""
d = lambda: manage_attr(cmd,self.node_attr,*args)
return d()
def status_attr(self,cmd,*args):
"""usage:
status-attr <node> set <rsc> <value>
status-attr <node> delete <rsc>
status-attr <node> show <rsc>"""
d = lambda: manage_attr(cmd,self.node_status,*args)
return d()
def edit_file(fname):
'Edit a file.'
if not fname:
return
if not user_prefs.editor:
return
return ext_cmd("%s %s" % (user_prefs.editor,fname))
def page_string(s):
'Write string through a pager.'
if not s:
return
w,h = get_winsize()
if s.count('\n') <= h:
print s
elif not user_prefs.pager or not interactive:
print s
else:
opts = ""
if user_prefs.pager == "less":
opts = "-R"
pipe_string("%s %s" % (user_prefs.pager,opts), s)
def lines2cli(s):
'''
Convert a string into a list of lines. Join lines split by
continuation characters. Strip white space, left and right. Drop empty lines.
'''
cl = []
l = s.split('\n')
cum = []
for p in l:
p = p.strip()
if p.endswith('\\'):
p = p.rstrip('\\')
cum.append(p)
else:
cum.append(p)
cl.append(''.join(cum).strip())
cum = []
if cum: # in case s ends with backslash
cl.append(''.join(cum))
return [x for x in cl if x]
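# Example (illustrative): given the input
#   primitive p0 ocf:heartbeat:Dummy \
#       op monitor interval=10s
#
#   property stonith-enabled=false
# lines2cli returns
#   ["primitive p0 ocf:heartbeat:Dummy op monitor interval=10s",
#    "property stonith-enabled=false"]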
def get_winsize():
try:
import curses
curses.setupterm()
w = curses.tigetnum('cols')
h = curses.tigetnum('lines')
except:
try:
w = os.environ['COLS']
h = os.environ['LINES']
except:
w = 80; h = 25
return w,h
def multicolumn(l):
'''
A ls-like representation of a list of strings.
A naive approach.
'''
min_gap = 2
w,h = get_winsize()
max_len = 8
for s in l:
if len(s) > max_len:
max_len = len(s)
cols = w/(max_len + min_gap) # approx.
col_len = w/cols
for i in range(len(l)/cols + 1):
s = ''
for j in range(i*cols,(i+1)*cols):
if not j < len(l):
break
if not s:
s = "%-*s" % (col_len,l[j])
elif (j+1)%cols == 0:
s = "%s%s" % (s,l[j])
else:
s = "%s%-*s" % (s,col_len,l[j])
if s:
print s
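# Illustrative output (not from the original source): with an 80 column
# terminal and items up to 8 characters long, multicolumn prints something like
#   r0        r1        r2        r3        r4        r5        r6        r7
#   r8        r9
# i.e. an ls-style grid sized from the longest item plus a two space gap.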
class RA(UserInterface):
'''
Resource agents (RA) management class
'''
provider_classes = ["ocf"]
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("ra")
self.cmd_table["classes"] = (self.classes,(0,0),0)
self.cmd_table["list"] = (self.list,(1,2),1)
self.cmd_table["providers"] = (self.providers,(1,1),1)
self.cmd_table["meta"] = (self.meta,(1,3),1)
self.cmd_aliases.update({
"meta": ("info",),
})
setup_aliases(self)
def classes(self,cmd):
"usage: classes"
for c in ra_classes():
if c in self.provider_classes:
print "%s / %s" % (c,' '.join(ra_providers_all(c)))
else:
print "%s" % c
def providers(self,cmd,ra_type):
"usage: providers <ra>"
print ' '.join(ra_providers(ra_type))
def list(self,cmd,c,p = None):
"usage: list <class> [<provider>]"
if not c in ra_classes():
common_err("class %s does not exist" % c)
return False
if p and not p in ra_providers_all(c):
common_err("there is no provider %s for class %s" % (p,c))
return False
if regression_tests:
for t in ra_types(c,p):
print t
else:
multicolumn(ra_types(c,p))
def meta(self,cmd,*args):
"usage: meta [<class>:[<provider>:]]<type>"
if len(args) > 1: # obsolete syntax
ra_type = args[0]
ra_class = args[1]
if len(args) < 3:
ra_provider = "heartbeat"
else:
ra_provider = args[2]
else:
ra_class,ra_provider,ra_type = disambiguate_ra_type(args[0])
ra = RAInfo(ra_class,ra_type,ra_provider)
if not ra.mk_ra_node():
return False
try:
page_string(ra.meta_pretty())
except:
return False
class StatusMgmt(UserInterface):
'''
The CIB status section management user interface class
'''
lrm_exit_codes = {
"success": "0",
"unknown": "1",
"args": "2",
"unimplemented": "3",
"perm": "4",
"installed": "5",
"configured": "6",
"not_running": "7",
"master": "8",
"failed_master": "9",
}
lrm_status_codes = {
"pending": "-1",
"done": "0",
"cancelled": "1",
"timeout": "2",
"notsupported": "3",
"error": "4",
}
ra_operations = ("probe", "monitor", "start", "stop",
"promote", "demote", "notify", "migrate_to", "migrate_from")
node_states = ("online", "offline", "unclean")
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("cibstatus")
self.cmd_table["show"] = (self.show,(0,1),1)
self.cmd_table["save"] = (self.save,(0,1),2)
self.cmd_table["load"] = (self.load,(1,1),2)
self.cmd_table["origin"] = (self.origin,(0,0),1)
self.cmd_table["node"] = (self.edit_node,(2,2),2,(status_node_list,node_states_list))
self.cmd_table["op"] = (self.edit_op,(3,5),2,(ra_operations_list,status_rsc_list,lrm_exit_codes_list,lrm_status_codes_list,status_node_list))
setup_aliases(self)
def myname(self):
'''Just return some id.'''
return "cibstatus"
def load(self,cmd,org):
"usage: load {<file>|shadow:<cib>|live}"
return cib_status.load(org)
def save(self,cmd,dest = None):
"usage: save [<file>|shadow:<cib>]"
return cib_status.save(dest)
def origin(self,cmd):
"usage: origin"
state = cib_status.modified and " (modified)" or ""
print "%s%s" % (cib_status.origin,state)
def show(self,cmd,changed = ""):
"usage: show [changed]"
if changed:
if changed != "changed":
syntax_err((cmd,changed))
return False
else:
return cib_status.list_changes()
return cib_status.show()
def edit_node(self,cmd,node,state):
"usage: node <node> {online|offline|unclean}"
return cib_status.edit_node(node,state)
def edit_op(self,cmd,op,rsc,rc,op_status = None,node = ''):
"usage: op <operation> <resource> <exit_code> [<op_status>] [<node>]"
if rc in self.lrm_exit_codes:
num_rc = self.lrm_exit_codes[rc]
else:
num_rc = rc
if not num_rc.isdigit():
common_err("%s exit code invalid" % num_rc)
return False
num_op_status = op_status
if op_status:
if op_status in self.lrm_status_codes:
num_op_status = self.lrm_status_codes[op_status]
if not num_op_status.isdigit():
common_err("%s operation status invalid" % num_op_status)
return False
return cib_status.edit_op(op,rsc,num_rc,num_op_status,node)
class CibConfig(UserInterface):
'''
The configuration class
'''
def __init__(self):
UserInterface.__init__(self)
self.help_table = help_sys.load_level("configure")
self.cmd_table["erase"] = (self.erase,(0,1),1)
self.cmd_table["verify"] = (self.verify,(0,0),1)
self.cmd_table["refresh"] = (self.refresh,(0,0),1)
self.cmd_table["ptest"] = (self.ptest,(0,3),1)
self.cmd_table["commit"] = (self.commit,(0,1),1)
self.cmd_table["upgrade"] = (self.upgrade,(0,1),1)
self.cmd_table["show"] = (self.show,(0,),1,(id_xml_list,id_list,loop))
self.cmd_table["edit"] = (self.edit,(0,),1,(id_xml_list,id_list,loop))
self.cmd_table["delete"] = (self.delete,(1,),1,(id_list,loop))
self.cmd_table["rename"] = (self.rename,(2,2),1,(id_list,))
self.cmd_table["save"] = (self.save,(1,2),1)
self.cmd_table["load"] = (self.load,(2,3),1)
self.cmd_table["node"] = (self.conf_node,(1,),1)
self.cmd_table["primitive"] = (self.conf_primitive,(2,),1,(null_list, \
ra_classes_list, primitive_complete_complex, loop))
self.cmd_table["group"] = (self.conf_group,(2,),1,(null_list,f_prim_id_list,loop))
self.cmd_table["clone"] = (self.conf_clone,(2,),1,(null_list,f_children_id_list))
self.cmd_table["ms"] = (self.conf_ms,(2,),1,(null_list,f_children_id_list))
self.cmd_table["location"] = (self.conf_location,(2,),1,(null_list,rsc_id_list))
self.cmd_table["colocation"] = (self.conf_colocation,(2,),1,(null_list,null_list,rsc_id_list,loop))
self.cmd_table["order"] = (self.conf_order,(2,),1,(null_list,null_list,rsc_id_list,loop))
self.cmd_table["property"] = (self.conf_property,(1,),1,(property_complete,loop))
self.cmd_table["rsc_defaults"] = (self.conf_rsc_defaults,(1,),1)
self.cmd_table["op_defaults"] = (self.conf_op_defaults,(1,),1)
self.cmd_table["monitor"] = (self.conf_monitor,(2,2),1)
self.cmd_table["ra"] = RA
self.cmd_table["cib"] = CibShadow
self.cmd_table["cibstatus"] = StatusMgmt
self.cmd_table["template"] = Template
self.cmd_table["_test"] = (self.check_structure,(0,0),1)
self.cmd_table["_regtest"] = (self.regression_testing,(1,1),1)
self.cmd_table["_queues"] = (self.showqueues,(0,0),1)
self.cmd_table["_objects"] = (self.showobjects,(0,0),1)
self.cmd_aliases.update({
"colocation": ("collocation",),
"ms": ("master",),
})
setup_aliases(self)
cib_factory.initialize()
def myname(self):
'''Just return some id.'''
return "cibconfig"
def check_structure(self,cmd):
return cib_factory.check_structure()
def regression_testing(self,cmd,param):
return cib_factory.regression_testing(param)
def showqueues(self,cmd):
cib_factory.showqueues()
def showobjects(self,cmd):
cib_factory.showobjects()
def show(self,cmd,*args):
"usage: show [xml] [<id>...]"
if not cib_factory.is_cib_sane():
return False
err_buf.buffer() # keep error messages
set_obj = mkset_obj(*args)
err_buf.release() # show them, but get an ack from the user
return set_obj.show()
def edit(self,cmd,*args):
"usage: edit [xml] [<id>...]"
if not cib_factory.is_cib_sane():
return False
err_buf.buffer() # keep error messages
set_obj = mkset_obj(*args)
err_buf.release() # show them, but get an ack from the user
return set_obj.edit()
def verify(self,cmd):
"usage: verify"
if not cib_factory.is_cib_sane():
return False
set_obj = mkset_obj("xml")
rc1 = set_obj.verify()
if user_prefs.check_frequency != "never":
rc2 = set_obj.verify2()
else:
rc2 = 0
return rc1 and rc2 <= 1
def save(self,cmd,*args):
"usage: save [xml] <filename>"
if not cib_factory.is_cib_sane():
return False
if args[0] == "xml":
f = args[1]
set_obj = mkset_obj("xml")
else:
f = args[0]
set_obj = mkset_obj()
return set_obj.save_to_file(f)
def load(self,cmd,*args):
"usage: load [xml] {replace|update} {<url>|<path>}"
if not cib_factory.is_cib_sane():
return False
if args[0] == "xml":
if len(args) != 3:
syntax_err(args, context = 'load')
return False
url = args[2]
method = args[1]
set_obj = mkset_obj("xml","NOOBJ")
else:
if len(args) != 2:
syntax_err(args, context = 'load')
return False
url = args[1]
method = args[0]
set_obj = mkset_obj("NOOBJ")
return set_obj.import_file(method,url)
def delete(self,cmd,*args):
"usage: delete <id> [<id>...]"
if not cib_factory.is_cib_sane():
return False
return cib_factory.delete(*args)
def rename(self,cmd,old_id,new_id):
"usage: rename <old_id> <new_id>"
if not cib_factory.is_cib_sane():
return False
return cib_factory.rename(old_id,new_id)
def erase(self,cmd,nodes = None):
"usage: erase [nodes]"
if not cib_factory.is_cib_sane():
return False
if nodes:
if nodes == "nodes":
return cib_factory.erase_nodes()
else:
syntax_err((cmd,nodes), context = 'erase')
else:
return cib_factory.erase()
def refresh(self,cmd):
"usage: refresh"
if not cib_factory.is_cib_sane():
return False
if interactive and cib_factory.has_cib_changed():
if not ask("All changes will be dropped. Do you want to proceed?"):
return
cib_factory.refresh()
def ptest(self,cmd,*args):
"usage: ptest [nograph] [v...] [scores]"
if not cib_factory.is_cib_sane():
return False
verbosity = 'vv' # default verbosity
nograph = False
scores = False
for p in args:
if p == "nograph":
nograph = True
elif p == "scores":
scores = True
elif re.match("^vv*$", p):
verbosity = p
else:
bad_usage(cmd,' '.join(args))
return False
set_obj = mkset_obj("xml")
return set_obj.ptest(nograph, scores, verbosity)
def commit(self,cmd,force = None):
"usage: commit [force]"
if force and force != "force":
syntax_err((cmd,force))
return False
if not cib_factory.is_cib_sane():
return False
if not cib_factory.has_cib_changed():
common_info("apparently there is nothing to commit")
common_info("try changing something first")
return
wcache.clear()
rc1 = cib_factory.is_current_cib_equal()
rc2 = self.verify("verify")
if rc1 and rc2:
return cib_factory.commit()
if force or user_prefs.get_force():
common_info("commit forced")
return cib_factory.commit()
if ask("Do you still want to commit?"):
return cib_factory.commit()
return False
def upgrade(self,cmd,force = None):
"usage: upgrade [force]"
if not cib_factory.is_cib_sane():
return False
if force and force != "force":
syntax_err((cmd,force))
return False
if user_prefs.get_force() or force:
return cib_factory.upgrade_cib_06to10(True)
else:
return cib_factory.upgrade_cib_06to10()
def __conf_object(self,cmd,*args):
"The configure object command."
if not cib_factory.is_cib_sane():
return False
f = lambda: cib_factory.create_object(cmd,*args)
return f()
def conf_node(self,cmd,*args):
"""usage: node <uname>[:<type>]
[attributes <param>=<value> [<param>=<value>...]]"""
return self.__conf_object(cmd,*args)
def conf_primitive(self,cmd,*args):
"""usage: primitive <rsc> [<class>:[<provider>:]]<type>
[params <param>=<value> [<param>=<value>...]]
[meta <attribute>=<value> [<attribute>=<value>...]]
[utilization <attribute>=<value> [<attribute>=<value>...]]
[operations id_spec
[op op_type [<attribute>=<value>...] ...]]"""
return self.__conf_object(cmd,*args)
def conf_group(self,cmd,*args):
"""usage: group <name> <rsc> [<rsc>...]
[params <param>=<value> [<param>=<value>...]]
[meta <attribute>=<value> [<attribute>=<value>...]]"""
return self.__conf_object(cmd,*args)
def conf_clone(self,cmd,*args):
"""usage: clone <name> <rsc>
[params <param>=<value> [<param>=<value>...]]
[meta <attribute>=<value> [<attribute>=<value>...]]"""
return self.__conf_object(cmd,*args)
def conf_ms(self,cmd,*args):
"""usage: ms <name> <rsc>
[params <param>=<value> [<param>=<value>...]]
[meta <attribute>=<value> [<attribute>=<value>...]]"""
return self.__conf_object(cmd,*args)
def conf_location(self,cmd,*args):
"""usage: location <id> <rsc> {node_pref|rules}
node_pref :: <score>: <node>
rules ::
rule [id_spec] [$role=<role>] <score>: <expression>
[rule [id_spec] [$role=<role>] <score>: <expression> ...]
id_spec :: $id=<id> | $id-ref=<id>
score :: <number> | <attribute> | [-]inf
expression :: <simple_exp> [bool_op <simple_exp> ...]
bool_op :: or | and
simple_exp :: <attribute> [type:]<binary_op> <value>
| <unary_op> <attribute>
| date <date_expr>
type :: string | version | number
binary_op :: lt | gt | lte | gte | eq | ne
unary_op :: defined | not_defined"""
return self.__conf_object(cmd,*args)
def conf_colocation(self,cmd,*args):
"""usage: colocation <id> <score>: <rsc>[:<role>] <rsc>[:<role>]
"""
return self.__conf_object(cmd,*args)
def conf_order(self,cmd,*args):
"""usage: order <id> score-type: <first-rsc>[:<action>] <then-rsc>[:<action>]
[symmetrical=<bool>]"""
return self.__conf_object(cmd,*args)
def conf_property(self,cmd,*args):
"usage: property [$id=<set_id>] <option>=<value>"
return self.__conf_object(cmd,*args)
def conf_rsc_defaults(self,cmd,*args):
"usage: rsc_defaults [$id=<set_id>] <option>=<value>"
return self.__conf_object(cmd,*args)
def conf_op_defaults(self,cmd,*args):
"usage: op_defaults [$id=<set_id>] <option>=<value>"
return self.__conf_object(cmd,*args)
def conf_monitor(self,cmd,*args):
"usage: monitor <rsc>[:<role>] <interval>[:<timeout>]"
return self.__conf_object(cmd,*args)
def end_game(self, no_questions_asked = False):
if cib_factory.has_cib_changed():
if no_questions_asked or not interactive or \
ask("There are changes pending. Do you want to commit them?"):
self.commit("commit")
cib_factory.reset()
wcache.clear()
attr_defaults_missing = {
}
def add_missing_attr(node):
try:
for defaults in attr_defaults_missing[node.tagName]:
if not node.hasAttribute(defaults[0]):
node.setAttribute(defaults[0],defaults[1])
except: pass
attr_defaults = {
"rule": (("boolean-op","and"),),
"expression": (("type","string"),),
}
def drop_attr_defaults(node, ts = 0):
try:
for defaults in attr_defaults[node.tagName]:
if node.getAttribute(defaults[0]) == defaults[1]:
node.removeAttribute(defaults[0])
except: pass
def is_element(xmlnode):
return xmlnode and xmlnode.nodeType == xmlnode.ELEMENT_NODE
def nameandid(xmlnode,level):
if xmlnode.nodeType == xmlnode.ELEMENT_NODE:
print level*' ',xmlnode.tagName,xmlnode.getAttribute("id"),xmlnode.getAttribute("name")
def xmltraverse(xmlnode,fun,ts=0):
for c in xmlnode.childNodes:
if is_element(c):
fun(c,ts)
xmltraverse(c,fun,ts+1)
def xmltraverse_thin(xmlnode,fun,ts=0):
'''
Skip elements which may be resources themselves.
NB: Call this only on resource (or constraint) nodes, but
never on cib or configuration!
'''
for c in xmlnode.childNodes:
if is_element(c) and not c.tagName in ('primitive','group'):
xmltraverse_thin(c,fun,ts+1)
fun(xmlnode,ts)
def xml_processnodes(xmlnode,node_filter,proc):
'''
Process with proc all nodes that match filter.
'''
node_list = []
for child in xmlnode.childNodes:
if node_filter(child):
node_list.append(child)
if child.hasChildNodes():
xml_processnodes(child,node_filter,proc)
if node_list:
proc(node_list)
# filter the cib
def is_whitespace(node):
return node.nodeType == node.TEXT_NODE and not node.data.strip()
def is_comment(node):
return node.nodeType == node.COMMENT_NODE
def is_status_node(node):
return is_element(node) and node.tagName == "status"
container_tags = ("group", "clone", "ms", "master")
clonems_tags = ("clone", "ms", "master")
resource_tags = ("primitive","group","clone","ms","master")
constraint_tags = ("rsc_location","rsc_colocation","rsc_order")
constraint_rsc_refs = ("rsc","with-rsc","first","then")
children_tags = ("group", "primitive")
nvpairs_tags = ("meta_attributes", "instance_attributes", "utilization")
defaults_tags = ("rsc_defaults","op_defaults")
precious_attrs = ("id-ref",)
def is_emptynvpairs(node):
if is_element(node) and node.tagName in nvpairs_tags:
for a in precious_attrs:
if node.getAttribute(a):
return False
for n in node.childNodes:
if is_element(n):
return False
return True
else:
return False
def is_group(node):
return is_element(node) \
and node.tagName == "group"
def is_ms(node):
return is_element(node) \
and node.tagName in ("master","ms")
def is_clone(node):
return is_element(node) \
and node.tagName == "clone"
def is_clonems(node):
return is_element(node) \
and node.tagName in clonems_tags
def is_container(node):
return is_element(node) \
and node.tagName in container_tags
def is_primitive(node):
return is_element(node) \
and node.tagName == "primitive"
def is_resource(node):
return is_element(node) \
and node.tagName in resource_tags
def is_child_rsc(node):
return is_element(node) \
and node.tagName in children_tags
def is_constraint(node):
return is_element(node) \
and node.tagName in constraint_tags
def is_defaults(node):
return is_element(node) \
and node.tagName in defaults_tags
def rsc_constraint(rsc_id,cons_node):
if not is_element(cons_node):
return False
for attr in cons_node.attributes.keys():
if attr in constraint_rsc_refs \
and rsc_id == cons_node.getAttribute(attr):
return True
for rref in cons_node.getElementsByTagName("resource_ref"):
if rsc_id == rref.getAttribute("id"):
return True
return False
resource_cli_names = olist(["primitive","group","clone","ms","master"])
constraint_cli_names = olist(["location","colocation","collocation","order"])
nvset_cli_names = olist(["property","rsc_defaults","op_defaults"])
op_cli_names = (["monitor", "start", "stop", "migrate_to", "migrate_from","promote","demote","notify"])
def is_resource_cli(s):
return s in resource_cli_names
def is_constraint_cli(s):
return s in constraint_cli_names
def sort_container_children(node_list):
'''
Make sure that attribute nodes come first, followed by the
elements (primitive/group). The order of elements is not
disturbed; they are just shifted to the end!
'''
for node in node_list:
children = []
for c in node.childNodes:
if is_element(c) and c.tagName in children_tags:
children.append(c)
for c in children:
node.removeChild(c)
for c in children:
node.appendChild(c)
def rmnode(node):
if node and node.parentNode:
node.parentNode.removeChild(node)
node.unlink()
def rmnodes(node_list):
for node in node_list:
rmnode(node)
def printid(node_list):
for node in node_list:
id = node.getAttribute("id")
if id: print "node id:",id
def sanitize_cib(doc):
xml_processnodes(doc,is_status_node,rmnodes)
#xml_processnodes(doc,is_element,printid)
xml_processnodes(doc,is_emptynvpairs,rmnodes)
xml_processnodes(doc,is_whitespace,rmnodes)
xml_processnodes(doc,is_comment,rmnodes)
xml_processnodes(doc,is_container,sort_container_children)
xmltraverse(doc,drop_attr_defaults)
def is_simpleconstraint(node):
return len(node.getElementsByTagName("resource_ref")) == 0
def rename_id(node,old_id,new_id):
if node.getAttribute("id") == old_id:
node.setAttribute("id", new_id)
def rename_rscref_simple(c_obj,old_id,new_id):
c_modified = False
for attr in c_obj.node.attributes.keys():
if attr in constraint_rsc_refs and \
c_obj.node.getAttribute(attr) == old_id:
c_obj.node.setAttribute(attr, new_id)
c_obj.updated = True
c_modified = True
return c_modified
def delete_rscref_simple(c_obj,rsc_id):
c_modified = False
for attr in c_obj.node.attributes.keys():
if attr in constraint_rsc_refs and \
c_obj.node.getAttribute(attr) == rsc_id:
c_obj.node.removeAttribute(attr)
c_obj.updated = True
c_modified = True
return c_modified
def rset_uniq(c_obj,d):
'''
Drop duplicate resource references.
'''
l = []
for rref in c_obj.node.getElementsByTagName("resource_ref"):
rsc_id = rref.getAttribute("id")
if d[rsc_id] > 1: # drop one
l.append(rref)
d[rsc_id] -= 1
rmnodes(l)
def delete_rscref_rset(c_obj,rsc_id):
'''
Drop all reference to rsc_id.
'''
c_modified = False
l = []
for rref in c_obj.node.getElementsByTagName("resource_ref"):
if rsc_id == rref.getAttribute("id"):
l.append(rref)
c_obj.updated = True
c_modified = True
rmnodes(l)
l = []
for rset in c_obj.node.getElementsByTagName("resource_set"):
if len(rset.getElementsByTagName("resource_ref")) == 0:
l.append(rset)
c_obj.updated = True
c_modified = True
rmnodes(l)
return c_modified
def rset_convert(c_obj):
l = c_obj.node.getElementsByTagName("resource_ref")
if len(l) != 2:
return # eh?
c_obj.modified = True
cli = c_obj.repr_cli()
newnode = c_obj.cli2node(cli)
if newnode:
c_obj.node.parentNode.replaceChild(newnode,c_obj.node)
c_obj.node.unlink()
def rename_rscref_rset(c_obj,old_id,new_id):
c_modified = False
d = {}
for rref in c_obj.node.getElementsByTagName("resource_ref"):
rsc_id = rref.getAttribute("id")
if rsc_id == old_id:
rref.setAttribute("id", new_id)
- rsc_id = old_id
+ rsc_id = new_id
c_obj.updated = True
c_modified = True
if not rsc_id in d:
d[rsc_id] = 0
else:
d[rsc_id] += 1
rset_uniq(c_obj,d)
# if only two resource references remained then, to preserve
# sanity, convert it to a simple constraint (sigh)
cnt = 0
for key in d:
cnt += d[key]
if cnt == 2:
rset_convert(c_obj)
return c_modified
def rename_rscref(c_obj,old_id,new_id):
if rename_rscref_simple(c_obj,old_id,new_id) or \
rename_rscref_rset(c_obj,old_id,new_id):
err_buf.info("resource references in %s updated" % c_obj.obj_string())
def delete_rscref(c_obj,rsc_id):
return delete_rscref_simple(c_obj,rsc_id) or \
delete_rscref_rset(c_obj,rsc_id)
def silly_constraint(c_node,rsc_id):
'''
Remove a constraint from rsc_id to rsc_id.
Or an invalid one.
'''
if c_node.getElementsByTagName("resource_ref"):
# it's a resource set
# the resource sets have already been uniq-ed
return len(c_node.getElementsByTagName("resource_ref")) <= 1
cnt = 0 # the total count of referenced resources has to be at least two
rsc_cnt = 0
for attr in c_node.attributes.keys():
if attr in constraint_rsc_refs:
cnt += 1
if c_node.getAttribute(attr) == rsc_id:
rsc_cnt += 1
if c_node.tagName == "rsc_location": # locations are never silly
return cnt < 1
else:
return rsc_cnt == 2 or cnt < 2
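# Illustrative examples (not from the original source) of what counts as silly
# when resource r0 is being deleted:
#   colocation c1 inf: r0 r0   -> both references point to r0 (rsc_cnt == 2), silly
#   order o1 0: r0             -> only one reference left (cnt < 2), silly
#   location l1 r0 100: node1  -> locations are kept unless no resource reference is left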
class IdMgmt(object):
'''
Make sure that ids are unique.
'''
def __init__(self):
self._id_store = {}
self.ok = True # error var
def new(self,node,pfx):
'''
Create a unique id for the xml node.
'''
name = node.getAttribute("name")
if node.tagName == "nvpair":
node_id = "%s-%s" % (pfx,name)
elif node.tagName == "op":
interval = node.getAttribute("interval")
if interval:
node_id = "%s-%s-%s" % (pfx,name,interval)
else:
node_id = "%s-%s" % (pfx,name)
else:
try:
hint = hints_list[node.tagName]
except: hint = ''
if hint:
node_id = "%s-%s" % (pfx,hint)
else:
node_id = "%s" % pfx
if self.is_used(node_id):
for cnt in range(99): # shouldn't really get here
try_id = "%s-%d" % (node_id,cnt)
if not self.is_used(try_id):
node_id = try_id
break
self.save(node_id)
return node_id
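# Illustrative id generation examples (added; not in the original source):
#   nvpair name="target-role", pfx="r1-meta_attributes"
#       -> "r1-meta_attributes-target-role"
#   op name="monitor" interval="10", pfx="r1"
#       -> "r1-monitor-10"
# If the candidate id is already taken, suffixes "-0", "-1", ... are tried.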
def check_node(self,node,lvl):
node_id = node.getAttribute("id")
if not node_id:
return
if id_in_use(node_id):
self.ok = False
return
def _store_node(self,node,lvl):
self.save(node.getAttribute("id"))
def _drop_node(self,node,lvl):
self.remove(node.getAttribute("id"))
def check_xml(self,node):
self.ok = True
xmltraverse_thin(node,self.check_node)
return self.ok
def store_xml(self,node):
if not self.check_xml(node):
return False
xmltraverse_thin(node,self._store_node)
return True
def remove_xml(self,node):
xmltraverse_thin(node,self._drop_node)
def replace_xml(self,oldnode,newnode):
self.remove_xml(oldnode)
if not self.store_xml(newnode):
self.store_xml(oldnode)
return False
return True
def is_used(self,node_id):
return node_id in self._id_store
def save(self,node_id):
if not node_id: return
self._id_store[node_id] = 1
def rename(self,old_id,new_id):
if not old_id or not new_id: return
if not self.is_used(old_id): return
if self.is_used(new_id): return
self.remove(old_id)
self.save(new_id)
def remove(self,node_id):
if not node_id: return
try:
del self._id_store[node_id]
except KeyError:
pass
def clear(self):
self._id_store = {}
def id_in_use(obj_id):
if id_store.is_used(obj_id):
id_used_err(obj_id)
return True
return False
#
# resource type definition
#
def disambiguate_ra_type(s):
'''
Unravel [class:[provider:]]type
'''
l = s.split(':')
if not l or len(l) > 3:
return None
if len(l) == 3:
return l
elif len(l) == 2:
ra_class,ra_type = l
else:
ra_class = "ocf"
ra_type = l[0]
ra_provider = ''
if ra_class == "ocf":
pl = ra_providers(ra_type,ra_class)
if pl and len(pl) == 1:
ra_provider = pl[0]
elif not pl:
ra_provider = 'heartbeat'
return ra_class,ra_provider,ra_type
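# Illustrative examples (added; not in the original source), assuming the
# ra_providers() helper behaves as used above:
#   "ocf:heartbeat:IPaddr2" -> ["ocf", "heartbeat", "IPaddr2"]
#   "lsb:sshd"              -> ("lsb", "", "sshd")
#   "Dummy"                 -> ("ocf", <provider>, "Dummy"), where <provider>
#                              is the single known provider, or "heartbeat"
#                              if ra_providers() returns nothing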
def ra_type_validate(s, ra_class, provider, rsc_type):
'''
Only ocf ra class supports providers.
'''
if not rsc_type:
common_err("bad resource type specification %s"%s)
return False
if ra_class == "ocf":
if not provider:
common_err("provider could not be determined for %s"%s)
return False
else:
if provider:
common_warn("ra class %s does not support providers"%ra_class)
return True
return True
req_op_attributes = olist([\
"name", \
"id", \
])
op_attributes = olist([\
"interval", \
"timeout", \
"requires", \
"enabled", \
"role", \
"on-fail", \
"start-delay", \
"allow-migrate", \
"interval-origin", \
"record-pending", \
"description", \
])
#
# CLI parsing utilities
# WARNING: ugly code ahead (to be replaced some day by a proper
# yacc parser, if there's such a thing)
#
def cli_parse_rsctype(s, pl):
'''
Parse the resource type.
'''
ra_class,provider,rsc_type = disambiguate_ra_type(s)
if not ra_type_validate(s,ra_class,provider,rsc_type):
return None
pl.append(["class",ra_class])
if ra_class == "ocf":
pl.append(["provider",provider])
pl.append(["type",rsc_type])
def is_attribute(p,a):
return p.startswith(a + '=')
def cli_parse_attr_strict(s,pl):
'''
Parse attributes in the 'p=v' form.
'''
if s and '=' in s[0]:
n,v = s[0].split('=',1)
if not n:
return
pl.append([n,v])
cli_parse_attr_strict(s[1:],pl)
def cli_parse_attr(s,pl):
'''
Parse attributes in the 'p=v' form.
Allow also the 'p' form (no value) unless p is one of the
attr_list_keyw words.
'''
attr_lists_keyw = olist(["params","meta","utilization","operations","op","attributes"])
if s:
if s[0] in attr_lists_keyw:
return
if '=' in s[0]:
n,v = s[0].split('=',1)
else:
n = s[0]; v = None
if not n:
return
pl.append([n,v])
cli_parse_attr(s[1:],pl)
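# Illustrative example (added; not in the original source):
#   tokens: ["target-role=Stopped", "is-managed", "meta", "a=b"]
#   -> pl becomes [["target-role", "Stopped"], ["is-managed", None]]
#      (parsing stops at the "meta" keyword; "a=b" is left for the caller)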
def is_only_id(pl,keyw):
if len(pl) > 1:
common_err("%s: only single $id or $id-ref attribute is allowed" % keyw)
return False
if len(pl) == 1 and pl[0][0] not in ("$id","$id-ref"):
common_err("%s: only single $id or $id-ref attribute is allowed" % keyw)
return False
return True
time_op_attr = ("timeout")
def check_operation(pl):
op_name = find_value(pl,"name")
if not op_name in op_cli_names:
common_warn("%s: operation not recognized" % op_name)
if op_name == "monitor" and not find_value(pl,"interval"):
common_err("monitor requires interval")
return False
rc = True
for a,v in pl:
if a in time_op_attr and crm_msec(v) < 0:
common_err("%s: bad time in operation %s, attribute %s" % \
(v,op_name,a))
rc = False
return rc
def parse_resource(s):
el_type = s[0].lower()
if el_type == "master": # ugly kludge :(
el_type = "ms"
attr_lists_keyw = olist(["params","meta","utilization"])
cli_list = []
# the head
head = []
head.append(["id",s[1]])
i = 3
if el_type == "primitive":
cli_parse_rsctype(s[2],head)
if not find_value(head,"type"):
syntax_err(s[2:], context = "primitive")
return False
else:
cl = []
cl.append(s[2])
if el_type == "group":
while i < len(s):
if s[i] in attr_lists_keyw:
break
elif is_attribute(s[i],"description"):
break
else:
cl.append(s[i])
i += 1 # skip to the next token
head.append(["$children",cl])
try: # s[i] may be out of range
if is_attribute(s[i],"description"):
cli_parse_attr(s[i:i+1],head)
i += 1 # skip to the next token
except: pass
cli_list.append([el_type,head])
# the rest
state = 0 # 1: reading operations; 2: operations read
while len(s) > i+1:
pl = []
keyw = s[i].lower()
if keyw in attr_lists_keyw:
if state == 1:
state = 2
elif el_type == "primitive" and state == 0 and keyword_cmp(keyw, "operations"):
state = 1
elif el_type == "primitive" and state <= 1 and keyword_cmp(keyw, "op"):
if state == 0:
state = 1
pl.append(["name",s[i+1]])
else:
syntax_err(s[i:], context = 'primitive')
return False
if keyword_cmp(keyw, "op"):
if len(s) > i+2:
cli_parse_attr(s[i+2:],pl)
if not check_operation(pl):
return False
else:
cli_parse_attr(s[i+1:],pl)
if len(pl) == 0:
syntax_err(s[i:], context = 'primitive')
return False
if keyword_cmp(keyw, "operations") and not is_only_id(pl,keyw):
return False
i += len(pl)+1
# interval is obligatory for ops, supply 0 if not there
if keyword_cmp(keyw, "op") and not find_value(pl,"interval"):
pl.append(["interval","0"])
cli_list.append([keyw,pl])
if len(s) > i:
syntax_err(s[i:], context = 'primitive')
return False
return cli_list
def parse_op(s):
if len(s) != 3:
syntax_err(s, context = s[0])
return False
cli_list = []
head_pl = []
# this is an op
cli_list.append(["op",head_pl])
if not cli_parse_rsc_role(s[1],head_pl):
return False
if not cli_parse_op_times(s[2],head_pl):
return False
# rename rsc-role to role
for i in range(len(head_pl)):
if head_pl[i][0] == "rsc-role":
head_pl[i][0] = "role"
break
# add the operation name
head_pl.append(["name",s[0]])
return cli_list
score_type = {'advisory': '0','mandatory': 'INFINITY'}
-def cli_parse_score(score,pl):
+def cli_parse_score(score,pl,noattr = False):
if score.endswith(':'):
score = score.rstrip(':')
else:
syntax_err(score, context = 'score')
return False
if score in score_type:
pl.append(["score",score_type[score]])
elif re.match("^[+-]?(inf|infinity|INFINITY|[[0-9]+)$",score):
score = score.replace("infinity","INFINITY")
score = score.replace("inf","INFINITY")
pl.append(["score",score])
elif score:
- pl.append(["score-attribute",score])
+ if noattr:
+ common_err("attribute not allowed for score in orders")
+ return False
+ else:
+ pl.append(["score-attribute",score])
return True
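# Illustrative examples (added; not in the original source):
#   "100:"       -> pl gets ["score", "100"]
#   "inf:"       -> pl gets ["score", "INFINITY"]
#   "mandatory:" -> pl gets ["score", "INFINITY"] (via score_type)
#   "pingd:"     -> pl gets ["score-attribute", "pingd"], unless noattr is
#                   set (orders), in which case it is an error
#   "100"        -> syntax error (the trailing ':' is required)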
boolean_ops = olist(['or','and'])
binary_ops = olist(['lt','gt','lte','gte','eq','ne'])
binary_types = ('string' , 'version' , 'number')
unary_ops = olist(['defined','not_defined'])
def is_binary_op(s):
l = s.split(':')
if len(l) == 2:
return l[0] in binary_types and l[1] in binary_ops
elif len(l) == 1:
return l[0] in binary_ops
else:
return False
def cli_parse_binary_op(s,pl):
l = s.split(':')
if len(l) == 2:
pl.append(["type",l[0]])
pl.append(["operation",l[1]])
else:
pl.append(["operation",l[0]])
def cli_parse_expression(s,pl):
if len(s) > 1 and s[0] in unary_ops:
pl.append(["operation",s[0]])
pl.append(["attribute",s[1]])
elif len(s) > 2 and is_binary_op(s[1]):
pl.append(["attribute",s[0]])
cli_parse_binary_op(s[1],pl)
pl.append(["value",s[2]])
else:
return False
return True
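# Illustrative examples (added; not in the original source):
#   ["defined", "pingd"]        -> [["operation","defined"], ["attribute","pingd"]]
#   ["#uname", "eq", "node1"]   -> [["attribute","#uname"], ["operation","eq"],
#                                   ["value","node1"]]
#   ["pingd", "number:gt", "0"] -> [["attribute","pingd"], ["type","number"],
#                                   ["operation","gt"], ["value","0"]]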
simple_date_ops = olist(['lt','gt'])
date_ops = olist(['lt','gt','in_range','date_spec'])
date_spec_names = '''hours monthdays weekdays yeardays months \
weeks years weekyears moon'''.split()
in_range_attrs = ('start','end')
def cli_parse_dateexpr(s,pl):
if len(s) < 3:
return False
if s[1] not in date_ops:
return False
pl.append(["operation",s[1]])
if s[1] in simple_date_ops:
pl.append([keyword_cmp(s[1], 'lt') and "end" or "start",s[2]])
return True
cli_parse_attr_strict(s[2:],pl)
return True
def parse_rule(s):
if not keyword_cmp(s[0], "rule"):
syntax_err(s,context = "rule")
return 0,None
rule_list = []
head_pl = []
rule_list.append([s[0].lower(),head_pl])
i = 1
cli_parse_attr_strict(s[i:],head_pl)
i += len(head_pl)
if find_value(head_pl,"$id-ref"):
return i,rule_list
if not cli_parse_score(s[i],head_pl):
return i,None
i += 1
bool_op = ''
while len(s) > i+1:
pl = []
if keyword_cmp(s[i], "date"):
fun = cli_parse_dateexpr
elem = "date_expression"
else:
fun = cli_parse_expression
elem = "expression"
if not fun(s[i:],pl):
syntax_err(s[i:],context = "rule")
return i,None
rule_list.append([elem,pl])
i += len(pl)
if find_value(pl, "type"):
i -= 1 # reduce no of tokens by one if there was "type:op"
if elem == "date_expression":
i += 1 # increase no of tokens by one if it was date expression
if len(s) > i and s[i] in boolean_ops:
if bool_op and not keyword_cmp(bool_op, s[i]):
common_err("rule contains different bool operations: %s" % ' '.join(s))
return i,None
else:
bool_op = s[i].lower()
i += 1
if len(s) > i and keyword_cmp(s[i], "rule"):
break
if bool_op and not keyword_cmp(bool_op, 'and'):
head_pl.append(["boolean-op",bool_op])
return i,rule_list
def parse_location(s):
cli_list = []
head_pl = []
head_pl.append(["id",s[1]])
head_pl.append(["rsc",s[2]])
cli_list.append([s[0].lower(),head_pl])
if len(s) == 5 and not keyword_cmp(s[3], "rule"): # the short node preference form
if not cli_parse_score(s[3],head_pl):
return False
head_pl.append(["node",s[4]])
return cli_list
i = 3
while i < len(s):
numtoks,l = parse_rule(s[i:])
if not l:
return False
cli_list += l
i += numtoks
if len(s) < i:
syntax_err(s[i:],context = "location")
return False
return cli_list
def cli_opt_symmetrical(p,pl):
if not p:
return True
pl1 = []
cli_parse_attr([p],pl1)
if len(pl1) != 1 or not find_value(pl1,"symmetrical"):
syntax_err(p,context = "order")
return False
pl += pl1
return True
-roles = ('Stopped', 'Started', 'Master', 'Slave')
+roles_names = ('Stopped', 'Started', 'Master', 'Slave')
def cli_parse_rsc_role(s,pl,attr_pfx = ''):
l = s.split(':')
pl.append([attr_pfx+"rsc",l[0]])
if len(l) == 2:
- if l[1] not in roles:
+ if l[1] not in roles_names:
bad_def_err("resource role",s)
return False
pl.append([attr_pfx+"rsc-role",l[1]])
elif len(l) > 2:
bad_def_err("resource role",s)
return False
return True
def cli_parse_op_times(s,pl):
l = s.split(':')
pl.append(["interval",l[0]])
if len(l) == 2:
pl.append(["timeout",l[1]])
elif len(l) > 2:
bad_def_err("op times",s)
return False
return True
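# Illustrative examples (added; not in the original source):
#   cli_parse_rsc_role("d1:Master", pl) -> pl gets ["rsc","d1"], ["rsc-role","Master"]
#   cli_parse_op_times("10:60", pl)     -> pl gets ["interval","10"], ["timeout","60"]
#   cli_parse_op_times("10", pl)        -> pl gets ["interval","10"]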
class ResourceSet(object):
'''
Constraint resource set parser. Parses something like:
a ( b c:start ) d:Master e ...
Appends one or more lists to cli_list.
Lists are in form:
list :: ["resource_set",set_pl]
set_pl :: [["sequential","false"], ["action"|"role",action|role],
["resource_ref",["id",rsc]], ...]
(the first two elements of set_pl are optional)
Action/role change makes a new resource set.
'''
- def __init__(self,type,s,cli_list,score_list):
+ def __init__(self,type,s,cli_list):
self.type = type
+ self.valid_q = (type == "order") and actions_names or roles_names
+ self.q_attr = (type == "order") and "action" or "role"
self.tokens = s
self.cli_list = cli_list
- self.score_list = score_list
self.reset_set()
self.sequential = True
self.fix_parentheses()
def fix_parentheses(self):
newtoks = []
for p in self.tokens:
if p.startswith('(') and len(p) > 1:
newtoks.append('(')
newtoks.append(p[1:])
elif p.endswith(')') and len(p) > 1:
newtoks.append(p[0:len(p)-1])
newtoks.append(')')
else:
newtoks.append(p)
self.tokens = newtoks
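# Illustrative example (added; not in the original source):
#   ["A", "(B", "C)", "D"] -> ["A", "(", "B", "C", ")", "D"]
# i.e. parentheses glued to a resource name are split into separate tokens.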
def reset_set(self):
self.set_pl = []
self.prev_q = '' # previous qualifier (action or role)
- self.attr = '' # attribute (action or role)
+ self.curr_attr = '' # attribute (action or role)
def save_set(self):
- if self.score_list:
- self.set_pl = self.score_list + self.set_pl
- if self.attr:
- self.set_pl.insert(0,[self.attr,self.prev_q])
+ if not self.set_pl:
+ return
+ if self.curr_attr:
+ self.set_pl.insert(0,[self.curr_attr,self.prev_q])
if not self.sequential:
self.set_pl.insert(0,["sequential","false"])
self.cli_list.append(["resource_set",self.set_pl])
self.reset_set()
def splitrsc(self,p):
l = p.split(':')
return (len(l) == 1) and [p,''] or l
def parse(self):
+ tokpos = -1
for p in self.tokens:
+ tokpos += 1
if p == "_rsc_set_":
continue # a degenerate resource set
if p == '(':
if self.set_pl: # save the set before
self.save_set()
self.sequential = False
continue
if p == ')':
if self.sequential: # no '('
- syntax_err(self.tokens,context = type)
+ syntax_err(self.tokens[tokpos:],context = self.type)
return False
if not self.set_pl: # empty sets not allowed
- syntax_err(self.tokens,context = type)
+ syntax_err(self.tokens[tokpos:],context = self.type)
return False
self.save_set()
self.sequential = True
continue
rsc,q = self.splitrsc(p)
if q != self.prev_q: # one set can't have different roles/actions
self.save_set()
self.prev_q = q
if q:
- if not self.attr:
- self.attr = (self.type == "order") and "action" or "role"
+ if q not in self.valid_q:
+ common_err("%s: invalid %s in %s" % (q,self.q_attr,self.type))
+ return False
+ if not self.curr_attr:
+ self.curr_attr = self.q_attr
else:
- self.attr = ''
+ self.curr_attr = ''
self.set_pl.append(["resource_ref",["id",rsc]])
if not self.sequential: # no ')'
- syntax_err(self.tokens,context = type)
+ syntax_err(self.tokens[tokpos:],context = self.type)
return False
if self.set_pl: # save the final set
self.save_set()
return True
def parse_colocation(s):
cli_list = []
head_pl = []
type = s[0]
if type == "collocation": # another ugly :(
type = "colocation"
cli_list.append([type,head_pl])
if len(s) < 5:
syntax_err(s,context = "colocation")
return False
head_pl.append(["id",s[1]])
- score_pl = []
- if not cli_parse_score(s[2],score_pl):
+ if not cli_parse_score(s[2],head_pl):
return False
if len(s) == 5:
- head_pl += score_pl
if not cli_parse_rsc_role(s[3],head_pl):
return False
if not cli_parse_rsc_role(s[4],head_pl,'with-'):
return False
else:
- resource_set_obj = ResourceSet(type,s[3:],cli_list,score_pl)
+ resource_set_obj = ResourceSet(type,s[3:],cli_list)
if not resource_set_obj.parse():
return False
return cli_list
-actions = ( 'start', 'promote', 'demote', 'stop')
+actions_names = ( 'start', 'promote', 'demote', 'stop')
def cli_parse_rsc_action(s,pl,rsc_pos):
l = s.split(':')
pl.append([rsc_pos,l[0]])
if len(l) == 2:
- if l[1] not in actions:
+ if l[1] not in actions_names:
bad_def_err("resource action",s)
return False
pl.append([rsc_pos+"-action",l[1]])
elif len(l) > 1:
bad_def_err("resource action",s)
return False
return True
def parse_order(s):
cli_list = []
head_pl = []
type = "order"
cli_list.append([s[0],head_pl])
if len(s) < 5:
syntax_err(s,context = "order")
return False
head_pl.append(["id",s[1]])
- score_pl = []
- if not cli_parse_score(s[2],score_pl):
+ if not cli_parse_score(s[2],head_pl,noattr = True):
return False
# save symmetrical for later (if it exists)
symm = ""
if is_attribute(s[len(s)-1],"symmetrical"):
symm = s.pop()
if len(s) == 5:
- head_pl += score_pl
if not cli_parse_rsc_action(s[3],head_pl,'first'):
return False
if not cli_parse_rsc_action(s[4],head_pl,'then'):
return False
else:
- resource_set_obj = ResourceSet(type,s[3:],cli_list,score_pl)
+ resource_set_obj = ResourceSet(type,s[3:],cli_list)
if not resource_set_obj.parse():
return False
if not cli_opt_symmetrical(symm,head_pl):
return False
return cli_list
def parse_constraint(s):
if keyword_cmp(s[0], "location"):
return parse_location(s)
elif s[0] in olist(["colocation","collocation"]):
return parse_colocation(s)
elif keyword_cmp(s[0], "order"):
return parse_order(s)
def parse_property(s):
cli_list = []
head_pl = []
cli_list.append([s[0],head_pl])
cli_parse_attr(s[1:],head_pl)
if len(head_pl) < 1 or len(s) > len(head_pl)+1:
syntax_err(s, context = s[0])
return False
return cli_list
def cli_parse_uname(s, pl):
l = s.split(':')
if not l or len(l) > 2:
return None
pl.append(["uname",l[0]])
if len(l) == 2:
pl.append(["type",l[1]])
def parse_node(s):
cli_list = []
# the head
head = []
# optional $id
id = ''
opt_id_l = []
i = 1
cli_parse_attr_strict(s[i:],opt_id_l)
if opt_id_l:
id = find_value(opt_id_l,"$id")
i += 1
# uname[:type]
cli_parse_uname(s[i],head)
uname = find_value(head,"uname")
if not uname:
return False
head.append(["id",id and id or uname])
# drop type if default
type = find_value(head,"type")
if type == CibNode.default_type:
head.remove(["type",type])
cli_list.append([s[0],head])
if len(s) == i:
return cli_list
# the rest
i += 1
try: # s[i] may be out of range
if is_attribute(s[i],"description"):
cli_parse_attr(s[i:i+1],head)
i += 1 # skip to the next token
except: pass
while len(s) > i+1:
if not s[i] in CibNode.node_attributes_keyw:
syntax_err(s[i:], context = 'node')
return False
pl = []
cli_parse_attr(s[i+1:],pl)
if len(pl) == 0:
syntax_err(s[i:], context = 'node')
return False
cli_list.append([s[i],pl])
i += len(pl)+1
if len(s) > i:
syntax_err(s[i:], context = 'node')
return False
return cli_list
def parse_cli(s):
'''
Input: a list of tokens (or a CLI format string).
Return: a list of items; each item is a tuple
with two members: a string (tag) and a nvpairs or
attributes dict.
'''
if type(s) == type(u''):
s = s.encode('ascii')
if type(s) == type(''):
try: s = shlex.split(s)
except ValueError, msg:
common_err(msg)
return False
while '\n' in s:
s.remove('\n')
if s and s[0].startswith('#'):
return None
if len(s) > 1 and s[0] in nvset_cli_names:
return parse_property(s)
if len(s) > 1 and keyword_cmp(s[0], "node"):
return parse_node(s)
if len(s) < 3: # we want at least two tokens
syntax_err(s)
return False
if is_resource_cli(s[0]):
return parse_resource(s)
elif is_constraint_cli(s[0]):
return parse_constraint(s)
elif keyword_cmp(s[0], "monitor"):
return parse_op(s)
else:
syntax_err(s)
return False
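# Illustrative dispatch examples (added; not in the original source), assuming
# is_resource_cli()/is_constraint_cli() and nvset_cli_names, defined elsewhere
# in this module, recognize the usual keywords:
#   "node node1 attributes mem=16G"     -> parse_node()
#   "primitive p1 ocf:heartbeat:Dummy"  -> parse_resource()
#   "colocation c1 inf: A B"            -> parse_constraint()
#   "monitor p1 10:60"                  -> parse_op()
#   "property stonith-enabled=false"    -> parse_property()
#   lines starting with '#'             -> None (comment, ignored)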
#
# XML generate utilities
#
hints_list = {
"instance_attributes": "instance_attributes",
"meta_attributes": "meta_attributes",
"utilization": "utilization",
"operations": "ops",
"rule": "rule",
"expression": "expression",
}
match_list = {
"node": ("uname"),
"crm_config": (),
"rsc_defaults": (),
"op_defaults": (),
"cluster_property_set": (),
"instance_attributes": (),
"meta_attributes": (),
"utilization": (),
"operations": (),
"nvpair": ("name",),
"op": ("name","interval"),
"rule": ("score","score-attribute","role"),
"expression": ("attribute","operation","value"),
}
def set_id_used_attr(node):
node.setAttribute("__id_used", "Yes")
def is_id_used_attr(node):
return node.getAttribute("__id_used") == "Yes"
def remove_id_used_attr(node,lvl):
if is_element(node) and is_id_used_attr(node):
node.removeAttribute("__id_used")
def remove_id_used_attributes(node):
if node:
xmltraverse(node, remove_id_used_attr)
def lookup_node(node,oldnode,location_only = False):
'''
Find a child of oldnode which matches node.
'''
#print "lookup:",node.tagName,node.getAttribute("id"),oldnode.tagName,oldnode.getAttribute("id")
if not oldnode:
return None
try:
attr_list = match_list[node.tagName]
except KeyError:
attr_list = []
for c in oldnode.childNodes:
if not is_element(c):
continue
if not location_only and is_id_used_attr(c):
continue
#print "checking:",c.tagName,c.getAttribute("id")
if node.tagName == c.tagName:
failed = False
for a in attr_list:
if node.getAttribute(a) != c.getAttribute(a):
failed = True
break
if not failed:
#print "found:",c.tagName,c.getAttribute("id")
return c
return None
def set_id(node,oldnode,id_hint,id_required = True):
'''
Set the id attribute for the node.
Procedure:
- if the node already contains "id", keep it
- if the old node contains "id", copy that
- if neither is true, then create a new one using id_hint
(exception: if not id_required, then no new id is generated)
Finally, save the new id in id_store.
'''
old_id = None
new_id = node.getAttribute("id")
if oldnode and oldnode.getAttribute("id"):
old_id = oldnode.getAttribute("id")
if not new_id:
new_id = old_id
if not new_id:
if id_required:
new_id = id_store.new(node,id_hint)
else:
id_store.save(new_id)
if new_id:
node.setAttribute("id",new_id)
if oldnode and old_id == new_id:
set_id_used_attr(oldnode)
def mkxmlsimple(e,oldnode,id_hint):
'''
Create an xml node from the (name,dict) pair. The name is the
name of the element. The dict contains a set of attributes.
'''
node = cib_factory.createElement(e[0])
for n,v in e[1]:
if n == "$children": # this one's skipped
continue
if n == "operation":
v = v.lower()
if n.startswith('$'):
n = n.lstrip('$')
if (type(v) != type('') and type(v) != type(u'')) \
or v: # skip empty strings
node.setAttribute(n,v)
id_ref = node.getAttribute("id-ref")
if id_ref:
id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref)
node.setAttribute("id-ref",id_ref_2)
else:
set_id(node,lookup_node(node,oldnode),id_hint)
return node
def find_value(pl,name):
for n,v in pl:
if n == name:
return v
return None
def find_operation(rsc_node,name,interval):
op_node_l = rsc_node.getElementsByTagName("operations")
for ops in op_node_l:
for c in ops.childNodes:
if not is_element(c):
continue
if c.tagName != "op":
continue
if c.getAttribute("name") == name \
and c.getAttribute("interval") == interval:
return c
def mkxmlnvpairs(e,oldnode,id_hint):
'''
Create xml from the (name,dict) pair. The name is the name of
the element. The dict contains a set of nvpairs. Stuff such
as instance_attributes.
NB: Other tags not containing nvpairs are fine if the dict is empty.
'''
node = cib_factory.createElement(e[0])
match_node = lookup_node(node,oldnode)
#if match_node:
#print "found nvpairs set:",match_node.tagName,match_node.getAttribute("id")
id_ref = find_value(e[1],"$id-ref")
if id_ref:
id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref)
node.setAttribute("id-ref",id_ref_2)
if e[0] != "operations":
return node # id_ref is the only attribute (if not operations)
e[1].remove(["$id-ref",id_ref])
v = find_value(e[1],"$id")
if v:
node.setAttribute("id",v)
e[1].remove(["$id",v])
else:
if e[0] == "operations": # operations don't need no id
set_id(node,match_node,id_hint,id_required = False)
else:
set_id(node,match_node,id_hint)
try:
hint = hints_list[e[0]]
except: hint = ''
hint = hint and "%s_%s" % (id_hint,hint) or id_hint
nvpair_pfx = node.getAttribute("id") or hint
for n,v in e[1]:
nvpair = cib_factory.createElement("nvpair")
node.appendChild(nvpair)
nvpair.setAttribute("name",n)
if v != None:
nvpair.setAttribute("value",v)
set_id(nvpair,lookup_node(nvpair,match_node),nvpair_pfx)
return node
def mkxmlop(e,oldnode,id_hint):
'''
Create an operation xml from the (name,dict) pair.
'''
node = cib_factory.createElement(e[0])
inst_attr = []
for n,v in e[1]:
if n in req_op_attributes + op_attributes:
node.setAttribute(n,v)
else:
inst_attr.append([n,v])
tmp = cib_factory.createElement("operations")
oldops = lookup_node(tmp,oldnode) # first find old operations
oldop = lookup_node(node,oldops)
set_id(node,oldop,id_hint)
if inst_attr:
e = ["instance_attributes",inst_attr]
nia = mkxmlnvpairs(e,oldop,node.getAttribute("id"))
node.appendChild(nia)
return node
def mkxmldate(e,oldnode,id_hint):
'''
Create a date_expression xml from the (name,dict) pair.
'''
node = cib_factory.createElement(e[0])
operation = find_value(e[1],"operation").lower()
node.setAttribute("operation", operation)
old_date = lookup_node(node,oldnode) # first find old date element
set_id(node,old_date,id_hint)
date_spec_attr = []
for n,v in e[1]:
if n in date_ops or n == "operation":
continue
elif n in in_range_attrs:
node.setAttribute(n,v)
else:
date_spec_attr.append([n,v])
if not date_spec_attr:
return node
elem = operation == "date_spec" and "date_spec" or "duration"
tmp = cib_factory.createElement(elem)
old_date_spec = lookup_node(tmp,old_date) # first find old date element
set_id(tmp,old_date_spec,id_hint)
for n,v in date_spec_attr:
tmp.setAttribute(n,v)
node.appendChild(tmp)
return node
def mkxmlrsc_set(e,oldnode,id_hint):
'''
Create a resource_set xml from the (name,dict) pair.
'''
node = cib_factory.createElement(e[0])
old_rsc_set = lookup_node(node,oldnode) # first find the old resource_set element
set_id(node,old_rsc_set,id_hint)
for ref in e[1]:
if ref[0] == "resource_ref":
ref_node = cib_factory.createElement(ref[0])
ref_node.setAttribute(ref[1][0],ref[1][1])
node.appendChild(ref_node)
- elif ref[0] in ("sequential","action","role","score","score-attribute"):
+ elif ref[0] in ("sequential", "action", "role"):
node.setAttribute(ref[0], ref[1])
return node
conv_list = odict()
conv_list["params"] = "instance_attributes"
conv_list["meta"] = "meta_attributes"
conv_list["property"] = "cluster_property_set"
conv_list["rsc_defaults"] = "meta_attributes"
conv_list["op_defaults"] = "meta_attributes"
conv_list["attributes"] = "instance_attributes"
conv_list["operations"] = "operations"
conv_list["utilization"] = "utilization"
conv_list["op"] = "op"
def mkxmlnode(e,oldnode,id_hint):
'''
Create xml from the (name,dict) pair. The name is the name of
the element. The dict contains either a set of nvpairs or a
set of attributes. The id is either generated or copied if
found in the provided xml. Stuff such as instance_attributes.
'''
if e[0] in conv_list:
e[0] = conv_list[e[0]]
if e[0] in ("instance_attributes","meta_attributes","operations","cluster_property_set","utilization"):
return mkxmlnvpairs(e,oldnode,id_hint)
elif e[0] == "op":
return mkxmlop(e,oldnode,id_hint)
elif e[0] == "date_expression":
return mkxmldate(e,oldnode,id_hint)
elif e[0] == "resource_set":
return mkxmlrsc_set(e,oldnode,id_hint)
else:
return mkxmlsimple(e,oldnode,id_hint)
def new_cib():
doc = xml.dom.minidom.Document()
cib = doc.createElement("cib")
doc.appendChild(cib)
configuration = doc.createElement("configuration")
cib.appendChild(configuration)
crm_config = doc.createElement("crm_config")
configuration.appendChild(crm_config)
rsc_defaults = doc.createElement("rsc_defaults")
configuration.appendChild(rsc_defaults)
op_defaults = doc.createElement("op_defaults")
configuration.appendChild(op_defaults)
nodes = doc.createElement("nodes")
configuration.appendChild(nodes)
resources = doc.createElement("resources")
configuration.appendChild(resources)
constraints = doc.createElement("constraints")
configuration.appendChild(constraints)
return doc,cib,crm_config,rsc_defaults,op_defaults,nodes,resources,constraints
def mk_topnode(doc, tag):
"Get configuration element or create/append if there's none."
try:
e = doc.getElementsByTagName(tag)[0]
except:
e = doc.createElement(tag)
conf = doc.getElementsByTagName("configuration")[0]
if conf:
conf.appendChild(e)
else:
return None
return e
def get_conf_elem(doc, tag):
try:
return doc.getElementsByTagName(tag)[0]
except:
return None
def xmlparse(f):
try:
doc = xml.dom.minidom.parse(f)
except xml.parsers.expat.ExpatError,msg:
common_err("cannot parse xml: %s" % msg)
return None
return doc
def read_cib(fun, params = None):
doc = fun(params)
if not doc:
return doc,None
cib = doc.childNodes[0]
if not is_element(cib) or cib.tagName != "cib":
cib_no_elem_err("cib")
return doc,None
return doc,cib
def set_nvpair(set_node,name,value):
n_id = set_node.getAttribute("id")
for c in set_node.childNodes:
if is_element(c) and c.getAttribute("name") == name:
c.setAttribute("value",value)
return
np = cib_factory.createElement("nvpair")
np.setAttribute("name",name)
np.setAttribute("value",value)
new_id = id_store.new(np,n_id)
np.setAttribute("id",new_id)
set_node.appendChild(np)
def xml_cmp(n, m, show = False):
rc = hash(n.toxml()) == hash(m.toxml())
if not rc and show and user_prefs.get_debug():
print "original:",n.toprettyxml()
print "processed:",m.toprettyxml()
return hash(n.toxml()) == hash(m.toxml())
def show_unrecognized_elems(doc):
try:
conf = doc.getElementsByTagName("configuration")[0]
except:
common_warn("CIB has no configuration element")
return
for topnode in conf.childNodes:
if not is_element(topnode):
continue
if is_defaults(topnode):
continue
if not topnode.tagName in cib_topnodes:
common_warn("unrecognized CIB element %s" % c.tagName)
continue
for c in topnode.childNodes:
if not is_element(c):
continue
if not c.tagName in cib_object_map:
common_warn("unrecognized CIB element %s" % c.tagName)
def get_interesting_nodes(node,nodes):
for c in node.childNodes:
if is_element(c) and c.tagName in cib_object_map:
nodes.append(c)
get_interesting_nodes(c,nodes)
return nodes
def filter_on_tag(nl,tag):
return [node for node in nl if node.tagName == tag]
def nodes(node_list):
return filter_on_tag(node_list,"node")
def primitives(node_list):
return filter_on_tag(node_list,"primitive")
def groups(node_list):
return filter_on_tag(node_list,"group")
def clones(node_list):
return filter_on_tag(node_list,"clone")
def mss(node_list):
return filter_on_tag(node_list,"master")
def constraints(node_list):
return filter_on_tag(node_list,"rsc_location") \
+ filter_on_tag(node_list,"rsc_colocation") \
+ filter_on_tag(node_list,"rsc_order")
def properties(node_list):
return filter_on_tag(node_list,"cluster_property_set") \
+ filter_on_tag(node_list,"rsc_defaults") \
+ filter_on_tag(node_list,"op_defaults")
def processing_sort(nl):
'''
It's usually important to process cib objects in this order,
i.e. simple objects first.
'''
return nodes(nl) + primitives(nl) + groups(nl) + mss(nl) + clones(nl) \
+ constraints(nl) + properties(nl)
def obj_cmp(obj1,obj2):
return cmp(obj1.obj_id,obj2.obj_id)
def filter_on_type(cl,obj_type):
if type(cl[0]) == type([]):
l = [cli_list for cli_list in cl if cli_list[0][0] == obj_type]
l.sort(cmp = cmp)
else:
l = [obj for obj in cl if obj.obj_type == obj_type]
l.sort(cmp = obj_cmp)
return l
def nodes_cli(cl):
return filter_on_type(cl,"node")
def primitives_cli(cl):
return filter_on_type(cl,"primitive")
def groups_cli(cl):
return filter_on_type(cl,"group")
def clones_cli(cl):
return filter_on_type(cl,"clone")
def mss_cli(cl):
return filter_on_type(cl,"ms") + filter_on_type(cl,"master")
def constraints_cli(node_list):
return filter_on_type(node_list,"location") \
+ filter_on_type(node_list,"colocation") \
+ filter_on_type(node_list,"collocation") \
+ filter_on_type(node_list,"order")
def properties_cli(cl):
return filter_on_type(cl,"property") \
+ filter_on_type(cl,"rsc_defaults") \
+ filter_on_type(cl,"op_defaults")
def ops_cli(cl):
return filter_on_type(cl,"op")
def processing_sort_cli(cl):
'''
Return the given list in this order:
nodes, primitives, groups, ms, clones, constraints, rest
Both a list of objects (CibObject) and list of cli
representations accepted.
'''
return nodes_cli(cl) + primitives_cli(cl) + groups_cli(cl) + mss_cli(cl) + clones_cli(cl) \
+ constraints_cli(cl) + properties_cli(cl) + ops_cli(cl)
def referenced_resources_cli(cli_list):
id_list = []
head = cli_list[0]
obj_type = head[0]
if not is_constraint_cli(obj_type):
return []
if obj_type == "location":
id_list.append(find_value(head[1],"rsc"))
elif len(cli_list) > 1: # resource sets
for l in cli_list[1][1]:
if l[0] == "resource_ref":
id_list.append(l[1][1])
elif obj_type == "colocation":
id_list.append(find_value(head[1],"rsc"))
id_list.append(find_value(head[1],"with-rsc"))
elif obj_type == "order":
id_list.append(find_value(head[1],"first"))
id_list.append(find_value(head[1],"then"))
return id_list
#
# CLI format generation utilities (from XML)
#
def cli_format(pl,format):
if format:
return ' \\\n\t'.join(pl)
else:
return ' '.join(pl)
def nvpair_format(n,v):
return v == None and cli_display.attr_name(n) \
or '%s="%s"'%(cli_display.attr_name(n),cli_display.attr_value(v))
def cli_pairs(pl):
'Return a string of name="value" pairs (passed in a list of pairs).'
l = []
for n,v in pl:
l.append(nvpair_format(n,v))
return ' '.join(l)
def nvpairs2list(node, add_id = False):
'''
Convert nvpairs to a list of pairs.
The id attribute is normally skipped, since ids tend to be
long and therefore obscure the relevant content. For some
elements, however, it is included (e.g. properties).
'''
pl = []
# if there's id-ref, there can be then _only_ id-ref
value = node.getAttribute("id-ref")
if value:
pl.append(["$id-ref",value])
return pl
if add_id or \
(not node.childNodes and len(node.attributes) == 1):
value = node.getAttribute("id")
if value:
pl.append(["$id",value])
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "attributes":
pl = nvpairs2list(c)
name = c.getAttribute("name")
if "value" in c.attributes.keys():
value = c.getAttribute("value")
else:
value = None
pl.append([name,value])
return pl
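# Illustrative example (added; not in the original source):
#   <instance_attributes id="p1-ia">
#     <nvpair id="..." name="ip" value="10.0.0.1"/>
#     <nvpair id="..." name="nic"/>
#   </instance_attributes>
#   -> [["ip", "10.0.0.1"], ["nic", None]]
# If the element carries an id-ref attribute, only [["$id-ref", <value>]]
# is returned.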
def op2list(node):
pl = []
action = ""
for name in node.attributes.keys():
if name == "name":
action = node.getAttribute(name)
elif name != "id": # skip the id
pl.append([name,node.getAttribute(name)])
if not action:
common_err("op is invalid (no name)")
return action,pl
def op_instattr(node):
pl = []
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName != "instance_attributes":
common_err("only instance_attributes are supported in operations")
else:
pl += nvpairs2list(c)
return pl
def cli_op(node):
action,pl = op2list(node)
if not action:
return ""
pl += op_instattr(node)
return "%s %s %s" % (cli_display.keyword("op"),action,cli_pairs(pl))
def cli_operations(node,format = True):
l = []
node_id = node.getAttribute("id")
s = ''
if node_id:
s = '$id="%s"' % node_id
idref = node.getAttribute("id-ref")
if idref:
s = '%s $id-ref="%s"' % (s,idref)
if s:
l.append("%s %s" % (cli_display.keyword("operations"),s))
for c in node.childNodes:
if is_element(c) and c.tagName == "op":
l.append(cli_op(c))
return cli_format(l,format)
def date_exp2cli(node):
l = []
operation = node.getAttribute("operation")
l.append(cli_display.keyword("date"))
l.append(cli_display.keyword(operation))
if operation in simple_date_ops:
value = node.getAttribute(operation == 'lt' and "end" or "start")
l.append('"%s"' % cli_display.attr_value(value))
else:
if operation == 'in_range':
for name in in_range_attrs:
v = node.getAttribute(name)
if v:
l.append(nvpair_format(name,v))
for c in node.childNodes:
if is_element(c) and c.tagName in ("duration","date_spec"):
pl = []
for name in c.attributes.keys():
if name != "id":
pl.append([name,c.getAttribute(name)])
l.append(cli_pairs(pl))
return ' '.join(l)
def binary_op_format(op):
l = op.split(':')
if len(l) == 2:
return "%s:%s" % (l[0], cli_display.keyword(l[1]))
else:
return cli_display.keyword(op)
def exp2cli(node):
operation = node.getAttribute("operation")
type = node.getAttribute("type")
if type:
operation = "%s:%s" % (type, operation)
attribute = node.getAttribute("attribute")
value = node.getAttribute("value")
if not value:
return "%s %s" % (binary_op_format(operation),attribute)
else:
return "%s %s %s" % (attribute,binary_op_format(operation),value)
def get_score(node):
score = node.getAttribute("score")
if not score:
score = node.getAttribute("score-attribute")
else:
if score.find("INFINITY") >= 0:
score = score.replace("INFINITY","inf")
return score + ":"
def cli_rule(node):
s = []
node_id = node.getAttribute("id")
if node_id:
s.append('$id="%s"' % node_id)
else:
idref = node.getAttribute("id-ref")
if idref:
return '$id-ref="%s"' % idref
rsc_role = node.getAttribute("role")
if rsc_role:
s.append('$role="%s"' % rsc_role)
s.append(cli_display.score(get_score(node)))
bool_op = node.getAttribute("boolean-op")
if not bool_op:
bool_op = "and"
exp = []
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "date_expression":
exp.append(date_exp2cli(c))
elif c.tagName == "expression":
exp.append(exp2cli(c))
expression = (" %s "%cli_display.keyword(bool_op)).join(exp)
return "%s %s" % (' '.join(s),expression)
def node_head(node):
obj_type = cib_object_map[node.tagName][0]
node_id = node.getAttribute("id")
uname = node.getAttribute("uname")
s = cli_display.keyword(obj_type)
if node_id != uname:
s = '%s $id="%s"' % (s, node_id)
s = '%s %s' % (s, cli_display.id(uname))
type = node.getAttribute("type")
if type != CibNode.default_type:
s = '%s:%s' % (s, type)
return s
def cli_add_description(node,l):
desc = node.getAttribute("description")
if desc:
l.append(nvpair_format("description",desc))
def primitive_head(node):
obj_type = cib_object_map[node.tagName][0]
node_id = node.getAttribute("id")
ra_type = node.getAttribute("type")
ra_class = node.getAttribute("class")
ra_provider = node.getAttribute("provider")
s1 = s2 = ''
if ra_class:
s1 = "%s:"%ra_class
if ra_provider:
s2 = "%s:"%ra_provider
s = cli_display.keyword(obj_type)
id = cli_display.id(node_id)
return "%s %s %s" % (s, id, ''.join((s1,s2,ra_type)))
def cont_head(node):
obj_type = cib_object_map[node.tagName][0]
node_id = node.getAttribute("id")
children = []
for c in node.childNodes:
if not is_element(c):
continue
if (obj_type == "group" and is_primitive(c)) or \
is_child_rsc(c):
children.append(cli_display.rscref(c.getAttribute("id")))
elif obj_type in clonems_tags and is_child_rsc(c):
children.append(cli_display.rscref(c.getAttribute("id")))
s = cli_display.keyword(obj_type)
id = cli_display.id(node_id)
return "%s %s %s" % (s, id, ' '.join(children))
def location_head(node):
obj_type = cib_object_map[node.tagName][0]
node_id = node.getAttribute("id")
rsc = cli_display.rscref(node.getAttribute("rsc"))
s = cli_display.keyword(obj_type)
id = cli_display.id(node_id)
s = "%s %s %s"%(s,id,rsc)
pref_node = node.getAttribute("node")
score = cli_display.score(get_score(node))
if pref_node:
return "%s %s %s" % (s,score,pref_node)
else:
return s
def mkrscrole(node,n):
rsc = cli_display.rscref(node.getAttribute(n))
rsc_role = node.getAttribute(n + "-role")
if rsc_role:
return "%s:%s"%(rsc,rsc_role)
else:
return rsc
def mkrscaction(node,n):
rsc = cli_display.rscref(node.getAttribute(n))
rsc_action = node.getAttribute(n + "-action")
if rsc_action:
return "%s:%s"%(rsc,rsc_action)
else:
return rsc
def rsc_set_constraint(node,obj_type):
col = []
cnt = 0
for n in node.getElementsByTagName("resource_set"):
sequential = True
if n.getAttribute("sequential") == "false":
sequential = False
if not sequential:
col.append("(")
role = n.getAttribute("role")
action = n.getAttribute("action")
for r in n.getElementsByTagName("resource_ref"):
rsc = cli_display.rscref(r.getAttribute("id"))
q = (obj_type == "colocation") and role or action
col.append(q and "%s:%s"%(rsc,q) or rsc)
cnt += 1
if not sequential:
col.append(")")
if cnt <= 2: # a degenerate thingie
col.insert(0,"_rsc_set_")
return col
def two_rsc_constraint(node,obj_type):
col = []
if obj_type == "colocation":
col.append(mkrscrole(node,"rsc"))
col.append(mkrscrole(node,"with-rsc"))
else:
col.append(mkrscaction(node,"first"))
col.append(mkrscaction(node,"then"))
return col
def simple_constraint_head(node):
obj_type = cib_object_map[node.tagName][0]
node_id = node.getAttribute("id")
- obj_type_s = cli_display.keyword(obj_type)
+ s = cli_display.keyword(obj_type)
id = cli_display.id(node_id)
+ score = cli_display.score(get_score(node))
if node.getElementsByTagName("resource_set"):
col = rsc_set_constraint(node,obj_type)
- d = {}
- for n in node.getElementsByTagName("resource_set"):
- s = get_score(n)
- if s != ":":
- d[get_score(n)] = 1
- if len(d) == 1:
- score = cli_display.score(d.keys()[0])
- elif len(d) == 0:
- score = cli_display.score(':')
- else: # more than one score (not really supported yet)
- score = cli_display.score(d.keys()[0])
else:
- score = cli_display.score(get_score(node))
col = two_rsc_constraint(node,obj_type)
symm = node.getAttribute("symmetrical")
if symm:
col.append("symmetrical=%s"%symm)
- return "%s %s %s %s" % (obj_type_s,id,score,' '.join(col))
+ return "%s %s %s %s" % (s,id,score,' '.join(col))
def get_tag_by_id(node,tag,id):
"Find a doc node which matches tag and id."
for n in node.getElementsByTagName(tag):
if n.getAttribute("id") == id:
return n
return None
def get_status_node(n):
try: n = n.parentNode
except: return None
if n.tagName != "node_state":
return get_status_node(n)
return n.getAttribute("id")
def get_status_ops(status_node,rsc,op,interval,node = ''):
'''
Find a doc node which matches the operation. interval set to
"-1" means to lookup an operation with non-zero interval (for
monitors). Empty interval means any interval is fine.
'''
l = []
for n in status_node.childNodes:
if not is_element(n) or n.tagName != "node_state":
continue
if node and n.getAttribute("id") != node:
continue
for r in n.getElementsByTagName("lrm_resource"):
if r.getAttribute("id") != rsc:
continue
for o in r.getElementsByTagName("lrm_rsc_op"):
if o.getAttribute("operation") != op:
continue
if (interval == "") or \
(interval == "-1" and o.getAttribute("interval") != "0") or \
(interval != "" and o.getAttribute("interval") == interval):
l.append(o)
return l
class CibStatus(object):
'''
CIB status management
'''
def __init__(self):
self.origin = "live"
self.status_node = None
self.doc = None
self.cib = None
self.modified = False
self.node_changes = {}
self.op_changes = {}
def _cib_path(self,source):
if source[0:7] == "shadow:":
return shadowfile(source[7:])
else:
return source
def _load_cib(self,source):
if source == "live":
doc,cib = read_cib(cibdump2doc)
else:
doc,cib = read_cib(file2doc,self._cib_path(source))
return doc,cib
def _load(self,source):
doc,cib = self._load_cib(source)
if not doc:
return False
status = get_conf_elem(doc, "status")
if not status:
return False
self.doc,self.cib = doc,cib
self.status_node = status
self.modified = False
self.node_changes = {}
self.op_changes = {}
return True
def status_node_list(self):
if not self.status_node and not self._load(self.origin):
return
return [x.getAttribute("id") for x in self.doc.getElementsByTagName("node_state")]
def status_rsc_list(self):
if not self.status_node and not self._load(self.origin):
return
rsc_list = [x.getAttribute("id") for x in self.doc.getElementsByTagName("lrm_resource")]
# how to uniq?
d = {}
for e in rsc_list:
d[e] = 0
return d.keys()
def load(self,source):
'''
Load the status section from the given source. The source
may be cluster ("live"), shadow CIB, or CIB in a file.
'''
if not self._load(source):
common_err("the cib contains no status")
return False
self.origin = source
return True
def save(self,dest = None):
'''
Save the modified status section to a file/shadow. If the
file exists, then it must be a cib file and the status
section is replaced with our status section. If the file
doesn't exist, then our section and some (?) configuration
is saved.
'''
if not self.modified:
common_info("apparently you didn't modify status")
return False
if (not dest and self.origin == "live") or dest == "live":
common_warn("cannot save status to the cluster")
return False
doc,cib = self.doc,self.cib
if dest:
dest_path = self._cib_path(dest)
if os.path.isfile(dest_path):
doc,cib = self._load_cib(dest)
if not doc or not cib:
common_err("%s exists, but no cib inside" % dest)
return False
else:
dest_path = self._cib_path(self.origin)
if doc != self.doc:
status = get_conf_elem(doc, "status")
rmnode(status)
cib.appendChild(doc.importNode(self.status_node,1))
xml = doc.toprettyxml(user_prefs.xmlindent)
try: f = open(dest_path,"w")
except IOError, msg:
common_err(msg)
return False
f.write(xml)
f.close()
return True
def get_status(self):
'''
Return the status section node.
'''
if not self.status_node and not self._load(self.origin):
return None
return self.status_node
def list_changes(self):
'''
Dump a set of changes done.
'''
if not self.modified:
return True
for node in self.node_changes:
print node,self.node_changes[node]
for op in self.op_changes:
print op,self.op_changes[op]
return True
def show(self):
'''
Page the "pretty" XML of the status section.
'''
if not self.status_node and not self._load(self.origin):
return
page_string(self.status_node.toprettyxml(user_prefs.xmlindent))
return True
def edit_node(self,node,state):
'''
Modify crmd, expected, and join attributes of node_state
to set the node's state to online, offline, or unclean.
'''
if not self.status_node and not self._load(self.origin):
return
node_node = get_tag_by_id(self.status_node,"node_state",node)
if not node_node:
common_err("node %s not found" % node)
return False
if state == "online":
node_node.setAttribute("crmd","online")
node_node.setAttribute("expected","member")
node_node.setAttribute("join","member")
elif state == "offline":
node_node.setAttribute("crmd","offline")
node_node.setAttribute("expected","")
elif state == "unclean":
node_node.setAttribute("crmd","offline")
node_node.setAttribute("expected","member")
else:
common_err("unknown state %s" % state)
return False
self.node_changes[node] = state
self.modified = True
return True
def edit_op(self,op,rsc,rc,op_status,node = ''):
'''
Set rc-code and op-status in the lrm_rsc_op status
section element.
'''
if not self.status_node and not self._load(self.origin):
return
l_op = op
l_int = ""
if op == "probe":
l_op = "monitor"
l_int = "0"
elif op == "monitor":
l_int = "-1"
elif op[0:8] == "monitor:":
l_op = "monitor"
l_int = op[8:]
op_nodes = get_status_ops(self.status_node,rsc,l_op,l_int,node)
if len(op_nodes) == 0:
common_err("operation %s not found" % op)
return False
elif len(op_nodes) > 1:
nodelist = [get_status_node(x) for x in op_nodes]
common_err("operation %s found at %s" % (op,' '.join(nodelist)))
return False
op_node = op_nodes[0]
if not node:
node = get_status_node(op_node)
prev_rc = op_node.getAttribute("rc-code")
op_node.setAttribute("rc-code",rc)
self.op_changes[node+":"+rsc+":"+op] = "rc="+rc
if op_status:
op_node.setAttribute("op-status",op_status)
self.op_changes[node+":"+rsc+":"+op] += "," "op-status="+op_status
op_node.setAttribute("last-run",str(int(time.time())))
if rc != prev_rc:
op_node.setAttribute("last-rc-change",str(int(time.time())))
self.modified = True
return True
class CibObjectSet(object):
'''
Edit or display a set of cib objects.
repr(), for the objects' representation, and
save(), used to store objects into internal structures,
are defined in subclasses.
'''
def __init__(self, *args):
self.obj_list = []
def _open_url(self,src):
import urllib
try:
return urllib.urlopen(src)
except:
pass
if src == "-":
return sys.stdin
try:
return open(src)
except:
pass
common_err("could not open %s" % src)
return False
def init_aux_lists(self):
'''
Before edit, initialize two auxiliary lists which will
hold a list of objects to be removed and a list of
objects which were created. Then, we can create a new
object list which will match the current state of
affairs, i.e. the object set after the last edit.
'''
self.remove_objs = copy.copy(self.obj_list)
self.add_objs = []
def recreate_obj_list(self):
'''
Recreate obj_list: remove deleted objects and add
created objects
'''
for obj in self.remove_objs:
self.obj_list.remove(obj)
self.obj_list += self.add_objs
rmlist = []
for obj in self.obj_list:
if obj.invalid:
rmlist.append(obj)
for obj in rmlist:
self.obj_list.remove(obj)
def edit_save(self,s,erase = False):
'''
Save string s to a tmp file. Invoke editor to edit it.
Parse/save the resulting file. In case of syntax error,
allow the user to re-edit. If erase is True, erase the CIB
first.
If no changes are done, return silently.
'''
tmp = str2tmp(s)
if not tmp:
return False
filehash = hash(s)
rc = False
while True:
if edit_file(tmp) != 0:
break
try: f = open(tmp,'r')
except IOError, msg:
common_err(msg)
break
s = ''.join(f)
f.close()
if hash(s) == filehash: # file unchanged
rc = True
break
if erase:
cib_factory.erase()
if not self.save(s):
if ask("Do you want to edit again?"):
continue
rc = True
break
try: os.unlink(tmp)
except: pass
return rc
def edit(self):
if batch:
common_info("edit not allowed in batch mode")
return False
cli_display.set_no_pretty()
s = self.repr()
cli_display.reset_no_pretty()
return self.edit_save(s)
def save_to_file(self,fname):
if fname == "-":
f = sys.stdout
else:
if not batch and os.access(fname,os.F_OK):
if not ask("File %s exists. Do you want to overwrite it?"%fname):
return False
try: f = open(fname,"w")
except IOError, msg:
common_err(msg)
return False
rc = True
cli_display.set_no_pretty()
s = self.repr()
cli_display.reset_no_pretty()
if s:
f.write(s)
f.write('\n')
elif self.obj_list:
rc = False
if f != sys.stdout:
f.close()
return rc
def show(self):
s = self.repr()
if not s:
if self.obj_list: # objects could not be displayed
return False
else:
return True
page_string(s)
def import_file(self,method,fname):
+ if not cib_factory.is_cib_sane():
+ return False
if method == "replace":
if interactive and cib_factory.has_cib_changed():
if not ask("This operation will erase all changes. Do you want to proceed?"):
return False
cib_factory.erase()
f = self._open_url(fname)
if not f:
return False
s = ''.join(f)
if f != sys.stdin:
f.close()
return self.save(s)
def repr(self):
'''
Return a string with the objects' representations (either
CLI or XML).
'''
return ''
def save(self,s):
'''
For each object:
- try to find a corresponding object in obj_list
- if not found: create new
- if found: replace the object in the obj_list with
the new object
See below for specific implementations.
'''
pass
def verify2(self):
'''
Test objects for sanity. This is about semantics.
'''
rc = 0
for obj in self.obj_list:
rc |= obj.check_sanity()
return rc
def lookup_cli(self,cli_list):
for obj in self.obj_list:
if obj.matchcli(cli_list):
return obj
def lookup(self,xml_obj_type,obj_id):
for obj in self.obj_list:
if obj.match(xml_obj_type,obj_id):
return obj
def drop_remaining(self):
'Any remaining objects in obj_list are deleted.'
l = [x.obj_id for x in self.remove_objs]
return cib_factory.delete(*l)
def mkset_obj(*args):
if args and args[0] == "xml":
obj = lambda: CibObjectSetRaw(*args[1:])
else:
obj = lambda: CibObjectSetCli(*args)
return obj()
class CibObjectSetCli(CibObjectSet):
'''
Edit or display a set of cib objects (using cli notation).
'''
def __init__(self, *args):
CibObjectSet.__init__(self, *args)
self.obj_list = cib_factory.mkobj_list("cli",*args)
def repr(self):
"Return a string containing cli format of all objects."
if not self.obj_list:
return ''
return '\n'.join(obj.repr_cli() \
for obj in processing_sort_cli(self.obj_list))
def process(self,cli_list):
'''
Create new objects or update existing ones.
'''
obj = self.lookup_cli(cli_list)
if obj:
rc = obj.update_from_cli(cli_list) != False
self.remove_objs.remove(obj)
else:
new_obj = cib_factory.create_from_cli(cli_list)
rc = new_obj != None
if rc:
self.add_objs.append(new_obj)
return rc
def save(self,s):
'''
Save a user supplied cli format configuration.
On errors the user is typically asked to review the
configuration (for instance on editing).
On syntax error (return code 1), no changes are done, but
on semantic errors (return code 2), some changes did take
place, so the object list must be updated properly.
Finally, once syntax check passed, there's no way back,
all changes are applied to the current configuration.
TODO: Implement undo configuration changes.
'''
global lineno
l = []
rc = True
save_lineno = lineno
lineno = 0
for cli_text in lines2cli(s):
lineno += 1
cli_list = parse_cli(cli_text)
if cli_list:
l.append(cli_list)
elif cli_list == False:
rc = False
lineno = save_lineno
# we can't proceed if there was a syntax error, but we
# can ask the user to fix problems
if not rc:
return rc
self.init_aux_lists()
if l:
for cli_list in processing_sort_cli(l):
if self.process(cli_list) == False:
rc = False
if not self.drop_remaining():
# this is tricky, we don't know what was removed!
# it could happen that the user dropped a resource
# which was running and therefore couldn't be removed
rc = False
self.recreate_obj_list()
return rc
class CibObjectSetRaw(CibObjectSet):
'''
Edit or display one or more CIB objects (XML).
'''
def __init__(self, *args):
CibObjectSet.__init__(self, *args)
self.obj_list = cib_factory.mkobj_list("xml",*args)
def repr(self):
"Return a string containing xml of all objects."
doc = cib_factory.objlist2doc(self.obj_list)
s = doc.toprettyxml(user_prefs.xmlindent)
doc.unlink()
return s
def repr_configure(self):
'''
Return a string containing xml of configure and its
children.
'''
doc = cib_factory.objlist2doc(self.obj_list)
conf_node = doc.getElementsByTagName("configuration")[0]
s = conf_node.toprettyxml(user_prefs.xmlindent)
doc.unlink()
return s
def process(self,node):
+ if not cib_factory.is_cib_sane():
+ return False
obj = self.lookup(node.tagName,node.getAttribute("id"))
if obj:
rc = obj.update_from_node(node) != False
self.remove_objs.remove(obj)
else:
new_obj = cib_factory.create_from_node(node)
rc = new_obj != None
if rc:
self.add_objs.append(new_obj)
return rc
def save(self,s):
try:
doc = xml.dom.minidom.parseString(s)
except xml.parsers.expat.ExpatError,msg:
cib_parse_err(msg)
return False
rc = True
sanitize_cib(doc)
show_unrecognized_elems(doc)
newnodes = get_interesting_nodes(doc,[])
self.init_aux_lists()
if newnodes:
for node in processing_sort(newnodes):
if not self.process(node):
rc = False
if not self.drop_remaining():
rc = False
doc.unlink()
self.recreate_obj_list()
return rc
def verify(self):
if not self.obj_list:
return True
cli_display.set_no_pretty()
rc = pipe_string(cib_verify,self.repr())
cli_display.reset_no_pretty()
return rc in (0,1)
def ptest(self, nograph, scores, verbosity):
+ if not cib_factory.is_cib_sane():
+ return False
ptest = "ptest -X -%s" % verbosity.upper()
if scores:
ptest = "%s -s" % ptest
if user_prefs.dotty and not nograph:
fd,tmpfile = mkstemp()
ptest = "%s -D %s" % (ptest,tmpfile)
else:
tmpfile = None
doc = cib_factory.objlist2doc(self.obj_list)
cib = doc.childNodes[0]
status = cib_status.get_status()
if not status:
common_err("no status section found")
return False
cib.appendChild(doc.importNode(status,1))
pipe_string(ptest,doc.toprettyxml())
doc.unlink()
if tmpfile:
p = subprocess.Popen("%s %s" % (user_prefs.dotty,tmpfile), shell=True, bufsize=0, stdin=None, stdout=None, stderr=None, close_fds=True)
common_info("starting %s to show transition graph"%user_prefs.dotty)
tmpfiles.append(tmpfile)
else:
if not nograph:
common_info("install graphviz to see a transition graph")
return True
class CibObject(object):
'''
The top level object of the CIB. Resources and constraints.
'''
state_fmt = "%16s %-8s%-8s%-8s%-8s%-8s%-4s"
def __init__(self,xml_obj_type,obj_id = None):
if not xml_obj_type in cib_object_map:
unsupported_err(xml_obj_type)
return
self.obj_type = cib_object_map[xml_obj_type][0]
self.parent_type = cib_object_map[xml_obj_type][2]
self.xml_obj_type = xml_obj_type
self.origin = "" # where did it originally come from?
self.nocli = False # we don't support this one
self.updated = False # was the object updated
self.invalid = False # the object has been invalidated (removed)
self.moved = False # the object has been moved (from/to a container)
self.recreate = False # constraints to be recreated
self.parent = None # object superior (group/clone/ms)
self.children = [] # objects inferior
if obj_id:
if not self.mknode(obj_id):
self = None # won't do :(
else:
self.obj_id = None
self.node = None
def dump_state(self):
'Print object status'
print self.state_fmt % \
(self.obj_id,self.origin,self.updated,self.moved,self.invalid, \
self.parent and self.parent.obj_id or "", \
len(self.children))
def repr_cli(self,node = None,format = True):
'''
CLI representation for the node. Defined in subclasses.
'''
return ''
def cli2node(self,cli,oldnode = None):
'''
Convert CLI representation to a DOM node.
Defined in subclasses.
'''
return None
def save_xml(self,node):
self.obj_id = node.getAttribute("id")
self.node = node
return self.cli_use_validate()
def mknode(self,obj_id):
+ if not cib_factory.is_cib_sane():
+ return False
if id_in_use(obj_id):
return False
if self.xml_obj_type in defaults_tags:
tag = "meta_attributes"
else:
tag = self.xml_obj_type
self.node = cib_factory.createElement(tag)
self.obj_id = obj_id
self.node.setAttribute("id",self.obj_id)
self.origin = "user"
return True
def mkcopy(self):
'''
Create a new object with the same obj_id and obj_type
(for the purpose of CibFactory.delete_objects)
'''
obj_copy = CibObject(self.xml_obj_type)
obj_copy.obj_id = self.obj_id
obj_copy.obj_type = self.obj_type
return obj_copy
def can_be_renamed(self):
'''
Return False if this object can't be renamed.
'''
if is_rsc_running(self.obj_id):
common_err("cannot rename a running resource (%s)" % self.obj_id)
return False
if not is_live_cib() and self.node.tagName == "node":
common_err("cannot rename nodes")
return False
return True
def attr_exists(self,attr):
if not attr in self.node.attributes.keys():
no_attribute_err(attr,self.obj_id)
return False
return True
def cli_use_validate(self):
'''
Check validity of the object, as we know it. It may
happen that we don't recognize a construct, but that the
object is still valid for the CRM. In that case, the
object is marked as "CLI read only", i.e. we will neither
convert it to CLI nor try to edit it in that format.
The validation procedure:
we convert xml to cli and then back to xml. If the two
xml representations match then we can understand the xml.
'''
if not self.node:
return True
if not self.attr_exists("id"):
return False
cli_display.set_no_pretty()
cli_text = self.repr_cli()
cli_display.reset_no_pretty()
if not cli_text:
return False
xml2 = self.cli2node(cli_text)
if not xml2:
return False
rc = xml_cmp(self.node, xml2, show = True)
xml2.unlink()
return rc
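# A note on how this round-trip check is used elsewhere in this file:
# objects whose xml cannot be reproduced from their CLI rendering are
# not rejected, they are only marked read-only for the CLI (see
# CibFactory.save_node and CibFactory.add_obj), roughly:
#
#   if not obj.save_xml(node):   # save_xml() ends in cli_use_validate()
#       obj.nocli = True         # shown and edited as xml only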
def check_sanity(self):
'''
Right now, this is only for primitives.
'''
return 0
def matchcli(self,cli_list):
head = cli_list[0]
return self.obj_type == head[0] \
and self.obj_id == find_value(head[1],"id")
def match(self,xml_obj_type,obj_id):
return self.xml_obj_type == xml_obj_type and self.obj_id == obj_id
def obj_string(self):
return "%s:%s" % (self.obj_type,self.obj_id)
def reset_updated(self):
self.updated = False
self.moved = False
self.recreate = False
for child in self.children:
child.reset_updated()
def propagate_updated(self):
if self.parent:
self.parent.updated = self.updated
self.parent.propagate_updated()
def update_links(self):
'''
Update the structure links for the object (self.children,
self.parent). Update also the dom nodes, if necessary.
'''
self.children = []
if self.obj_type not in container_tags:
return
for c in self.node.childNodes:
if is_child_rsc(c):
child = cib_factory.find_object_for_node(c)
if not child:
missing_obj_err(c)
continue
child.parent = self
self.children.append(child)
if not c.isSameNode(child.node):
rmnode(child.node)
child.node = c
def update_from_cli(self,cli_list):
'Update ourselves from the cli intermediate.'
+ if not cib_factory.is_cib_sane():
+ return False
if not cib_factory.verify_cli(cli_list):
return False
oldnode = self.node
id_store.remove_xml(oldnode)
newnode = self.cli2node(cli_list)
if not newnode:
id_store.store_xml(oldnode)
return False
if xml_cmp(oldnode,newnode):
newnode.unlink()
return True # the new and the old versions are equal
self.node = newnode
if user_prefs.is_check_always() \
and self.check_sanity() > 1:
id_store.remove_xml(newnode)
id_store.store_xml(oldnode)
self.node = oldnode
newnode.unlink()
return False
oldnode.parentNode.replaceChild(newnode,oldnode)
cib_factory.adjust_children(self,cli_list)
oldnode.unlink()
self.updated = True
self.propagate_updated()
return True
def update_from_node(self,node):
'Update ourselves from a doc node.'
if not node:
return False
+ if not cib_factory.is_cib_sane():
+ return False
oldxml = self.node
newxml = node
if xml_cmp(oldxml,newxml):
return True # the new and the old versions are equal
if not id_store.replace_xml(oldxml,newxml):
return False
oldxml.unlink()
self.node = cib_factory.doc.importNode(newxml,1)
cib_factory.topnode[self.parent_type].appendChild(self.node)
self.update_links()
self.updated = True
self.propagate_updated()
def top_parent(self):
'''Return the top parent or self'''
if self.parent:
return self.parent.top_parent()
else:
return self
def find_child_in_node(self,child):
for c in self.node.childNodes:
if not is_element(c):
continue
if c.tagName == child.obj_type and \
c.getAttribute("id") == child.obj_id:
return c
return None
def filter(self,*args):
"Filter objects."
if not args:
return True
if args[0] == "NOOBJ":
return False
if args[0] == "changed":
return self.updated or self.origin == "user"
return self.obj_id in args
def mk_cli_list(cli):
'Sometimes we get a string and sometimes a list.'
if type(cli) == type('') or type(cli) == type(u''):
return parse_cli(cli)
else:
return cli
class CibNode(CibObject):
'''
Node and node's attributes.
'''
default_type = "normal"
node_attributes_keyw = olist(["attributes","utilization"])
def repr_cli(self,node = None,format = True):
'''
We assume that uname is unique.
'''
if not node:
node = self.node
l = []
l.append(node_head(node))
cli_add_description(node,l)
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "instance_attributes":
l.append("%s %s" % \
(cli_display.keyword("attributes"), \
cli_pairs(nvpairs2list(c))))
elif c.tagName == "utilization":
l.append("%s %s" % \
(cli_display.keyword("utilization"), \
cli_pairs(nvpairs2list(c))))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not oldnode:
oldnode = self.node
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
obj_id = find_value(head[1],"$id")
if not obj_id:
obj_id = find_value(head[1],"uname")
if not obj_id:
return None
type = find_value(head[1],"type")
if not type:
type = self.default_type
head[1].append(["type",type])
headnode = mkxmlsimple(head,cib_factory.topnode[cib_object_map[self.xml_obj_type][2]],'node')
id_hint = headnode.getAttribute("id")
for e in cli_list[1:]:
n = mkxmlnode(e,oldnode,id_hint)
headnode.appendChild(n)
remove_id_used_attributes(cib_factory.topnode[cib_object_map[self.xml_obj_type][2]])
return headnode
class CibPrimitive(CibObject):
'''
Primitives.
'''
def repr_cli(self,node = None,format = True):
if not node:
node = self.node
l = []
l.append(primitive_head(node))
cli_add_description(node,l)
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "instance_attributes":
l.append("%s %s" % \
(cli_display.keyword("params"), \
cli_pairs(nvpairs2list(c))))
elif c.tagName == "meta_attributes":
l.append("%s %s" % \
(cli_display.keyword("meta"), \
cli_pairs(nvpairs2list(c))))
elif c.tagName == "utilization":
l.append("%s %s" % \
(cli_display.keyword("utilization"), \
cli_pairs(nvpairs2list(c))))
elif c.tagName == "operations":
l.append(cli_operations(c,format))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
'''
Convert a CLI description to DOM node.
Try to preserve as many ids as possible in case there's
an old XML version.
'''
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not oldnode:
oldnode = self.node
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
headnode = mkxmlsimple(head,oldnode,'rsc')
id_hint = headnode.getAttribute("id")
operations = None
for e in cli_list[1:]:
n = mkxmlnode(e,oldnode,id_hint)
if keyword_cmp(e[0], "operations"):
operations = n
if not keyword_cmp(e[0], "op"):
headnode.appendChild(n)
else:
if not operations:
operations = mkxmlnode(["operations",{}],oldnode,id_hint)
headnode.appendChild(operations)
operations.appendChild(n)
remove_id_used_attributes(oldnode)
return headnode
def check_sanity(self):
'''
Check operation timeouts and if all required parameters
are defined.
'''
if not self.node: # eh?
common_err("%s: no xml (strange)" % self.obj_id)
return user_prefs.get_check_rc()
ra_type = self.node.getAttribute("type")
ra_class = self.node.getAttribute("class")
ra_provider = self.node.getAttribute("provider")
ra = RAInfo(ra_class,ra_type,ra_provider)
if not ra.mk_ra_node(): # no RA found?
ra.error("no such resource agent")
return user_prefs.get_check_rc()
params = []
for c in self.node.childNodes:
if not is_element(c):
continue
if c.tagName == "instance_attributes":
params += nvpairs2list(c)
rc1 = ra.sanity_check_params(self.obj_id, params)
actions = {}
for c in self.node.childNodes:
if not is_element(c):
continue
if c.tagName == "operations":
for c2 in c.childNodes:
if is_element(c2) and c2.tagName == "op":
op,pl = op2list(c2)
if op:
actions[op] = pl
rc2 = ra.sanity_check_ops(self.obj_id, actions)
return rc1 | rc2
class CibContainer(CibObject):
'''
Groups and clones and ms.
'''
def repr_cli(self,node = None,format = True):
if not node:
node = self.node
l = []
l.append(cont_head(node))
cli_add_description(node,l)
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "instance_attributes":
l.append("%s %s" % \
(cli_display.keyword("params"), \
cli_pairs(nvpairs2list(c))))
elif c.tagName == "meta_attributes":
l.append("%s %s" % \
(cli_display.keyword("meta"), \
cli_pairs(nvpairs2list(c))))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not oldnode:
oldnode = self.node
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
headnode = mkxmlsimple(head,oldnode,'grp')
id_hint = headnode.getAttribute("id")
for e in cli_list[1:]:
n = mkxmlnode(e,oldnode,id_hint)
headnode.appendChild(n)
v = find_value(head[1],"$children")
if v:
for child_id in v:
obj = cib_factory.find_object(child_id)
if obj:
n = obj.node.cloneNode(1)
headnode.appendChild(n)
else:
no_object_err(child_id)
remove_id_used_attributes(oldnode)
return headnode
class CibLocation(CibObject):
'''
Location constraint.
'''
def repr_cli(self,node = None,format = True):
if not node:
node = self.node
l = []
l.append(location_head(node))
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "rule":
l.append("%s %s" % \
(cli_display.keyword("rule"), cli_rule(c)))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not oldnode:
oldnode = self.node
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
headnode = mkxmlsimple(head,oldnode,'location')
id_hint = headnode.getAttribute("id")
oldrule = None
for e in cli_list[1:]:
if e[0] in ("expression","date_expression"):
n = mkxmlnode(e,oldrule,id_hint)
else:
n = mkxmlnode(e,oldnode,id_hint)
if keyword_cmp(e[0], "rule"):
add_missing_attr(n)
rule = n
headnode.appendChild(n)
oldrule = lookup_node(rule,oldnode,location_only=True)
else:
rule.appendChild(n)
remove_id_used_attributes(oldnode)
return headnode
class CibSimpleConstraint(CibObject):
'''
Colocation and order constraints.
'''
def repr_cli(self,node = None,format = True):
if not node:
node = self.node
l = []
l.append(simple_constraint_head(node))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
if type(cli) == type('') or type(cli) == type(u''):
cli_list = parse_cli(cli)
else:
cli_list = cli
if not cli_list:
return None
if not oldnode:
oldnode = self.node
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
headnode = mkxmlsimple(head,oldnode,'')
id_hint = headnode.getAttribute("id")
for e in cli_list[1:]:
# if more than one element, it's a resource set
n = mkxmlnode(e,oldnode,id_hint)
headnode.appendChild(n)
remove_id_used_attributes(oldnode)
return headnode
class CibProperty(CibObject):
'''
Cluster properties.
'''
def repr_cli(self,node = None,format = True):
if not node:
node = self.node
l = []
l.append(cli_display.keyword(self.obj_type))
properties = nvpairs2list(node, add_id = True)
for n,v in properties:
if n == "$id":
l[0] = '%s %s="%s"' % (l[0],n,v)
else:
l.append(nvpair_format(n,v))
return cli_format(l,format)
def cli2node(self,cli,oldnode = None):
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not oldnode:
oldnode = cib_factory.topnode[cib_object_map[self.xml_obj_type][2]]
head = copy.copy(cli_list[0])
head[0] = backtrans[head[0]]
obj_id = find_value(head[1],"$id")
if not obj_id:
obj_id = cib_object_map[self.xml_obj_type][3]
headnode = mkxmlnode(head,oldnode,obj_id)
remove_id_used_attributes(oldnode)
return headnode
def matchcli(self,cli_list):
head = cli_list[0]
return self.obj_type == head[0] \
and self.obj_id == find_value(head[1],"$id")
def get_default(property):
v = None
if cib_factory.is_cib_sane():
v = cib_factory.get_property(property)
if not v:
v = pe_metadata.param_default(property)
return v
def cib_delete_element(obj):
'Remove one element from the CIB.'
if obj.xml_obj_type in defaults_tags:
node = cib_factory.createElement("meta_attributes")
else:
node = cib_factory.createElement(obj.xml_obj_type)
node.setAttribute("id",obj.obj_id)
rc = pipe_string("%s -D" % cib_piped, node.toxml())
if rc != 0:
update_err(obj.obj_id,'-D',node.toprettyxml())
node.unlink()
return rc
def cib_update_elements(upd_list):
'Update a set of objects in the CIB.'
l = [x.obj_id for x in upd_list]
o = CibObjectSetRaw(*l)
xml = o.repr_configure()
rc = pipe_string("%s -U" % cib_piped, xml)
if rc != 0:
update_err(' '.join(l),'-U',xml)
return rc
def cib_replace_element(obj):
rc = pipe_string("%s -R -o %s" % \
(cib_piped, obj.parent_type), obj.node.toxml())
if rc != 0:
update_err(obj.obj_id,'-R',obj.node.toprettyxml())
return rc
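# Summary of the cibadmin options used by the three helpers above
# (cib_piped, defined earlier in this file, is presumably a cibadmin
# invocation that reads the xml from stdin):
#
#   cib_delete_element  -> "-D"               delete a single element
#   cib_update_elements -> "-U"               create/update a set of objects
#   cib_replace_element -> "-R -o <section>"  replace within a section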
def cib_delete_moved_children(obj):
for c in obj.children:
if c.origin == "cib" and c.moved:
cib_delete_element(c)
# xml -> cli translations (and classes)
cib_object_map = {
"node": ( "node", CibNode, "nodes" ),
"primitive": ( "primitive", CibPrimitive, "resources" ),
"group": ( "group", CibContainer, "resources" ),
"clone": ( "clone", CibContainer, "resources" ),
"master": ( "ms", CibContainer, "resources" ),
"rsc_location": ( "location", CibLocation, "constraints" ),
"rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ),
"rsc_order": ( "order", CibSimpleConstraint, "constraints" ),
"cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ),
"rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ),
"op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ),
}
backtrans = odict() # generate a translation cli -> tag
for key in cib_object_map:
backtrans[cib_object_map[key][0]] = key
cib_topnodes = [] # get a list of parents
for key in cib_object_map:
if not cib_object_map[key][2] in cib_topnodes:
cib_topnodes.append(cib_object_map[key][2])
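# Spelled out, the derived tables look like this:
#   backtrans maps a cli name back to its xml tag, e.g.
#       backtrans["ms"] == "master"
#       backtrans["property"] == "cluster_property_set"
#   cib_topnodes collects the distinct parent sections, i.e. nodes,
#   resources, constraints, crm_config, rsc_defaults and op_defaults
#   (in whatever order the dict iteration yields them).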
class CibFactory(object):
'''
Juggle with CIB objects.
See check_structure below for details on the internal cib
representation.
'''
def __init__(self):
self.init_vars()
self.regtest = regression_tests
self.all_committed = True # set to False if commit hits an error
self._no_constraint_rm_msg = False # internal (just not to produce silly messages)
self.supported_cib_re = "^pacemaker-1[.]0$"
def is_cib_sane(self):
if not self.doc:
empty_cib_err()
return False
return True
#
# check internal structures
#
def check_topnode(self,obj):
if not obj.node.parentNode.isSameNode(self.topnode[obj.parent_type]):
common_err("object %s is not linked to %s"%(obj.obj_id,obj.parent_type))
def check_parent(self,obj,parent):
if not obj in parent.children:
common_err("object %s does not reference its child %s"%(parent.obj_id,obj.obj_id))
return False
if not parent.node.isSameNode(obj.node.parentNode):
common_err("object %s node is not a child of its parent %s, but %s:%s"%(obj.obj_id,parent.obj_id,obj.node.tagName,obj.node.getAttribute("id")))
return False
def check_structure(self):
#print "Checking structure..."
if not self.doc:
empty_cib_err()
return False
rc = True
for obj in self.cib_objects:
#print "Checking %s... (%s)" % (obj.obj_id,obj.nocli)
if obj.parent:
if self.check_parent(obj,obj.parent) == False:
rc = False
else:
if self.check_topnode(obj) == False:
rc = False
for child in obj.children:
if self.check_parent(child,child.parent) == False:
rc = False
return rc
def regression_testing(self,param):
# provide some help for regression testing
# in particular by trying to provide output which is
# easier to predict
if param == "off":
self.regtest = False
elif param == "on":
self.regtest = True
else:
common_warn("bad parameter for regtest: %s" % param)
def createElement(self,tag):
if self.doc:
return self.doc.createElement(tag)
else:
empty_cib_err()
def is_cib_supported(self,cib):
'Do we support this CIB?'
req = cib.getAttribute("crm_feature_set")
validator = cib.getAttribute("validate-with")
if validator and re.match(self.supported_cib_re,validator):
return True
cib_ver_unsupported_err(validator,req)
return False
def upgrade_cib_06to10(self,force = False):
'Upgrade the CIB from 0.6 to 1.0.'
if not self.doc:
empty_cib_err()
return False
req = self.doc.getAttribute("crm_feature_set")
validator = self.doc.getAttribute("validate-with")
if force or not validator or re.match("0[.]6",validator):
return ext_cmd(cib_upgrade) == 0
def import_cib(self):
'Parse the current CIB (from cibadmin -Q).'
self.doc,cib = read_cib(cibdump2doc)
if not self.doc:
return False
if not cib:
common_err("CIB has no cib element")
self.reset()
return False
if not self.is_cib_supported(cib):
self.reset()
return False
for attr in cib.attributes.keys():
self.cib_attrs[attr] = cib.getAttribute(attr)
for t in cib_topnodes:
self.topnode[t] = get_conf_elem(self.doc, t)
if not self.topnode[t]:
self.topnode[t] = mk_topnode(self.doc, t)
self.missing_topnodes.append(t)
if not self.topnode[t]:
common_err("could not create %s node; out of memory?" % t)
self.reset()
return False
return True
#
# create a doc from the list of objects
# (used by CibObjectSetRaw)
#
def regtest_filter(self,cib):
for attr in ("epoch","admin_epoch"):
if cib.getAttribute(attr):
cib.setAttribute(attr,"0")
for attr in ("cib-last-written",):
if cib.getAttribute(attr):
cib.removeAttribute(attr)
def set_cib_attributes(self,cib):
for attr in self.cib_attrs:
cib.setAttribute(attr,self.cib_attrs[attr])
if self.regtest:
self.regtest_filter(cib)
def objlist2doc(self,obj_list,obj_filter = None):
'''
Return document containing objects in obj_list.
Keep only top parents from the object list, because
printing the xml of a parent already includes its children.
Optional filter to sieve objects.
'''
doc,cib,crm_config,rsc_defaults,op_defaults,nodes,resources,constraints = new_cib()
# get only top parents for the objects in the list
# e.g. if we get a primitive which is part of a clone,
# then the clone gets in, not the primitive
# dict will weed out duplicates
d = {}
for obj in obj_list:
if obj_filter and not obj_filter(obj):
continue
d[obj.top_parent()] = 1
for obj in d:
i_node = doc.importNode(obj.node,1)
if obj.parent_type == "nodes":
nodes.appendChild(i_node)
elif obj.parent_type == "resources":
resources.appendChild(i_node)
elif obj.parent_type == "constraints":
constraints.appendChild(i_node)
elif obj.parent_type == "crm_config":
crm_config.appendChild(i_node)
elif obj.parent_type == "rsc_defaults":
rsc_defaults.appendChild(i_node)
elif obj.parent_type == "op_defaults":
op_defaults.appendChild(i_node)
self.set_cib_attributes(cib)
return doc
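# Example (hypothetical ids): for obj_list = [p1, c1, loc1] where p1 is
# a primitive inside clone c1, the dict d keeps only the top parents
# c1 and loc1; c1's xml already contains p1, so the primitive is not
# emitted a second time.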
#
# commit changed objects to the CIB
#
def attr_match(self,c,a):
'Does attribute match?'
try: cib_attr = self.cib_attrs[a]
except: cib_attr = None
return c.getAttribute(a) == cib_attr
def is_current_cib_equal(self, silent = False):
if self.overwrite:
return True
doc,cib = read_cib(cibdump2doc)
if not doc:
return False
if not cib:
doc.unlink()
return False
rc = self.attr_match(cib,'epoch') and \
self.attr_match(cib,'admin_epoch')
if not silent and not rc:
common_warn("CIB changed in the meantime: won't touch it!")
doc.unlink()
return rc
def add_missing_topnodes(self):
cib_create_topnode = "cibadmin -C -o configuration -X"
for tag in self.missing_topnodes:
if not self.topnode[tag].hasChildNodes():
continue
if ext_cmd("%s '<%s/>'" % (cib_create_topnode, tag)) != 0:
common_err("could not create %s in the cib" % tag)
return False
return True
def state_header(self):
'Print object status header'
print CibObject.state_fmt % \
("","origin","updated","moved","invalid","parent","children")
def showobjects(self):
self.state_header()
for obj in self.cib_objects:
obj.dump_state()
if self.remove_queue:
print "Remove queue:"
for obj in self.remove_queue:
obj.dump_state()
def showqueue(self, title, obj_filter):
upd_list = self.cib_objs4cibadmin(obj_filter)
if title == "delete":
upd_list += self.remove_queue
if upd_list:
s = ''
upd_list = processing_sort_cli(upd_list)
if title == "delete":
upd_list = reversed(upd_list)
for obj in upd_list:
s = s + " " + obj.obj_string()
print "%s:%s" % (title,s)
def showqueues(self):
'Show what is going to happen on commit.'
# 1. remove objects (incl. modified constraints)
self.showqueue("delete", lambda o:
o.origin == "cib" and (o.updated or o.recreate) and is_constraint(o.node))
# 2. update existing objects
self.showqueue("replace", lambda o: \
o.origin != 'user' and o.updated and not is_constraint(o.node))
# 3. create new objects
self.showqueue("create", lambda o: \
o.origin == 'user' and not is_constraint(o.node))
# 4. create objects moved from a container
self.showqueue("create", lambda o: \
not o.parent and o.moved and o.origin == "cib")
# 5. create constraints
self.showqueue("create", lambda o: is_constraint(o.node) and \
(((o.updated or o.recreate) and o.origin == "cib") or o.origin == "user"))
def commit(self):
'Commit the configuration to the CIB.'
if not self.doc:
empty_cib_err()
return False
if not self.add_missing_topnodes():
return False
# all_committed is updated in the invoked object methods
self.all_committed = True
cnt = 0
# 1. remove objects (incl. modified constraints)
cnt += self.delete_objects(lambda o:
o.origin == "cib" and (o.updated or o.recreate) and is_constraint(o.node))
# 2. update existing objects
cnt += self.replace_objects(lambda o: \
o.origin != 'user' and o.updated and not is_constraint(o.node))
# 3. create new objects
cnt += self.create_objects(lambda o: \
o.origin == 'user' and not is_constraint(o.node))
# 4. create objects moved from a container
cnt += self.create_objects(lambda o: \
not o.parent and o.moved and o.origin == "cib")
# 5. create constraints
cnt += self.create_objects(lambda o: is_constraint(o.node) and \
(((o.updated or o.recreate) and o.origin == "cib") or o.origin == "user"))
if cnt:
# reload the cib!
self.reset()
self.initialize()
return self.all_committed
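# Illustration of the ordering above: a constraint that already exists
# in the CIB and was edited is first deleted (step 1) and then
# re-created with its new definition (step 5), while an edited resource
# is replaced in place (step 2); new user-created objects are created
# in steps 3 and 5, so a constraint is never sent before the resources
# it references.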
def cib_objs4cibadmin(self,obj_filter):
'''
Filter objects from our cib_objects list. But add only
top parents.
For this to work, the filter must not filter out parents.
That's guaranteed by the updated flag propagation.
'''
upd_list = []
for obj in self.cib_objects:
if not obj_filter or obj_filter(obj):
if not obj.parent and not obj in upd_list:
upd_list.append(obj)
return upd_list
def delete_objects(self,obj_filter):
cnt = 0
upd_list = self.cib_objs4cibadmin(obj_filter)
if not (self.remove_queue + upd_list):
return 0
obj_list = processing_sort_cli(self.remove_queue + upd_list)
for obj in reversed(obj_list):
if cib_delete_element(obj) == 0:
if obj in self.remove_queue:
self.remove_queue.remove(obj)
cnt += 1
else:
self.all_committed = False
return cnt
def create_objects(self,obj_filter):
upd_list = self.cib_objs4cibadmin(obj_filter)
if not upd_list:
return 0
for obj in upd_list:
cib_delete_moved_children(obj)
if cib_update_elements(upd_list) == 0:
for obj in upd_list:
obj.reset_updated()
return len(upd_list)
else:
self.all_committed = False
return 0
def replace_objects(self,obj_filter):
cnt = 0
upd_list = self.cib_objs4cibadmin(obj_filter)
if not upd_list:
return 0
for obj in processing_sort_cli(upd_list):
#print obj.node.toprettyxml()
cib_delete_moved_children(obj)
if cib_replace_element(obj) == 0:
cnt += 1
obj.reset_updated()
else:
self.all_committed = False
return cnt
#
# initialize cib_objects from CIB
#
def save_node(self,node,pnode = None):
if not pnode:
pnode = node
obj = cib_object_map[pnode.tagName][1](pnode.tagName)
obj.origin = "cib"
self.cib_objects.append(obj)
if not obj.save_xml(node):
obj.nocli = True
def populate(self):
"Walk the cib and collect cib objects."
all_nodes = get_interesting_nodes(self.doc,[])
if not all_nodes:
return
for node in processing_sort(all_nodes):
if is_defaults(node):
for c in node.childNodes:
if not is_element(c) or c.tagName != "meta_attributes":
continue
self.save_node(c,node)
else:
self.save_node(node)
for obj in self.cib_objects:
obj.update_links()
def initialize(self):
if self.doc:
return True
if not self.import_cib():
return False
sanitize_cib(self.doc)
show_unrecognized_elems(self.doc)
self.populate()
return self.check_structure()
def init_vars(self):
self.doc = None # the cib
self.topnode = {}
for t in cib_topnodes:
self.topnode[t] = None
self.missing_topnodes = []
self.cib_attrs = {} # cib version dictionary
self.cib_objects = [] # a list of cib objects
self.remove_queue = [] # a list of cib objects to be removed
self.overwrite = False # update cib unconditionally
def reset(self):
if not self.doc:
return
self.doc.unlink()
self.init_vars()
id_store.clear()
def find_object(self,obj_id):
"Find an object for id."
for obj in self.cib_objects:
if obj.obj_id == obj_id:
return obj
return None
#
# tab completion functions
#
def id_list(self):
"List of ids (for completion)."
return [x.obj_id for x in self.cib_objects]
def prim_id_list(self):
"List of primitives ids (for group completion)."
return [x.obj_id for x in self.cib_objects if x.obj_type == "primitive"]
def children_id_list(self):
"List of child ids (for clone/master completion)."
return [x.obj_id for x in self.cib_objects if x.obj_type in children_tags]
def rsc_id_list(self):
"List of resource ids (for constraint completion)."
return [x.obj_id for x in self.cib_objects \
if x.obj_type in resource_tags and not x.parent]
def f_prim_id_list(self):
"List of possible primitives ids (for group completion)."
return [x.obj_id for x in self.cib_objects \
if x.obj_type == "primitive" and not x.parent]
def f_children_id_list(self):
"List of possible child ids (for clone/master completion)."
return [x.obj_id for x in self.cib_objects \
if x.obj_type in children_tags and not x.parent]
#
# a few helper functions
#
def find_object_for_node(self,node):
"Find an object which matches a dom node."
for obj in self.cib_objects:
if node.getAttribute("id") == obj.obj_id:
return obj
return None
def resolve_id_ref(self,attr_list_type,id_ref):
'''
The user may specify id_ref either as an object id or as an
attribute set id. Here we try to figure out which one it is
and, in the former case, find the right id to reference.
'''
obj= self.find_object(id_ref)
if obj:
node_l = obj.node.getElementsByTagName(attr_list_type)
if node_l:
if len(node_l) > 1:
common_warn("%s contains more than one %s, using first" % \
(obj.obj_id,attr_list_type))
id = node_l[0].getAttribute("id")
if not id:
common_err("%s reference not found" % id_ref)
return id_ref # hope that user will fix that
return id
# verify if id_ref exists
node_l = self.doc.getElementsByTagName(attr_list_type)
for node in node_l:
if node.getAttribute("id") == id_ref:
return id_ref
common_err("%s reference not found" % id_ref)
return id_ref # hope that user will fix that
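# Example (hypothetical ids): if an id_ref of "web1" is given and
# "web1" is the id of a primitive rather than of an attribute set, the
# id of that primitive's first matching attribute set (e.g. its
# instance_attributes) is returned instead, with a warning when more
# than one such set exists.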
def get_property(self,property):
'''
Get the value of the given cluster property.
'''
for obj in self.cib_objects:
if obj.obj_type == "property" and obj.node:
pl = nvpairs2list(obj.node)
v = find_value(pl, property)
if v:
return v
return None
def new_object(self,obj_type,obj_id):
"Create a new object of type obj_type."
if id_in_use(obj_id):
return None
for xml_obj_type,v in cib_object_map.items():
if v[0] == obj_type:
obj = v[1](xml_obj_type,obj_id)
if obj.obj_id:
return obj
else:
return None
return None
def mkobj_list(self,mode,*args):
obj_list = []
for obj in self.cib_objects:
f = lambda: obj.filter(*args)
if not f():
continue
if mode == "cli" and obj.nocli:
obj_cli_err(obj.obj_id)
continue
obj_list.append(obj)
return obj_list
def has_cib_changed(self):
return self.mkobj_list("xml","changed") or self.remove_queue
def verify_constraints(self,cli_list):
'''
Check if all resources referenced in a constraint exist
'''
rc = True
head = cli_list[0]
constraint_id = find_value(head[1],"id")
for obj_id in referenced_resources_cli(cli_list):
if not self.find_object(obj_id):
constraint_norefobj_err(constraint_id,obj_id)
rc = False
return rc
def verify_children(self,cli_list):
'''
Check prerequisites:
a) all children must exist
b) no child may have other parent than me
(or should we steal children?)
c) there may not be duplicate children
'''
head = cli_list[0]
obj_type = head[0]
obj_id = find_value(head[1],"id")
c_ids = find_value(head[1],"$children")
if not c_ids:
return True
rc = True
c_dict = {}
for child_id in c_ids:
if not self.verify_child(child_id,obj_type,obj_id):
rc = False
if child_id in c_dict:
common_err("in group %s child %s listed more than once"%(obj_id,child_id))
rc = False
c_dict[child_id] = 1
return rc
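# Examples of what gets rejected here (hypothetical ids):
#   group g1 p1 p1      -> p1 listed more than once
#   group g2 p1         -> fails if p1 is already a child of g1
#                          ("already in use at g1")
#   group g3 clone1     -> a group may contain only primitives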
def verify_child(self,child_id,obj_type,obj_id):
'Check if child exists and obj_id is (or may become) its parent.'
child = self.find_object(child_id)
if not child:
no_object_err(child_id)
return False
if child.parent and child.parent.obj_id != obj_id:
common_err("%s already in use at %s"%(child_id,child.parent.obj_id))
return False
if obj_type == "group" and child.obj_type != "primitive":
common_err("a group may contain only primitives; %s is %s"%(child_id,child.obj_type))
return False
if not child.obj_type in children_tags:
common_err("%s may contain a primitive or a group; %s is %s"%(obj_type,child_id,child.obj_type))
return False
return True
def verify_cli(self,cli_list):
'''
Can we create this object given its CLI representation.
This is not about syntax, we're past that, but about
semantics.
Right now we check if the children, if any, are fit for
the parent. And if this is a constraint, if all
referenced resources are present.
'''
rc = True
if not self.verify_children(cli_list):
rc = False
if not self.verify_constraints(cli_list):
rc = False
return rc
def create_object(self,*args):
s = []
s += args
return self.create_from_cli(parse_cli(s)) != None
def set_property_cli(self,cli_list):
head_pl = cli_list[0]
obj_type = head_pl[0].lower()
pset_id = find_value(head_pl[1],"$id")
if pset_id:
head_pl[1].remove(["$id",pset_id])
else:
pset_id = cib_object_map[backtrans[obj_type]][3]
obj = self.find_object(pset_id)
if not obj:
if not is_id_valid(pset_id):
invalid_id_err(pset_id)
return None
obj = self.new_object(obj_type,pset_id)
if not obj:
return None
self.topnode[obj.parent_type].appendChild(obj.node)
obj.origin = "user"
self.cib_objects.append(obj)
for n,v in head_pl[1]:
set_nvpair(obj.node,n,v)
obj.updated = True
return obj
def add_op(self,cli_list):
'''Add an op to a primitive.'''
head = cli_list[0]
# does the referenced primitive exist
rsc_id = find_value(head[1],"rsc")
rsc_obj = cib_factory.find_object(rsc_id)
if not rsc_obj:
no_object_err(rsc_id)
return None
if rsc_obj.obj_type != "primitive":
common_err("%s is not a primitive" % rsc_id)
return None
# check if there is already an op with the same interval
name = find_value(head[1], "name")
interval = find_value(head[1], "interval")
if find_operation(rsc_obj.node,name,interval):
common_err("%s already has a %s op with interval %s" % \
(rsc_id, name, interval))
return None
# drop the rsc attribute
head[1].remove(["rsc",rsc_id])
# create an xml node
mon_node = mkxmlsimple(head, None, rsc_id)
# get the place to append it to
try:
op_node = rsc_obj.node.getElementsByTagName("operations")[0]
except:
op_node = self.createElement("operations")
rsc_obj.node.appendChild(op_node)
op_node.appendChild(mon_node)
# the resource is updated
rsc_obj.updated = True
rsc_obj.propagate_updated()
return rsc_obj
def create_from_cli(self,cli):
'Create a new cib object from the cli representation.'
cli_list = mk_cli_list(cli)
if not cli_list:
return None
if not self.verify_cli(cli_list):
return None
head = cli_list[0]
obj_type = head[0].lower()
if obj_type in nvset_cli_names:
return self.set_property_cli(cli_list)
if obj_type == "op":
return self.add_op(cli_list)
obj_id = find_value(head[1],"id")
if not is_id_valid(obj_id):
invalid_id_err(obj_id)
return None
obj = self.new_object(obj_type,obj_id)
if not obj:
return None
obj.node = obj.cli2node(cli_list)
if user_prefs.is_check_always() \
and obj.check_sanity() > 1:
id_store.remove_xml(obj.node)
obj.node.unlink()
return None
self.topnode[obj.parent_type].appendChild(obj.node)
self.adjust_children(obj,cli_list)
obj.origin = "user"
for child in obj.children:
# redirect constraints to the new parent
for c_obj in self.related_constraints(child):
self.remove_queue.append(c_obj.mkcopy())
rename_rscref(c_obj,child.obj_id,obj.obj_id)
# drop useless constraints which may have been created above
for c_obj in self.related_constraints(obj):
if silly_constraint(c_obj.node,obj.obj_id):
self._no_constraint_rm_msg = True
self._remove_obj(c_obj)
self._no_constraint_rm_msg = False
self.cib_objects.append(obj)
return obj
def update_moved(self,obj):
'Update the moved flag. Mark affected constraints.'
obj.moved = not obj.moved
if obj.moved:
for c_obj in self.related_constraints(obj):
c_obj.recreate = True
def adjust_children(self,obj,cli_list):
'''
Everything related to children: manage the children's nodes,
update the parent's list of children, and update the parent
reference in each child.
'''
head = cli_list[0]
children_ids = find_value(head[1],"$children")
if not children_ids:
return
new_children = []
for child_id in children_ids:
new_children.append(self.find_object(child_id))
self._relink_orphans(obj,new_children)
obj.children = new_children
self._update_children(obj)
def _relink_child(self,obj):
'Relink a child to the top node.'
obj.node.parentNode.removeChild(obj.node)
self.topnode[obj.parent_type].appendChild(obj.node)
self.update_moved(obj)
obj.parent = None
def _update_children(self,obj):
'''For composite objects: update all children nodes.
'''
# unlink all and find them in the new node
for child in obj.children:
oldnode = child.node
child.node = obj.find_child_in_node(child)
if child.children: # and children of children
self._update_children(child)
rmnode(oldnode)
if not child.parent:
self.update_moved(child)
if child.parent and child.parent != obj:
child.parent.updated = True # the other parent updated
child.parent = obj
def _relink_orphans(self,obj,new_children):
"New orphans move to the top level for the object type."
for child in obj.children:
if child not in new_children:
self._relink_child(child)
def add_obj(self,obj_type,node):
obj = self.new_object(obj_type, node.getAttribute("id"))
if not obj:
return None
if not obj.save_xml(node):
obj.nocli = True
obj.update_links()
obj.origin = "user"
self.cib_objects.append(obj)
return obj
def create_from_node(self,node):
'Create a new cib object from a document node.'
if not node:
return None
obj_type = cib_object_map[node.tagName][0]
node = self.doc.importNode(node,1)
obj = None
if is_defaults(node):
for c in node.childNodes:
if not is_element(c) or c.tagName != "meta_attributes":
continue
obj = self.add_obj(obj_type,c)
else:
obj = self.add_obj(obj_type,node)
if obj:
self.topnode[obj.parent_type].appendChild(node)
return obj
def cib_objects_string(self, obj_list = None):
l = []
if not obj_list:
obj_list = self.cib_objects
for obj in obj_list:
l.append(obj.obj_string())
return ' '.join(l)
def _remove_obj(self,obj):
"Remove a cib object and its children."
# remove children first
# can't remove them here from obj.children!
common_debug("remove object %s" % obj.obj_string())
for child in obj.children:
#self._remove_obj(child)
# just relink, don't remove children
self._relink_child(child)
if obj.parent: # remove obj from its parent, if any
obj.parent.children.remove(obj)
id_store.remove_xml(obj.node)
rmnode(obj.node)
obj.invalid = True
self.add_to_remove_queue(obj)
self.cib_objects.remove(obj)
for c_obj in self.related_constraints(obj):
if is_simpleconstraint(c_obj.node) and obj.children:
# the first child inherits constraints
rename_rscref(c_obj,obj.obj_id,obj.children[0].obj_id)
delete_rscref(c_obj,obj.obj_id)
if silly_constraint(c_obj.node,obj.obj_id):
# remove invalid constraints
self._remove_obj(c_obj)
if not self._no_constraint_rm_msg:
err_buf.info("hanging %s deleted" % c_obj.obj_string())
def related_constraints(self,obj):
if not is_resource(obj.node):
return []
c_list = []
for obj2 in self.cib_objects:
if not is_constraint(obj2.node):
continue
if rsc_constraint(obj.obj_id,obj2.node):
c_list.append(obj2)
return c_list
def add_to_remove_queue(self,obj):
if obj.origin == "cib":
self.remove_queue.append(obj)
#print self.cib_objects_string(self.remove_queue)
def delete_1(self,obj):
'''
Remove an object and its parent in case the object is the
only child.
'''
if obj.parent and len(obj.parent.children) == 1:
self.delete_1(obj.parent)
if obj in self.cib_objects: # don't remove parents twice
self._remove_obj(obj)
def delete(self,*args):
'Delete a cib object.'
if not self.doc:
empty_cib_err()
return False
rc = True
l = []
for obj_id in args:
obj = self.find_object(obj_id)
if not obj:
no_object_err(obj_id)
rc = False
continue
if is_rsc_running(obj_id):
common_warn("resource %s is running, can't delete it" % obj_id)
else:
l.append(obj)
if l:
l = processing_sort_cli(l)
for obj in reversed(l):
self.delete_1(obj)
return rc
def remove_on_rename(self,obj):
'''
If the renamed object is coming from the cib, then it
must be removed and a new one created.
'''
if obj.origin == "cib":
self.remove_queue.append(obj.mkcopy())
obj.origin = "user"
def rename(self,old_id,new_id):
'''
Rename a cib object.
- check if the resource (if it's a resource) is stopped
- check if the new id is not taken
- find the object with old id
- rename old id to new id in all related objects
(constraints)
- if the object came from the CIB, then it must be
deleted and the one with the new name created
- rename old id to new id in the object
'''
if not self.doc:
empty_cib_err()
return False
if id_in_use(new_id):
return False
obj = self.find_object(old_id)
if not obj:
no_object_err(old_id)
return False
if not obj.can_be_renamed():
return False
for c_obj in self.related_constraints(obj):
rename_rscref(c_obj,old_id,new_id)
self.remove_on_rename(obj)
rename_id(obj.node,old_id,new_id)
obj.obj_id = new_id
id_store.rename(old_id,new_id)
obj.updated = True
obj.propagate_updated()
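# Example (hypothetical ids): "rename p1 web" is refused while p1 is
# running; otherwise every related constraint is rewritten to reference
# "web", and if p1 originally came from the CIB a copy of the old
# object is queued for removal so that commit deletes the old id and
# creates the new one.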
def erase(self):
"Remove all cib objects."
# remove only bottom objects and no constraints
# the rest will automatically follow
if not self.doc:
empty_cib_err()
return False
erase_ok = True
l = []
for obj in [obj for obj in self.cib_objects \
if not obj.children and not is_constraint(obj.node) \
and obj.obj_type != "node" ]:
if is_rsc_running(obj.obj_id):
common_warn("resource %s is running, can't delete it" % obj.obj_id)
erase_ok = False
else:
l.append(obj)
if not erase_ok:
common_err("CIB erase aborted (nothing was deleted)")
return False
self._no_constraint_rm_msg = True
for obj in l:
self.delete(obj.obj_id)
self._no_constraint_rm_msg = False
remaining = 0
for obj in self.cib_objects:
if obj.obj_type != "node":
remaining += 1
if remaining > 0:
common_err("strange, but these objects remained:")
for obj in self.cib_objects:
if obj.obj_type != "node":
print >> sys.stderr, obj.obj_string()
self.cib_objects = []
return True
def erase_nodes(self):
"Remove nodes only."
if not self.doc:
empty_cib_err()
return False
l = [obj for obj in self.cib_objects if obj.obj_type == "node"]
for obj in l:
self.delete(obj.obj_id)
def refresh(self):
"Refresh from the CIB."
self.reset()
self.initialize()
class TopLevel(UserInterface):
'''
The top level.
'''
crm_mon = "crm_mon -1"
status_opts = {
"bynode": "-n",
"inactive": "-r",
"ops": "-o",
"timing": "-t",
"failcounts": "-f",
}
help_table = odict()
help_table["."] = ("","""This is the CRM command line interface program.""")
help_table["cib"] = ("manage shadow CIBs", """
A shadow CIB is a regular cluster configuration which is kept in
a file. The CRM and the CRM tools may manage a shadow CIB in the
same way as the live CIB (i.e. the current cluster configuration).
A shadow CIB may be applied to the cluster in one step.
""")
help_table["resource"] = ("resources management", """
Everything related to resources management is available at this
level. Most commands are implemented using the crm_resource(8)
program.
""")
help_table["node"] = ("nodes management", """
A few node related tasks such as node standby are implemented
here.
""")
help_table["options"] = ("user preferences", """
Several user preferences are available. Note that it is possible
to save the preferences to a startup file.
""")
help_table["configure"] = ("CRM cluster configuration", """
The configuration level.
Note that you can change the working CIB at the cib level. It is
advisable to configure shadow CIBs and then commit them to the
cluster.
""")
help_table["ra"] = ("resource agents information center", """
This level contains commands which show various information about
the installed resource agents. It is available both at the top
level and at the `configure` level.
""")
help_table["status"] = ("show cluster status", """
Show cluster status. The status is displayed by crm_mon. Supply
additional arguments for more information or a different format.
See crm_mon(8) for more details.
Usage:
...............
status [<option> ...]
option :: bynode | inactive | ops | timing | failcounts
...............
""")
help_table["quit"] = ("exit the program", "")
help_table["help"] = ("show help", "")
help_table["end"] = ("go back one level", "")
def __init__(self):
UserInterface.__init__(self)
self.cmd_table['cib'] = CibShadow
self.cmd_table['resource'] = RscMgmt
self.cmd_table['configure'] = CibConfig
self.cmd_table['node'] = NodeMgmt
self.cmd_table['options'] = CliOptions
self.cmd_table['status'] = (self.status,(0,5),0)
self.cmd_table['ra'] = RA
setup_aliases(self)
def status(self,cmd,*args):
"""usage: status [<option> ...]
option :: bynode | inactive | ops | timing | failcounts
"""
status_cmd = self.crm_mon
for par in args:
if par in self.status_opts:
status_cmd = "%s %s" % (status_cmd, self.status_opts[par])
else:
syntax_err((cmd,par), context = 'status')
return False
return ext_cmd(status_cmd) == 0
class CompletionHelp(object):
'''
Print some help on the last word in the line.
'''
timeout = 60 # don't print again and again
def __init__(self):
self.laststamp = 0
self.lastitem = ''
def help(self,f,*args):
words = readline.get_line_buffer().split()
if not words:
return
key = words[-1]
if key.endswith('='):
key = key[0:-1]
if self.lastitem == key and \
time.time() - self.laststamp < self.timeout:
return
help_s = f(key,*args)
if help_s:
print "\n%s" % help_s
print "%s%s" % (prompt,readline.get_line_buffer()),
self.laststamp = time.time()
self.lastitem = key
def attr_cmds(idx,delimiter = False):
if delimiter:
return ' '
return ["delete","set","show"]
def listnodes():
if wcache.is_cached("listnodes"):
return wcache.retrieve("listnodes")
nodes = []
doc = cibdump2doc("nodes")
if not doc:
return []
nodes_node = get_conf_elem(doc, "nodes")
if not nodes_node:
return []
for c in nodes_node.childNodes:
if not is_element(c):
continue
if c.tagName != "node":
continue
if c.getAttribute("type") == 'normal':
nodes.append(c.getAttribute("uname"))
return wcache.store("property_list",nodes)
def nodes_list(idx,delimiter = False):
if delimiter:
return ' '
return listnodes()
def shadows_list(idx,delimiter = False):
if delimiter:
return ' '
return listshadows()
def templates_list(idx,delimiter = False):
if delimiter:
return ' '
return listtemplates()
def config_list(idx,delimiter = False):
if delimiter:
return ' '
return listconfigs()
def config_list_method(idx,delimiter = False):
if delimiter:
return ' '
return listconfigs() + ["replace","update"]
def shadows_live_list(idx,delimiter = False):
if delimiter:
return ' '
return listshadows() + ['live']
def rsc_list(idx,delimiter = False):
if delimiter:
return ' '
doc = resources_xml()
if not doc:
return []
nodes = get_interesting_nodes(doc,[])
return [x.getAttribute("id") for x in nodes if is_resource(x)]
def null_list(idx,delimiter = False):
if delimiter:
return ' '
return []
def loop(idx,delimiter = False):
"just a marker in a list"
pass
def id_xml_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_factory.id_list() + ['xml','changed']
def id_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_factory.id_list()
def f_prim_id_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_factory.f_prim_id_list()
def f_children_id_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_factory.f_children_id_list()
def rsc_id_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_factory.rsc_id_list()
def status_node_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_status.status_node_list()
def status_rsc_list(idx,delimiter = False):
if delimiter:
return ' '
return cib_status.status_rsc_list()
def node_states_list(idx,delimiter = False):
if delimiter:
return ' '
return StatusMgmt.node_states
def ra_operations_list(idx,delimiter = False):
if delimiter:
return ' '
return StatusMgmt.ra_operations
def lrm_exit_codes_list(idx,delimiter = False):
if delimiter:
return ' '
return StatusMgmt.lrm_exit_codes.keys()
def lrm_status_codes_list(idx,delimiter = False):
if delimiter:
return ' '
return StatusMgmt.lrm_status_codes.keys()
def skills_list(idx,delimiter = False):
if delimiter:
return ' '
return user_prefs.skill_levels.keys()
def ra_classes_list(idx,delimiter = False):
if delimiter:
return ':'
return ra_classes()
def get_primitive_type(words):
try:
idx = words.index("primitive") + 2
type_word = words[idx]
except: type_word = ''
return type_word
def ra_type_list(toks,idx,delimiter):
if idx == 2:
if toks[0] == "ocf":
dchar = ':'
l = ra_providers_all()
else:
dchar = ' '
l = ra_types(toks[0])
elif idx == 3:
dchar = ' '
if toks[0] == "ocf":
l = ra_types(toks[0],toks[1])
else:
l = ra_types(toks[0])
if delimiter:
return dchar
return l
def prim_meta_attr_list():
return [\
"allow-migrate", \
"globally-unique", \
"is-managed", \
"migration-threshold", \
"priority", \
"multiple-active", \
"failure-timeout", \
"resource-stickiness", \
"target-role", \
]
def op_attr_list():
return op_attributes
def operations_list():
return op_cli_names
def prim_complete_meta(ra,delimiter):
if delimiter:
return '='
return prim_meta_attr_list()
def prim_complete_op(ra,delimiter):
words = split_buffer()
if (readline.get_line_buffer()[-1] == ' ' and words[-1] == "op") \
or (readline.get_line_buffer()[-1] != ' ' and words[-2] == "op"):
dchar = ' '
l = operations_list()
else:
if readline.get_line_buffer()[-1] == '=':
dchar = ' '
l = []
else:
dchar = '='
l = op_attr_list()
if delimiter:
return dchar
return l
def prim_complete_params(ra,delimiter):
if readline.get_line_buffer()[-1] == '=':
dchar = ' '
l = []
else:
dchar = '='
l = ra.params().keys()
if delimiter:
return dchar
return l
def prim_params_info(key,ra):
return ra.meta_parameter(key)
def meta_attr_info(key,ra):
pass
def op_attr_info(key,ra):
pass
def get_lastkeyw(words,keyw):
revwords = copy.copy(words)
revwords.reverse()
for w in revwords:
if w in keyw:
return w
def primitive_complete_complex(idx,delimiter = False):
'''
This completer depends on the content of the line, i.e. on
previous tokens, in particular on the type of the RA.
'''
completers_set = {
"params": (prim_complete_params, prim_params_info),
"meta": (prim_complete_meta, meta_attr_info),
"op": (prim_complete_op, op_attr_info),
}
# manage the resource type
words = readline.get_line_buffer().split()
type_word = get_primitive_type(words)
toks = type_word.split(':')
if toks[0] != "ocf":
idx += 1
if idx in (2,3):
return ra_type_list(toks,idx,delimiter)
# create an ra object
ra = None
ra_class,provider,rsc_type = disambiguate_ra_type(type_word)
if ra_type_validate(type_word,ra_class,provider,rsc_type):
ra = RAInfo(ra_class,rsc_type,provider)
keywords = completers_set.keys()
if idx == 4:
if delimiter:
return ' '
return keywords
lastkeyw = get_lastkeyw(words,keywords)
if '=' in words[-1] and readline.get_line_buffer()[-1] != ' ':
if not delimiter and lastkeyw and \
readline.get_line_buffer()[-1] == '=' and len(words[-1]) > 1:
compl_help.help(completers_set[lastkeyw][1],ra)
if delimiter:
return ' '
return ['*']
else:
if lastkeyw:
return completers_set[lastkeyw][0](ra,delimiter)
def property_complete(idx,delimiter = False):
'''
This completer depends on the content of the line, i.e. on
previous tokens.
'''
words = readline.get_line_buffer().split()
if '=' in words[-1] and readline.get_line_buffer()[-1] != ' ':
if not delimiter and \
readline.get_line_buffer()[-1] == '=' and len(words[-1]) > 1:
compl_help.help(prim_params_info,pe_metadata)
if delimiter:
return ' '
return ['*']
else:
return prim_complete_params(pe_metadata,delimiter)
def topics_dict(help_tab):
if not help_tab:
return {}
topics = {}
for topic in help_tab:
if topic != '.':
topics[topic] = None
return topics
def mk_completion_tab(obj,ctab):
cmd_table = obj.cmd_table
for key,value in cmd_table.items():
if key.startswith("_"):
continue
if type(value) == type(object):
ctab[key] = {}
elif key == "help":
ctab[key] = topics_dict(obj.help_table)
else:
try:
ctab[key] = value[3]
except:
ctab[key] = None
pass
def lookup_dynamic(fun_list,idx,f_idx,words):
if not fun_list:
return []
if fun_list[f_idx] == loop:
f_idx -= 1
f = fun_list[f_idx]
w = words[0]
wordlist = f(idx)
delimiter = f(idx,1)
if len(wordlist) == 1 and wordlist[0] == '*':
return lookup_dynamic(fun_list,idx+1,f_idx+1,words[1:])
elif len(words) == 1:
return [x+delimiter for x in wordlist if x.startswith(w)]
return lookup_dynamic(fun_list,idx+1,f_idx+1,words[1:])
def lookup_words(ctab,words):
if not ctab:
return []
if type(ctab) == type(()):
return lookup_dynamic(ctab,0,0,words)
if len(words) == 1:
return [x+' ' for x in ctab if x.startswith(words[0])]
elif words[0] in ctab.keys():
return lookup_words(ctab[words[0]],words[1:])
return []
def split_buffer():
p = readline.get_line_buffer()
p = p.replace(':',' ').replace('=',' ')
return p.split()
def completer(txt,state):
words = split_buffer()
if readline.get_begidx() == readline.get_endidx():
words.append('')
matched = lookup_words(levels.completion_tab,words)
matched.append(None)
return matched[state]
termctrl = TerminalController()
wcache = WCache()
user_prefs = UserPrefs()
id_store = IdMgmt()
cib_factory = CibFactory()
cib_status = CibStatus()
cli_display = CliDisplay()
pe_metadata = RAInfo("pengine","metadata")
stonithd_metadata = RAInfo("stonithd","metadata")
ra_if = RaLrmd()
if not ra_if.good:
ra_if = RaOS()
tmpfiles = []
def load_rc(rcfile):
try: f = open(rcfile)
except: return
save_stdin = sys.stdin
sys.stdin = f
while True:
inp = multi_input()
if inp == None:
break
try: parse_line(levels,shlex.split(inp))
except ValueError, msg:
common_err(msg)
f.close()
sys.stdin = save_stdin
def multi_input(prompt = ''):
"""
Get input from user
Allow multiple lines using a continuation character
"""
global lineno
line = []
while True:
try:
text = raw_input(prompt)
except EOFError:
return None
if lineno >= 0:
lineno += 1
if regression_tests:
print ".INP:",text
sys.stdout.flush()
sys.stderr.flush()
stripped = text.strip()
if stripped.endswith('\\'):
stripped = stripped.rstrip('\\')
line.append(stripped)
if prompt:
prompt = '> '
else:
line.append(stripped)
break
return ''.join(line)
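# Example (illustrative): a trailing backslash continues the line, so
#
#   crm(live)configure# primitive www ocf:heartbeat:apache \
#   >     params configfile=/etc/apache2/httpd.conf
#
# is joined into one logical line before it is handed to the parser.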
class Levels(object):
'''
Keep track of levels and prompts.
'''
def __init__(self,start_level):
self._marker = 0
self._in_transit = False
self.level_stack = []
self.comp_stack = []
self.current_level = start_level()
self.parse_root = self.current_level.cmd_table
self.prompts = []
self.completion_tab = {}
mk_completion_tab(self.current_level,self.completion_tab)
def getprompt(self):
return ' '.join(self.prompts)
def mark(self):
self._marker = len(self.level_stack)
self._in_transit = False
def release(self):
while len(self.level_stack) > self._marker:
self.droplevel()
def new_level(self,level_obj,token):
self.level_stack.append(self.current_level)
self.comp_stack.append(self.completion_tab)
self.prompts.append(token)
self.current_level = level_obj()
self.parse_root = self.current_level.cmd_table
try:
if not self.completion_tab[token]:
mk_completion_tab(self.current_level,self.completion_tab[token])
self.completion_tab = self.completion_tab[token]
except:
pass
self._in_transit = True
def previous(self):
if self.level_stack:
return self.level_stack[-1]
def droplevel(self):
if self.level_stack:
self.current_level.end_game(self._in_transit)
self.current_level = self.level_stack.pop()
self.completion_tab = self.comp_stack.pop()
self.parse_root = self.current_level.cmd_table
self.prompts.pop()
def check_args(args,argsdim):
if not argsdim: return True
if len(argsdim) == 1:
minargs = argsdim[0]
return len(args) >= minargs
else:
minargs,maxargs = argsdim
return len(args) >= minargs and len(args) <= maxargs
#
# Note on parsing
#
# Parsing tables are python dictionaries.
#
# Keywords are used as keys and the corresponding values are
# lists (actually tuples, since they should be read-only) or
# classes. In the former case, the keyword is a terminal and
# in the latter, a new object for the class is created. The class
# must have the cmd_table variable.
#
# The list has the following content:
#
# function: a function to handle this command
# numargs_list: minimum/maximum number of arguments; for example,
# (0,1) means one optional argument, (1,1) one required; if the
# list is empty then the function will parse arguments itself
# required minimum skill level: operator, administrator, expert
# (encoded as a small integer from 0 to 2)
# list of completer functions (optional)
#
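# For example, the top level table defined above mixes both kinds of
# entries:
#
#   self.cmd_table['status'] = (self.status,(0,5),0)  # terminal: handler,
#                                                     # 0..5 args, skill 0
#   self.cmd_table['configure'] = CibConfig           # descend into the
#                                                     # configure level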
def show_usage(cmd):
p = None
try: p = cmd.__doc__
except: pass
if p:
print >> sys.stderr, p
else:
syntax_err(cmd.__name__)
def parse_line(lvl,s):
if not s: return True
if s[0].startswith('#'): return True
lvl.mark()
pt = lvl.parse_root
cmd = None
i = 0
for i in range(len(s)):
token = s[i]
if token in pt:
if type(pt[token]) == type(object):
lvl.new_level(pt[token],token)
pt = lvl.parse_root # move to the next level
else:
cmd = pt[token] # terminal symbol
break # and stop parsing
else:
syntax_err(s[i:])
lvl.release()
return False
if cmd: # found a terminal symbol
if not user_prefs.check_skill_level(cmd[2]):
lvl.release()
skill_err(s[i])
return False
args = s[i+1:]
if not check_args(args,cmd[1]):
lvl.release()
show_usage(cmd[0])
return False
args = s[i:]
d = lambda: cmd[0](*args)
rv = d() # execute the command
lvl.release()
return rv != False
return True
# three modes: interactive (no args supplied), batch (input from
# a file), half-interactive (args supplied, but not batch)
interactive = False
batch = False
inp_file = ''
prompt = ''
def cib_prompt():
return cib_in_use or "live"
def usage():
print >> sys.stderr, """
usage:
crm [-D display_type] [-f file] [-hF] [args]
Use crm without arguments for an interactive session.
Supply one or more arguments for a "single-shot" use.
Use -f to specify a file which contains a script. Use '-' for
standard input or use pipe/redirection.
crm displays cli format configurations using a color scheme
and/or in uppercase. Pick one of "color" or "uppercase", or
use "-D color,uppercase" if you want colorful uppercase.
Get plain output with "-D plain". The default may be set in
user preferences (options).
-F stands for force; if set, all operations behave as if
force was specified on the command line (e.g. configure commit).
Examples:
# crm -f stopapp2.cli
# crm < stopapp2.cli
# crm resource stop global_www
# crm status
"""
sys.exit(1)
hist_file = os.environ.get('HOME')+"/.crm_history"
rc_file = os.environ.get('HOME')+"/.crm.rc"
help_sys = HelpSystem()
levels = Levels(TopLevel)
this_node = os.uname()[1]
cib_in_use = os.getenv(CibShadow().envvar)
load_rc(rc_file)
if not sys.stdin.isatty():
lineno = 0
batch = True
else:
interactive = True
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], \
'hdf:FRD:', ("help","debug","file=",\
"force","regression-tests","display="))
for o,p in opts:
if o in ("-h","--help"):
usage()
elif o == "-d":
user_prefs.set_debug()
elif o == "-R":
regression_tests = True
elif o in ("-D","--display"):
user_prefs.set_output(p)
elif o in ("-F","--force"):
user_prefs.set_force()
elif o in ("-f","--file"):
batch = True
lineno = 0
inp_file = p
except getopt.GetoptError,msg:
print msg
usage()
if len(args) == 1 and args[0].startswith("conf"):
parse_line(levels,["configure"])
interactive = True
elif len(args) > 0:
lineno = 0
interactive = False
if parse_line(levels,shlex.split(' '.join(args))):
# if the user entered a level, then just continue
if levels.previous():
if not inp_file and sys.stdin.isatty():
interactive = True
else:
sys.exit(0)
else:
sys.exit(1)
if inp_file == "-":
pass
elif inp_file:
try:
f = open(inp_file)
except IOError, msg:
common_err(msg)
usage()
sys.stdin = f
if interactive:
compl_help = CompletionHelp()
readline.set_history_length(100)
readline.parse_and_bind("tab: complete")
readline.set_completer(completer)
readline.set_completer_delims(\
readline.get_completer_delims().replace('-','').replace('/','').replace('=',''))
try: readline.read_history_file(hist_file)
except: pass
while True:
if interactive:
prompt = "crm(%s)%s# " % (cib_prompt(),levels.getprompt())
inp = multi_input(prompt)
if inp == None:
cmd_exit("eof")
try: parse_line(levels,shlex.split(inp))
except ValueError, msg:
common_err(msg)
# vim:ts=4:sw=4:et:
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 15cc70a796..003e1f0cd8 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,1790 +1,1793 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <sys/utsname.h>
#include <crm/msg_xml.h>
#include <crm/common/util.h>
#include <crm/common/xml.h>
#include <crm/common/ipc.h>
#include <crm/common/mainloop.h>
#include <crm/cib.h>
#include <crm/pengine/status.h>
#include <../lib/pengine/unpack.h>
/* GMainLoop *mainloop = NULL; */
void wait_for_refresh(int offset, const char *prefix, int msec);
void clean_up(int rc);
void crm_diff_update(const char *event, xmlNode *msg);
gboolean mon_refresh_display(gpointer user_data);
int cib_connect(gboolean full);
char *xml_file = NULL;
char *as_html_file = NULL;
char *pid_file = NULL;
char *snmp_target = NULL;
gboolean as_console = TRUE;
gboolean simple_status = FALSE;
gboolean group_by_node = FALSE;
gboolean inactive_resources = FALSE;
gboolean web_cgi = FALSE;
int reconnect_msec = 5000;
gboolean daemonize = FALSE;
GMainLoop *mainloop = NULL;
guint timer_id = 0;
const char *crm_mail_host = NULL;
const char *crm_mail_prefix = NULL;
const char *crm_mail_from = NULL;
const char *crm_mail_to = NULL;
const char *external_agent = NULL;
const char *external_recipient = NULL;
cib_t *cib = NULL;
xmlNode *current_cib = NULL;
gboolean one_shot = FALSE;
gboolean has_warnings = FALSE;
gboolean print_failcount = FALSE;
gboolean print_operations = FALSE;
gboolean print_timing = FALSE;
gboolean log_diffs = FALSE;
gboolean log_updates = FALSE;
long last_refresh = 0;
crm_trigger_t *refresh_trigger = NULL;
/*
* 1.3.6.1.4.1.32723 has been assigned to the project by IANA
* http://www.iana.org/assignments/enterprise-numbers
*/
#define PACEMAKER_PREFIX "1.3.6.1.4.1.32723"
#define PACEMAKER_TRAP_PREFIX PACEMAKER_PREFIX ".1"
#define snmp_crm_trap_oid PACEMAKER_TRAP_PREFIX
#define snmp_crm_oid_node PACEMAKER_TRAP_PREFIX ".1"
#define snmp_crm_oid_rsc PACEMAKER_TRAP_PREFIX ".2"
#define snmp_crm_oid_task PACEMAKER_TRAP_PREFIX ".3"
#define snmp_crm_oid_desc PACEMAKER_TRAP_PREFIX ".4"
#define snmp_crm_oid_status PACEMAKER_TRAP_PREFIX ".5"
#define snmp_crm_oid_rc PACEMAKER_TRAP_PREFIX ".6"
#define snmp_crm_oid_trc PACEMAKER_TRAP_PREFIX ".7"
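/* Sub-OIDs of the trap OID above: send_snmp_trap() below attaches them as
 * varbinds carrying the node, resource, task, description, operation
 * status, return code and expected (target) return code of the event. */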
#if CURSES_ENABLED
# define print_dot() if(as_console) { \
printw("."); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, "."); \
}
#else
# define print_dot() fprintf(stdout, ".");
#endif
#if CURSES_ENABLED
# define print_as(fmt, args...) if(as_console) { \
printw(fmt, ##args); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, fmt, ##args); \
}
#else
# define print_as(fmt, args...) fprintf(stdout, fmt, ##args);
#endif
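/* The print_dot()/print_as() macros above send output to the ncurses
 * window when running in console mode and to stdout otherwise; builds
 * without curses support always fall back to plain stdout. */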
static void
blank_screen(void)
{
#if CURSES_ENABLED
int lpc = 0;
for(lpc = 0; lpc < LINES; lpc++) {
move(lpc, 0);
clrtoeol();
}
move(0, 0);
refresh();
#endif
}
static gboolean
mon_timer_popped(gpointer data)
{
int rc = cib_ok;
if(timer_id > 0) {
g_source_remove(timer_id);
}
rc = cib_connect(TRUE);
if(rc != cib_ok) {
print_dot();
timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
}
return FALSE;
}
static void mon_cib_connection_destroy(gpointer user_data)
{
print_as("Connection to the CIB terminated\n");
if(cib) {
print_as("Reconnecting...");
cib->cmds->signoff(cib);
timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
}
return;
}
/*
* Mainloop signal handler.
*/
static void
mon_shutdown(int nsig)
{
clean_up(LSB_EXIT_OK);
}
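/* cib_connect(): sign on to the CIB, fetch an initial copy and render it;
 * with 'full' set, also register the diff-notification callback
 * (crm_diff_update) and a connection-destroy handler so the monitor can
 * follow changes incrementally and reconnect after a failure. */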
int cib_connect(gboolean full)
{
int rc = cib_ok;
static gboolean need_pass = TRUE;
CRM_CHECK(cib != NULL, return cib_missing);
if(getenv("CIB_passwd") != NULL) {
need_pass = FALSE;
}
if(cib->state != cib_connected_query
&& cib->state != cib_connected_command) {
crm_debug_4("Connecting to the CIB");
if(as_console && need_pass && cib->variant == cib_remote) {
need_pass = FALSE;
print_as("Password:");
}
rc = cib->cmds->signon(cib, crm_system_name, cib_query);
if(rc != cib_ok) {
return rc;
}
current_cib = get_cib_copy(cib);
mon_refresh_display(NULL);
if(full) {
if(rc == cib_ok) {
rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy);
if(rc == cib_NOTSUPPORTED) {
print_as("Notification setup failed, won't be able to reconnect after failure");
if(as_console) { sleep(2); }
rc = cib_ok;
}
}
if(rc == cib_ok) {
cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
}
if(rc != cib_ok) {
print_as("Notification setup failed, could not monitor CIB actions");
if(as_console) { sleep(2); }
clean_up(-rc);
}
}
}
return rc;
}
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"-spacer-", 1, 0, '-', "\nModes:"},
{"as-html", 1, 0, 'h', "Write cluster status to the named file"},
{"web-cgi", 0, 0, 'w', "\tWeb mode with output suitable for cgi"},
{"simple-status", 0, 0, 's', "Display the cluster status once as a simple one line output (suitable for nagios)"},
{"snmp-traps", 1, 0, 'S', "Send SNMP traps to this station", !ENABLE_SNMP},
{"mail-to", 1, 0, 'T', "Send Mail alerts to this user. See also --mail-from, --mail-host, --mail-prefix", !ENABLE_ESMTP},
{"-spacer-", 1, 0, '-', "\nDisplay Options:"},
{"group-by-node", 0, 0, 'n', "Group resources by node" },
{"inactive", 0, 0, 'r', "Display inactive resources" },
{"failcounts", 0, 0, 'f', "Display resource fail counts"},
{"operations", 0, 0, 'o', "Display resource operation history" },
{"timing-details", 0, 0, 't', "Display resource operation history with timing details\n" },
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"interval", 1, 0, 'i', "\tUpdate frequency in seconds" },
{"one-shot", 0, 0, '1', "\tDisplay the cluster status once on the console and exit"},
{"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED},
{"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"},
{"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"},
{"mail-from", 1, 0, 'F', "\tMail alerts should come from the named user", !ENABLE_ESMTP},
{"mail-host", 1, 0, 'H', "\tMail alerts should be sent via the named host", !ENABLE_ESMTP},
{"mail-prefix", 1, 0, 'P', "Subjects for mail alerts should start with this string", !ENABLE_ESMTP},
{"external-agent", 1, 0, 'E', "A program to run when resource operations take place."},
{"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."},
{"xml-file", 1, 0, 'x', NULL, 1},
{"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', "Display the cluster´s status on the console with updates as they occur:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Display the cluster´s status on the console just once then exit:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Display your cluster´s status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster´s status to an HTML file:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send email alerts:", pcmk_option_paragraph|!ENABLE_ESMTP},
{"-spacer-", 1, 0, '-', " crm_mon --daemonize --mail-to user@example.com --mail-host mail.example.com", pcmk_option_example|!ENABLE_ESMTP},
{"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it send SNMP alerts:", pcmk_option_paragraph|!ENABLE_SNMP},
{"-spacer-", 1, 0, '-', " crm_mon --daemonize --snmp-traps snmptrapd.example.com", pcmk_option_example|!ENABLE_SNMP},
{NULL, 0, 0, 0}
};
int
main(int argc, char **argv)
{
int flag;
int argerr = 0;
int exit_code = 0;
int option_index = 0;
pid_file = crm_strdup("/tmp/ClusterMon.pid");
crm_log_init(basename(argv[0]), LOG_CRIT, FALSE, FALSE, 0, NULL);
crm_set_options("V?$i:nrh:dp:s1wx:oftNS:T:F:H:P:E:e:", "mode [options]", long_options,
"Provides a summary of cluster's current state."
"\n\nOutputs varying levels of detail in a number of different formats.\n");
+
+ /* prevent zombies */
+ signal(SIGCLD, SIG_IGN);
if (strcmp(crm_system_name, "crm_mon.cgi")==0) {
web_cgi = TRUE;
one_shot = TRUE;
}
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1)
break;
switch(flag) {
case 'V':
cl_log_enable_stderr(TRUE);
alter_debug(DEBUG_INC);
break;
case 'i':
reconnect_msec = crm_get_msec(optarg);
break;
case 'n':
group_by_node = TRUE;
break;
case 'r':
inactive_resources = TRUE;
break;
case 'd':
daemonize = TRUE;
break;
case 't':
print_timing = TRUE;
print_operations = TRUE;
break;
case 'o':
print_operations = TRUE;
break;
case 'f':
print_failcount = TRUE;
break;
case 'p':
crm_free(pid_file);
pid_file = crm_strdup(optarg);
break;
case 'x':
xml_file = crm_strdup(optarg);
one_shot = TRUE;
break;
case 'h':
as_html_file = crm_strdup(optarg);
break;
case 'w':
web_cgi = TRUE;
one_shot = TRUE;
break;
case 's':
simple_status = TRUE;
one_shot = TRUE;
break;
case 'S':
snmp_target = optarg;
break;
case 'T':
crm_mail_to = optarg;
break;
case 'F':
crm_mail_from = optarg;
break;
case 'H':
crm_mail_host = optarg;
break;
case 'P':
crm_mail_prefix = optarg;
break;
case 'E':
external_agent = optarg;
break;
case 'e':
external_recipient = optarg;
break;
case '1':
one_shot = TRUE;
break;
case 'N':
as_console = FALSE;
break;
case '$':
case '?':
crm_help(flag, LSB_EXIT_OK);
break;
default:
printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
++argerr;
break;
}
}
if (optind < argc) {
printf("non-option ARGV-elements: ");
while (optind < argc)
printf("%s ", argv[optind++]);
printf("\n");
}
if (argerr) {
crm_help('?', LSB_EXIT_GENERIC);
}
if(one_shot) {
as_console = FALSE;
} else if(daemonize) {
as_console = FALSE;
cl_log_enable_stderr(FALSE);
if(!as_html_file && !snmp_target && !crm_mail_to && !external_agent) {
printf("Looks like you forgot to specify one or more of: --as-html, --mail-to, --snmp-target, --external-agent\n");
crm_help('?', LSB_EXIT_GENERIC);
}
crm_make_daemon(crm_system_name, TRUE, pid_file);
} else if(as_console) {
#if CURSES_ENABLED
initscr();
cbreak();
noecho();
cl_log_enable_stderr(FALSE);
#else
one_shot = TRUE;
as_console = FALSE;
printf("Defaulting to one-shot mode\n");
printf("You need to have curses available at compile time to enable console mode\n");
#endif
}
crm_info("Starting %s", crm_system_name);
if(xml_file != NULL) {
current_cib = filename2xml(xml_file);
mon_refresh_display(NULL);
return exit_code;
}
if(current_cib == NULL) {
cib = cib_new();
if(!one_shot) {
print_as("Attempting connection to the cluster...");
}
do {
exit_code = cib_connect(!one_shot);
if(one_shot) {
break;
} else if(exit_code != cib_ok) {
print_dot();
sleep(reconnect_msec/1000);
}
} while(exit_code == cib_connection);
if(exit_code != cib_ok) {
print_as("\nConnection to cluster failed: %s\n", cib_error2string(exit_code));
if(as_console) { sleep(2); }
clean_up(-exit_code);
}
}
if(one_shot) {
return exit_code;
}
mainloop = g_main_new(FALSE);
mainloop_add_signal(SIGTERM, mon_shutdown);
mainloop_add_signal(SIGINT, mon_shutdown);
refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);
g_main_run(mainloop);
g_main_destroy(mainloop);
crm_info("Exiting %s", crm_system_name);
clean_up(0);
return 0; /* never reached */
}
void
wait_for_refresh(int offset, const char *prefix, int msec)
{
int lpc = msec / 1000;
struct timespec sleept = {1 , 0};
if(as_console == FALSE) {
timer_id = g_timeout_add(msec, mon_timer_popped, NULL);
return;
}
crm_notice("%sRefresh in %ds...", prefix?prefix:"", lpc);
while(lpc > 0) {
#if CURSES_ENABLED
move(offset, 0);
/* printw("%sRefresh in \033[01;32m%ds\033[00m...", prefix?prefix:"", lpc); */
printw("%sRefresh in %ds...\n", prefix?prefix:"", lpc);
clrtoeol();
refresh();
#endif
lpc--;
if(lpc == 0) {
timer_id = g_timeout_add(
1000, mon_timer_popped, NULL);
} else {
if (nanosleep(&sleept, NULL) != 0) {
return;
}
}
}
}
#define mon_warn(fmt...) do { \
if (!has_warnings) { \
print_as("Warning:"); \
} else { \
print_as(","); \
} \
print_as(fmt); \
has_warnings = TRUE; \
} while(0)
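/* mon_warn() above prefixes the first warning with "Warning:" and joins
 * later ones with commas; has_warnings also suppresses the "Ok:" summary
 * in print_simple_status() and makes mon_refresh_display() exit with a
 * generic error in simple-status mode. */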
static int
print_simple_status(pe_working_set_t *data_set)
{
node_t *dc = NULL;
int nodes_online = 0;
int nodes_standby = 0;
dc = data_set->dc_node;
if(dc == NULL) {
mon_warn("No DC ");
}
slist_iter(node, node_t, data_set->nodes, lpc2,
if(node->details->standby && node->details->online) {
nodes_standby++;
} else if(node->details->online) {
nodes_online++;
} else {
mon_warn("offline node: %s", node->details->uname);
}
);
if (!has_warnings) {
print_as("Ok: %d nodes online", nodes_online);
if (nodes_standby > 0) {
print_as(", %d standby nodes", nodes_standby);
}
print_as(", %d resources configured",
g_list_length(data_set->resources));
}
print_as("\n");
return 0;
}
extern int get_failcount(node_t *node, resource_t *rsc, int *last_failure, pe_working_set_t *data_set);
static void get_ping_score(node_t *node, pe_working_set_t *data_set)
{
const char *attr = "pingd";
const char *value = NULL;
value = g_hash_table_lookup(node->details->attrs, attr);
if(value != NULL) {
print_as(" %s=%s", attr, value);
}
}
static void print_date(time_t time)
{
int lpc = 0;
char date_str[26];
asctime_r(localtime(&time), date_str);
for(; lpc < 26; lpc++) {
if(date_str[lpc] == '\n') {
date_str[lpc] = 0;
}
}
print_as("'%s'", date_str);
}
static void print_rsc_summary(pe_working_set_t *data_set, node_t *node, resource_t *rsc, gboolean all)
{
gboolean printed = FALSE;
time_t last_failure = 0;
char *fail_attr = crm_concat("fail-count", rsc->id, '-');
const char *value = g_hash_table_lookup(node->details->attrs, fail_attr);
int failcount = char2score(value); /* Get the true value, not the effective one from get_failcount() */
get_failcount(node, rsc, (int*)&last_failure, data_set);
if(all || failcount || last_failure > 0) {
printed = TRUE;
print_as(" %s: migration-threshold=%d",
rsc->id, rsc->migration_threshold);
}
if(failcount > 0) {
printed = TRUE;
print_as(" fail-count=%d", failcount);
}
if(last_failure > 0) {
printed = TRUE;
print_as(" last-failure=");
print_date(last_failure);
}
if(printed) {
print_as("\n");
}
}
static void print_rsc_history(pe_working_set_t *data_set, node_t *node, xmlNode *rsc_entry)
{
GListPtr op_list = NULL;
gboolean print_name = TRUE;
GListPtr sorted_op_list = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
xml_child_iter_filter(
rsc_entry, rsc_op, XML_LRM_TAG_RSC_OP,
op_list = g_list_append(op_list, rsc_op);
);
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
slist_iter(xml_op, xmlNode, sorted_op_list, lpc,
const char *value = NULL;
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
const char *interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
int rc = crm_parse_int(op_rc, "0");
if(safe_str_eq(task, CRMD_ACTION_STATUS)
&& safe_str_eq(interval, "0")) {
task = "probe";
}
if(rc == 7 && safe_str_eq(task, "probe")) {
continue;
} else if(safe_str_eq(task, CRMD_ACTION_NOTIFY)) {
continue;
}
if(print_name) {
print_name = FALSE;
if(rsc == NULL) {
print_as("Orphan resource: %s", rsc_id);
} else {
print_rsc_summary(data_set, node, rsc, TRUE);
}
}
print_as(" + (%s) %s:", call, task);
if(safe_str_neq(interval, "0")) {
print_as(" interval=%sms", interval);
}
if(print_timing) {
int int_value;
const char *attr = "last-rc-change";
value = crm_element_value(xml_op, attr);
if(value) {
int_value = crm_parse_int(value, NULL);
print_as(" %s=", attr);
print_date(int_value);
}
attr = "last-run";
value = crm_element_value(xml_op, attr);
if(value) {
int_value = crm_parse_int(value, NULL);
print_as(" %s=", attr);
print_date(int_value);
}
attr = "exec-time";
value = crm_element_value(xml_op, attr);
if(value) {
int_value = crm_parse_int(value, NULL);
print_as(" %s=%dms", attr, int_value);
}
attr = "queue-time";
value = crm_element_value(xml_op, attr);
if(value) {
int_value = crm_parse_int(value, NULL);
print_as(" %s=%dms", attr, int_value);
}
}
print_as(" rc=%s (%s)\n", op_rc, execra_code2string(rc));
);
/* no need to free the contents */
g_list_free(sorted_op_list);
}
static void print_node_summary(pe_working_set_t *data_set, gboolean operations)
{
xmlNode *lrm_rsc = NULL;
xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
if(operations) {
print_as("\nOperations:\n");
} else {
print_as("\nMigration summary:\n");
}
xml_child_iter_filter(
cib_status, node_state, XML_CIB_TAG_STATE,
node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
if(node == NULL || node->details->online == FALSE){
continue;
}
print_as("* Node %s: ", crm_element_value(node_state, XML_ATTR_UNAME));
get_ping_score(node, data_set);
print_as("\n");
lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
xml_child_iter_filter(
lrm_rsc, rsc_entry, XML_LRM_TAG_RESOURCE,
if(operations) {
print_rsc_history(data_set, node, rsc_entry);
} else {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
if(rsc) {
print_rsc_summary(data_set, node, rsc, FALSE);
} else {
print_as(" %s: orphan\n", rsc_id);
}
}
);
);
}
static char *
add_list_element(char *list, const char *value)
{
int len = 0;
int last = 0;
if(value == NULL) {
return list;
}
if(list) {
last = strlen(list);
}
len = last + 2; /* +1 space, +1 EOS */
len += strlen(value);
crm_realloc(list, len);
sprintf(list + last, " %s", value);
return list;
}
static int
print_status(pe_working_set_t *data_set)
{
static int updates = 0;
node_t *dc = NULL;
char *since_epoch = NULL;
char *online_nodes = NULL;
char *offline_nodes = NULL;
xmlNode *dc_version = NULL;
xmlNode *quorum_node = NULL;
xmlNode *stack = NULL;
time_t a_time = time(NULL);
int configured_resources = 0;
int print_opts = pe_print_ncurses;
const char *quorum_votes = "unknown";
if(as_console) {
blank_screen();
} else {
print_opts = pe_print_printf;
}
updates++;
dc = data_set->dc_node;
print_as("============\n");
if(a_time == (time_t)-1) {
crm_perror(LOG_ERR,"set_node_tstamp(): Invalid time returned");
return 1;
}
since_epoch = ctime(&a_time);
if(since_epoch != NULL) {
print_as("Last updated: %s", since_epoch);
}
stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']", data_set->input, LOG_DEBUG);
if(stack) {
print_as("Stack: %s\n", crm_element_value(stack, XML_NVPAIR_ATTR_VALUE));
}
dc_version = get_xpath_object("//nvpair[@name='dc-version']", data_set->input, LOG_DEBUG);
if(dc == NULL) {
print_as("Current DC: NONE\n");
} else {
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
if(safe_str_neq(dc->details->uname, dc->details->id)) {
print_as("Current DC: %s (%s)",
dc->details->uname, dc->details->id);
} else {
print_as("Current DC: %s", dc->details->uname);
}
print_as(" - partition %s quorum\n",
crm_is_true(quorum)?"with":"WITHOUT");
if(dc_version) {
print_as("Version: %s\n", crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE));
}
}
quorum_node = get_xpath_object("//nvpair[@name='"XML_ATTR_EXPECTED_VOTES"']", data_set->input, LOG_DEBUG);
if(quorum_node) {
quorum_votes = crm_element_value(quorum_node, XML_NVPAIR_ATTR_VALUE);
}
slist_iter(rsc, resource_t, data_set->resources, lpc,
if(is_not_set(rsc->flags, pe_rsc_orphan)) {
configured_resources++;
}
);
print_as("%d Nodes configured, %s expected votes\n", g_list_length(data_set->nodes), quorum_votes);
print_as("%d Resources configured.\n", configured_resources);
print_as("============\n\n");
slist_iter(node, node_t, data_set->nodes, lpc2,
const char *node_mode = NULL;
if(node->details->unclean) {
if(node->details->online && node->details->unclean) {
node_mode = "UNCLEAN (online)";
} else if(node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if(node->details->pending) {
node_mode = "pending";
} else if(node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if(node->details->standby) {
if(node->details->online) {
node_mode = "standby";
} else {
node_mode = "OFFLINE (standby)";
}
} else if(node->details->online) {
node_mode = "online";
if(group_by_node == FALSE) {
online_nodes = add_list_element(online_nodes, node->details->uname);
continue;
}
} else {
node_mode = "OFFLINE";
if(group_by_node == FALSE) {
offline_nodes = add_list_element(offline_nodes, node->details->uname);
continue;
}
}
if(safe_str_eq(node->details->uname, node->details->id)) {
print_as("Node %s: %s\n",
node->details->uname, node_mode);
} else {
print_as("Node %s (%s): %s\n",
node->details->uname, node->details->id,
node_mode);
}
if(group_by_node) {
slist_iter(rsc, resource_t,
node->details->running_rsc, lpc2,
rsc->fns->print(
rsc, "\t", print_opts|pe_print_rsconly, stdout);
);
}
);
if(online_nodes) {
print_as("Online: [%s ]\n", online_nodes);
crm_free(online_nodes);
}
if(offline_nodes) {
print_as("OFFLINE: [%s ]\n", offline_nodes);
crm_free(offline_nodes);
}
if(group_by_node == FALSE && inactive_resources) {
print_as("\nFull list of resources:\n");
} else if(inactive_resources) {
print_as("\nInactive resources:\n");
}
if(group_by_node == FALSE || inactive_resources) {
print_as("\n");
slist_iter(rsc, resource_t, data_set->resources, lpc2,
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
if(is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) {
continue;
} else if(group_by_node == FALSE) {
if(partially_active || inactive_resources) {
rsc->fns->print(rsc, NULL, print_opts, stdout);
}
} else if(is_active == FALSE && inactive_resources) {
rsc->fns->print(rsc, NULL, print_opts, stdout);
}
);
}
if(print_operations || print_failcount) {
print_node_summary(data_set, print_operations);
}
if(xml_has_children(data_set->failed)) {
print_as("\nFailed actions:\n");
xml_child_iter(data_set->failed, xml_op,
int val = 0;
const char *id = ID(xml_op);
const char *last = crm_element_value(xml_op, "last_run");
const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
const char *rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
const char *status = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
val = crm_parse_int(status, "0");
print_as(" %s (node=%s, call=%s, rc=%s, status=%s",
id, node, call, rc, op_status2text(val));
if(last) {
time_t run_at = crm_parse_int(last, "0");
print_as(", last-run=%s, queued=%sms, exec=%sms\n",
ctime(&run_at),
crm_element_value(xml_op, "exec_time"),
crm_element_value(xml_op, "queue_time"));
}
val = crm_parse_int(rc, "0");
print_as("): %s\n", execra_code2string(val));
);
}
#if CURSES_ENABLED
if(as_console) {
refresh();
}
#endif
return 0;
}
static int
print_html_status(pe_working_set_t *data_set, const char *filename, gboolean web_cgi)
{
FILE *stream;
node_t *dc = NULL;
static int updates = 0;
char *filename_tmp = NULL;
if (web_cgi) {
stream=stdout;
fprintf(stream, "Content-type: text/html\n\n");
} else {
filename_tmp = crm_concat(filename, "tmp", '.');
stream = fopen(filename_tmp, "w");
if(stream == NULL) {
crm_perror(LOG_ERR,"Cannot open %s for writing", filename_tmp);
crm_free(filename_tmp);
return -1;
}
}
updates++;
dc = data_set->dc_node;
fprintf(stream, "<html>");
fprintf(stream, "<head>");
fprintf(stream, "<title>Cluster status</title>");
/* content="%d;url=http://webdesign.about.com" */
fprintf(stream,
"<meta http-equiv=\"refresh\" content=\"%d\">", reconnect_msec/1000);
fprintf(stream, "</head>");
/*** SUMMARY ***/
fprintf(stream, "<h2>Cluster summary</h2>");
{
char *now_str = NULL;
time_t now = time(NULL);
now_str = ctime(&now);
now_str[24] = EOS; /* replace the newline */
fprintf(stream, "Last updated: <b>%s</b><br/>\n", now_str);
}
if(dc == NULL) {
fprintf(stream, "Current DC: <font color=\"red\"><b>NONE</b></font><br/>");
} else {
fprintf(stream, "Current DC: %s (%s)<br/>",
dc->details->uname, dc->details->id);
}
fprintf(stream, "%d Nodes configured.<br/>",
g_list_length(data_set->nodes));
fprintf(stream, "%d Resources configured.<br/>",
g_list_length(data_set->resources));
/*** CONFIG ***/
fprintf(stream, "<h3>Config Options</h3>\n");
fprintf(stream, "<table>\n");
fprintf(stream, "<tr><td>Default resource stickiness</td><td>:</td><td>%d</td></tr>\n",
data_set->default_resource_stickiness);
fprintf(stream, "<tr><td>STONITH of failed nodes</td><td>:</td><td>%s</td></tr>\n",
is_set(data_set->flags, pe_flag_stonith_enabled)?"enabled":"disabled");
fprintf(stream, "<tr><td>Cluster is</td><td>:</td><td>%ssymmetric</td></tr>\n",
is_set(data_set->flags, pe_flag_symmetric_cluster)?"":"a-");
fprintf(stream, "<tr><td>No Quorum Policy</td><td>:</td><td>");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "Freeze resources");
break;
case no_quorum_stop:
fprintf(stream, "Stop ALL resources");
break;
case no_quorum_ignore:
fprintf(stream, "Ignore");
break;
case no_quorum_suicide:
fprintf(stream, "Suicide");
break;
}
fprintf(stream, "\n</td></tr>\n</table>\n");
/*** NODE LIST ***/
fprintf(stream, "<h2>Node List</h2>\n");
fprintf(stream, "<ul>\n");
slist_iter(node, node_t, data_set->nodes, lpc2,
fprintf(stream, "<li>");
if(node->details->standby_onfail && node->details->online) {
fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"<font color=\"orange\">standby (on-fail)</font>\n");
} else if(node->details->standby && node->details->online) {
fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"<font color=\"orange\">standby</font>\n");
} else if(node->details->standby) {
fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"<font color=\"red\">OFFLINE (standby)</font>\n");
} else if(node->details->online) {
fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"<font color=\"green\">online</font>\n");
} else {
fprintf(stream, "Node: %s (%s): %s",node->details->uname, node->details->id,"<font color=\"red\">OFFLINE</font>\n");
}
if(group_by_node) {
fprintf(stream, "<ul>\n");
slist_iter(rsc, resource_t,
node->details->running_rsc, lpc2,
fprintf(stream, "<li>");
rsc->fns->print(rsc, NULL,
pe_print_html|pe_print_rsconly, stream);
fprintf(stream, "</li>\n");
);
fprintf(stream, "</ul>\n");
}
fprintf(stream, "</li>\n");
);
fprintf(stream, "</ul>\n");
if(group_by_node && inactive_resources) {
fprintf(stream, "<h2>(Partially) Inactive Resources</h2>\n");
} else if(group_by_node == FALSE) {
fprintf(stream, "<h2>Resource List</h2>\n");
}
if(group_by_node == FALSE || inactive_resources) {
slist_iter(rsc, resource_t, data_set->resources, lpc2,
if(group_by_node && rsc->fns->active(rsc, TRUE)) {
continue;
}
rsc->fns->print(rsc, NULL, pe_print_html, stream);
);
}
fprintf(stream, "</html>");
fflush(stream);
fclose(stream);
if (!web_cgi) {
if(rename(filename_tmp, filename) != 0) {
crm_perror(LOG_ERR,"Unable to rename %s->%s", filename_tmp, filename);
}
crm_free(filename_tmp);
}
return 0;
}
#if ENABLE_SNMP
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/snmpv3_api.h>
#include <net-snmp/agent/agent_trap.h>
#include <net-snmp/library/snmp_client.h>
#include <net-snmp/library/mib.h>
#include <net-snmp/library/snmp_debug.h>
#define add_snmp_field(list, oid_string, value) do { \
oid name[MAX_OID_LEN]; \
size_t name_length = MAX_OID_LEN; \
if (snmp_parse_oid(oid_string, name, &name_length)) { \
int s_rc = snmp_add_var(list, name, name_length, 's', (value)); \
if(s_rc != 0) { \
crm_err("Could not add %s=%s rc=%d", oid_string, value, s_rc); \
} else { \
crm_debug_2("Added %s=%s", oid_string, value); \
} \
} else { \
crm_err("Could not parse OID: %s", oid_string); \
} \
} while(0)
#define add_snmp_field_int(list, oid_string, value) do { \
oid name[MAX_OID_LEN]; \
size_t name_length = MAX_OID_LEN; \
if (snmp_parse_oid(oid_string, name, &name_length)) { \
if(NULL == snmp_pdu_add_variable( \
list, name, name_length, ASN_INTEGER, \
(u_char *) & value, sizeof(value))) { \
crm_err("Could not add %s=%d", oid_string, value); \
} else { \
crm_debug_2("Added %s=%d", oid_string, value); \
} \
} else { \
crm_err("Could not parse OID: %s", oid_string); \
} \
} while(0)
static int snmp_input(int operation, netsnmp_session *session,
int reqid, netsnmp_pdu *pdu, void *magic)
{
return 1;
}
static netsnmp_session *crm_snmp_init(const char *target)
{
static netsnmp_session *session = NULL;
if(session) {
return session;
}
if(target == NULL) {
return NULL;
}
if(crm_log_level > LOG_INFO) {
char *debug_tokens = crm_strdup("run:shell,snmptrap,tdomain");
debug_register_tokens(debug_tokens);
snmp_set_do_debugging(1);
}
crm_malloc0(session, sizeof(netsnmp_session));
snmp_sess_init(session);
session->version = SNMP_VERSION_2c;
session->callback = snmp_input;
session->callback_magic = NULL;
session = snmp_add(session,
netsnmp_transport_open_client("snmptrap", target),
NULL, NULL);
if (session == NULL) {
snmp_sess_perror("Could not create snmp transport", session);
}
return session;
}
#endif
static int
send_snmp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc)
{
int ret = 1;
#if ENABLE_SNMP
static oid snmptrap_oid[] = { 1,3,6,1,6,3,1,1,4,1,0 };
static oid sysuptime_oid[] = { 1,3,6,1,2,1,1,3,0 };
netsnmp_pdu *trap_pdu;
netsnmp_session *session = crm_snmp_init(snmp_target);
trap_pdu = snmp_pdu_create(SNMP_MSG_TRAP2);
if ( !trap_pdu ) {
crm_err("Failed to create SNMP notification");
return SNMPERR_GENERR;
}
if(1) {
/* send uptime */
char csysuptime[20];
time_t now = time(NULL);
sprintf(csysuptime, "%ld", now);
snmp_add_var(trap_pdu, sysuptime_oid, sizeof(sysuptime_oid) / sizeof(oid), 't', csysuptime);
}
/* Indicate what the trap is by setting snmpTrapOid.0 */
ret = snmp_add_var(trap_pdu, snmptrap_oid, sizeof(snmptrap_oid) / sizeof(oid), 'o', snmp_crm_trap_oid);
if (ret != 0) {
crm_err("Failed set snmpTrapOid.0=%s", snmp_crm_trap_oid);
return ret;
}
/* Add entries to the trap */
add_snmp_field(trap_pdu, snmp_crm_oid_rsc, rsc);
add_snmp_field(trap_pdu, snmp_crm_oid_node, node);
add_snmp_field(trap_pdu, snmp_crm_oid_task, task);
add_snmp_field(trap_pdu, snmp_crm_oid_desc, desc);
add_snmp_field_int(trap_pdu, snmp_crm_oid_rc, rc);
add_snmp_field_int(trap_pdu, snmp_crm_oid_trc, target_rc);
add_snmp_field_int(trap_pdu, snmp_crm_oid_status, status);
/* Send and cleanup */
ret = snmp_send(session, trap_pdu);
if(ret == 0) {
/* error */
snmp_sess_perror("Could not send SNMP trap", session);
snmp_free_pdu(trap_pdu);
ret = SNMPERR_GENERR;
} else {
ret = SNMPERR_SUCCESS;
}
#else
crm_err("Sending SNMP traps is not supported by this installation");
#endif
return ret;
}
#if ENABLE_ESMTP
#include <auth-client.h>
#include <libesmtp.h>
static void print_recipient_status(
smtp_recipient_t recipient, const char *mailbox, void *arg)
{
const smtp_status_t *status;
status = smtp_recipient_status (recipient);
printf ("%s: %d %s", mailbox, status->code, status->text);
}
static void event_cb (smtp_session_t session, int event_no, void *arg, ...)
{
int *ok;
va_list alist;
va_start(alist, arg);
switch(event_no) {
case SMTP_EV_CONNECT:
case SMTP_EV_MAILSTATUS:
case SMTP_EV_RCPTSTATUS:
case SMTP_EV_MESSAGEDATA:
case SMTP_EV_MESSAGESENT:
case SMTP_EV_DISCONNECT:
break;
case SMTP_EV_WEAK_CIPHER: {
int bits = va_arg(alist, long);
ok = va_arg(alist, int*);
crm_debug("SMTP_EV_WEAK_CIPHER, bits=%d - accepted.", bits);
*ok = 1; break;
}
case SMTP_EV_STARTTLS_OK:
crm_debug("SMTP_EV_STARTTLS_OK - TLS started here.");
break;
case SMTP_EV_INVALID_PEER_CERTIFICATE: {
long vfy_result = va_arg(alist, long);
ok = va_arg(alist, int*);
/* There is a table in handle_invalid_peer_certificate() of mail-file.c */
crm_err("SMTP_EV_INVALID_PEER_CERTIFICATE: %ld", vfy_result);
*ok = 1; break;
}
case SMTP_EV_NO_PEER_CERTIFICATE:
ok = va_arg(alist, int*);
crm_debug("SMTP_EV_NO_PEER_CERTIFICATE - accepted.");
*ok = 1;
break;
case SMTP_EV_WRONG_PEER_CERTIFICATE:
ok = va_arg(alist, int*);
crm_debug("SMTP_EV_WRONG_PEER_CERTIFICATE - accepted.");
*ok = 1;
break;
case SMTP_EV_NO_CLIENT_CERTIFICATE:
ok = va_arg(alist, int*);
crm_debug("SMTP_EV_NO_CLIENT_CERTIFICATE - accepted.");
*ok = 1;
break;
default:
crm_debug("Got event: %d - ignored.\n", event_no);
}
va_end(alist);
}
#endif
#define BODY_MAX 2048
#if ENABLE_ESMTP
static void
crm_smtp_debug (const char *buf, int buflen, int writing, void *arg)
{
char type = 0;
int lpc = 0, last = 0, level = *(int*)arg;
if (writing == SMTP_CB_HEADERS) {
type = 'H';
} else if(writing) {
type = 'C';
} else {
type = 'S';
}
for(; lpc < buflen; lpc++) {
switch(buf[lpc]) {
case 0:
case '\n':
if(last > 0) {
do_crm_log(level, " %.*s", lpc-last, buf+last);
} else {
do_crm_log(level, "%c: %.*s", type, lpc-last, buf+last);
}
last = lpc + 1;
break;
}
}
}
#endif
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc)
{
pid_t pid;
/* setenv() needs strings; these values are ints */
char *rc_s = crm_itoa(rc);
char *status_s = crm_itoa(status);
char *target_rc_s = crm_itoa(target_rc);
crm_debug("Sending external notification to '%s' via '%s'", external_recipient, external_agent);
setenv("CRM_notify_recipient",external_recipient,1);
setenv("CRM_notify_node",node,1);
setenv("CRM_notify_rsc",rsc,1);
setenv("CRM_notify_task",task,1);
setenv("CRM_notify_desc",desc,1);
setenv("CRM_notify_rc",rc_s,1);
setenv("CRM_notify_target_rc",target_rc_s,1);
setenv("CRM_notify_status",status_s,1);
pid=fork();
if(pid == -1) {
cl_perror("notification fork() failed.");
}
if(pid == 0) {
/* crm_debug("notification: I am the child. Executing the nofitication program."); */
execl(external_agent,external_agent,NULL);
}
crm_debug_2("Finished running custom notification program '%s'.",external_agent);
crm_free(target_rc_s);
crm_free(status_s);
crm_free(rc_s);
return 0;
}
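/* The program given with --external-agent is user supplied: it receives
 * the event details through the CRM_notify_* environment variables
 * exported above (recipient, node, rsc, task, desc, rc, target_rc,
 * status), e.g. by calling getenv("CRM_notify_task") in a hypothetical
 * helper of its own. */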
static int
send_smtp_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc)
{
#if ENABLE_ESMTP
smtp_session_t session;
smtp_message_t message;
auth_context_t authctx;
struct sigaction sa;
int len = 20;
int noauth = 1;
int smtp_debug = LOG_DEBUG;
char crm_mail_body[BODY_MAX];
char *crm_mail_subject = NULL;
if(node == NULL) {
node = "-";
}
if(rsc == NULL) {
rsc = "-";
}
if(desc == NULL) {
desc = "-";
}
if(crm_mail_to == NULL) {
return 1;
}
if(crm_mail_host == NULL) {
crm_mail_host = "localhost:25";
}
if(crm_mail_prefix == NULL) {
crm_mail_prefix = "Cluster notification";
}
crm_debug("Sending '%s' mail to %s via %s", crm_mail_prefix, crm_mail_to, crm_mail_host);
len += strlen(crm_mail_prefix);
len += strlen(task);
len += strlen(rsc);
len += strlen(node);
len += strlen(desc);
len++;
crm_malloc0(crm_mail_subject, len);
snprintf(crm_mail_subject, len, "%s - %s event for %s on %s: %s", crm_mail_prefix, task, rsc, node, desc);
len = 0;
len += snprintf(crm_mail_body+len, BODY_MAX-len, "%s\n", crm_mail_prefix);
len += snprintf(crm_mail_body+len, BODY_MAX-len, "====\n\n");
if(rc==target_rc) {
len += snprintf(crm_mail_body+len, BODY_MAX-len,
"Completed operation %s for resource %s on %s\n", task, rsc, node);
} else {
len += snprintf(crm_mail_body+len, BODY_MAX-len,
"Operation %s for resource %s on %s failed: %s\n", task, rsc, node, desc);
}
len += snprintf(crm_mail_body+len, BODY_MAX-len, "\nDetails:\n");
len += snprintf(crm_mail_body+len, BODY_MAX-len,
"\toperation status: (%d) %s\n", status, op_status2text(status));
if(status == LRM_OP_DONE) {
len += snprintf(crm_mail_body+len, BODY_MAX-len,
"\tscript returned: (%d) %s\n", rc, execra_code2string(rc));
len += snprintf(crm_mail_body+len, BODY_MAX-len,
"\texpected return value: (%d) %s\n", target_rc, execra_code2string(target_rc));
}
auth_client_init();
session = smtp_create_session();
message = smtp_add_message(session);
smtp_starttls_enable (session, Starttls_ENABLED);
sa.sa_handler = SIG_IGN;
sigemptyset (&sa.sa_mask);
sa.sa_flags = 0;
sigaction (SIGPIPE, &sa, NULL);
smtp_set_server (session, crm_mail_host);
authctx = auth_create_context ();
auth_set_mechanism_flags (authctx, AUTH_PLUGIN_PLAIN, 0);
smtp_set_eventcb(session, event_cb, NULL);
/* Now tell libESMTP it can use the SMTP AUTH extension.
*/
if (!noauth) {
crm_debug("Adding authentication context");
smtp_auth_set_context (session, authctx);
}
if(crm_mail_from == NULL) {
struct utsname us;
char auto_from[BODY_MAX];
CRM_ASSERT(uname(&us) == 0);
snprintf(auto_from, BODY_MAX, "crm_mon@%s", us.nodename);
smtp_set_reverse_path (message, auto_from);
} else {
/* NULL is ok */
smtp_set_reverse_path (message, crm_mail_from);
}
smtp_set_header (message, "To", NULL/*phrase*/, NULL/*addr*/); /* "Phrase" <addr> */
smtp_add_recipient (message, crm_mail_to);
/* Set the Subject: header and override any subject line in the message headers. */
smtp_set_header (message, "Subject", crm_mail_subject);
smtp_set_header_option (message, "Subject", Hdr_OVERRIDE, 1);
smtp_set_message_str(message, crm_mail_body);
smtp_set_monitorcb (session, crm_smtp_debug, &smtp_debug, 1);
if (smtp_start_session (session)) {
char buf[128];
int rc = smtp_errno();
crm_err("SMTP server problem: %s (%d)\n", smtp_strerror (rc, buf, sizeof buf), rc);
} else {
char buf[128];
int rc = smtp_errno();
const smtp_status_t *smtp_status = smtp_message_transfer_status(message);
if(rc != 0) {
crm_err("SMTP server problem: %s (%d)\n", smtp_strerror (rc, buf, sizeof buf), rc);
}
crm_info("Send status: %d %s", smtp_status->code, crm_str(smtp_status->text));
smtp_enumerate_recipients (message, print_recipient_status, NULL);
}
smtp_destroy_session(session);
auth_destroy_context(authctx);
auth_client_exit();
#endif
return 0;
}
static void handle_rsc_op(xmlNode *rsc_op)
{
int rc = -1;
int status = -1;
int action = -1;
int interval = 0;
int target_rc = -1;
int transition_num = -1;
gboolean notify = TRUE;
char *rsc = NULL;
char *task = NULL;
const char *desc = NULL;
const char *node = NULL;
const char *magic = NULL;
const char *id = ID(rsc_op);
char *update_te_uuid = NULL;
xmlNode *n = rsc_op;
magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
if(magic == NULL) {
/* non-change */
return;
}
if(FALSE == decode_transition_magic(
magic, &update_te_uuid, &transition_num, &action,
&status, &rc, &target_rc)) {
crm_err("Invalid event %s detected for %s", magic, id);
return;
}
if(parse_op_key(id, &rsc, &task, &interval) == FALSE) {
crm_err("Invalid event detected for %s", id);
return;
}
while(n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) {
n = n->parent;
}
node = ID(n);
if(node == NULL) {
crm_err("No node detected for event %s (%s)", magic, id);
return;
}
/* look up where we expected it to be? */
desc = cib_error2string(cib_ok);
if(status == LRM_OP_DONE && target_rc == rc) {
crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
if(rc == EXECRA_NOT_RUNNING) {
notify = FALSE;
}
} else if(status == LRM_OP_DONE) {
desc = execra_code2string(rc);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
} else {
desc = op_status2text(status);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
}
if(notify && snmp_target) {
send_snmp_trap(node, rsc, task, target_rc, rc, status, desc);
}
if(notify && crm_mail_to) {
send_smtp_trap(node, rsc, task, target_rc, rc, status, desc);
}
if(notify && external_agent) {
send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
}
}
void
crm_diff_update(const char *event, xmlNode *msg)
{
int rc = -1;
long now = time(NULL);
const char *op = NULL;
unsigned int log_level = LOG_INFO;
xmlNode *diff = NULL;
xmlNode *cib_last = NULL;
xmlNode *update = get_message_xml(msg, F_CIB_UPDATE);
print_dot();
if(msg == NULL) {
crm_err("NULL update");
return;
}
crm_element_value_int(msg, F_CIB_RC, &rc);
op = crm_element_value(msg, F_CIB_OPERATION);
diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
if(rc < cib_ok) {
log_level = LOG_WARNING;
do_crm_log(log_level, "[%s] %s ABORTED: %s",
event, op, cib_error2string(rc));
return;
}
if(current_cib != NULL) {
cib_last = current_cib; current_cib = NULL;
rc = cib_process_diff(op, cib_force_diff, NULL, NULL, diff, cib_last, &current_cib, NULL);
if(rc != cib_ok) {
crm_debug("Update didn't apply, requesting full copy: %s", cib_error2string(rc));
free_xml(current_cib);
current_cib = NULL;
}
}
if(current_cib == NULL) {
current_cib = get_cib_copy(cib);
}
if(log_diffs && diff) {
log_cib_diff(LOG_DEBUG, diff, op);
}
if(log_updates && update != NULL) {
print_xml_formatted(LOG_DEBUG, "raw_update", update, NULL);
}
if(diff && (crm_mail_to || snmp_target || external_agent)) {
/* Process operation updates */
xmlXPathObject *xpathObj = xpath_search(
diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RSC_OP);
if(xpathObj && xpathObj->nodesetval->nodeNr > 0) {
int lpc = 0, max = xpathObj->nodesetval->nodeNr;
for(lpc = 0; lpc < max; lpc++) {
xmlNode *rsc_op = getXpathResult(xpathObj, lpc);
handle_rsc_op(rsc_op);
}
xmlXPathFreeObject(xpathObj);
}
}
if((now - last_refresh) > (reconnect_msec/1000)) {
/* Force a refresh */
mon_refresh_display(NULL);
} else {
mainloop_set_trigger(refresh_trigger);
}
free_xml(cib_last);
}
gboolean
mon_refresh_display(gpointer user_data)
{
xmlNode *cib_copy = copy_xml(current_cib);
pe_working_set_t data_set;
last_refresh = time(NULL);
if(cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
if(cib) {
cib->cmds->signoff(cib);
}
print_as("Upgrade failed: %s", cib_error2string(cib_dtd_validation));
if(as_console) { sleep(2); }
clean_up(LSB_EXIT_GENERIC);
return FALSE;
}
set_working_set_defaults(&data_set);
data_set.input = cib_copy;
cluster_status(&data_set);
if(as_html_file || web_cgi) {
if (print_html_status(&data_set, as_html_file, web_cgi) != 0) {
fprintf(stderr, "Critical: Unable to output html file\n");
clean_up(LSB_EXIT_GENERIC);
}
} else if(daemonize) {
/* do nothing */
} else if (simple_status) {
print_simple_status(&data_set);
if (has_warnings) {
clean_up(LSB_EXIT_GENERIC);
}
} else {
print_status(&data_set);
}
cleanup_calculations(&data_set);
return TRUE;
}
/*
* De-init ncurses, signoff from the CIB and deallocate memory.
*/
void clean_up(int rc)
{
#if ENABLE_SNMP
netsnmp_session *session = crm_snmp_init(NULL);
if(session) {
snmp_close(session);
snmp_shutdown("snmpapp");
}
#endif
#if CURSES_ENABLED
if(as_console) {
as_console = FALSE;
echo();
nocbreak();
endwin();
}
#endif
if (cib != NULL) {
cib->cmds->signoff(cib);
cib_delete(cib);
cib = NULL;
}
crm_free(as_html_file);
crm_free(xml_file);
crm_free(pid_file);
if(rc >= 0) {
exit(rc);
}
return;
}
